from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, CustomPaginationAdmin,
CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
SwallowOneToOne, UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
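# ChildAdmin.get_queryset() (in this test app's admin module) applies a
# custom select_related; the nested dict below is the ORM's internal
# representation of that 'parent__name' path, so the assertion checks that
# ChangeList preserved it instead of overwriting it.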
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">(None)</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that the result_list inclusion tag generates a table with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: the result_list inclusion tag generates a
table, and this checks that the items are nested within the table
element tags.
Also a regression test for #13599: when list_editable is enabled,
hidden fields should be rendered in a div outside the table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in the admin's changelist doesn't
use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">(None)</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views are correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
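# '.' is the DOT marker that the pagination template tag emits for the
# elided ("...") positions in long page ranges.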
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs;
# it doesn't render any string.
self.assertEqual(template.render(context), '')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['users.json']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
"""Access to Python's configuration information."""
import os
import sys
from os.path import pardir, realpath
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{installed_base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include':
'{installed_base}/include/python{py_version_short}{abiflags}',
'platinclude':
'{installed_platbase}/include/python{py_version_short}{abiflags}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{installed_base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{installed_base}/include/python',
'platinclude': '{installed_base}/include/python',
'scripts': '{base}/bin',
'data': '{base}',
},
'nt': {
'stdlib': '{installed_base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{installed_base}/Include',
'platinclude': '{installed_base}/Include',
'scripts': '{base}/Scripts',
'data': '{base}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data': '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data': '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data': '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
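# Worked example (assuming CPython 3.5.1): _PY_VERSION == '3.5.1',
# _PY_VERSION_SHORT == '3.5', _PY_VERSION_SHORT_NO_DOT == '35'.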
_PREFIX = os.path.normpath(sys.prefix)
_BASE_PREFIX = os.path.normpath(sys.base_prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
_PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"])
def _is_python_source_dir(d):
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if _sys_home and os.name == 'nt' and \
_sys_home.lower().endswith(('pcbuild', 'pcbuild\\amd64')):
_sys_home = os.path.dirname(_sys_home)
if _sys_home.endswith('pcbuild'): # must be amd64
_sys_home = os.path.dirname(_sys_home)
def is_python_build(check_home=False):
if check_home and _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(_PROJECT_BASE)
_PYTHON_BUILD = is_python_build(True)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{srcdir}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{projectbase}/.'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError as var:
raise AttributeError('{%s}' % var)
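# For illustration: _subst_vars('{base}/bin', {'base': '/usr'}) -> '/usr/bin';
# names missing from local_vars fall back to os.environ, and anything still
# unresolved raises AttributeError.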
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
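# On a typical Linux system with PYTHONUSERBASE unset this ends up returning
# os.path.expanduser('~/.local').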
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
import re
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename, errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m1 = _findvar1_rx.search(value)
m2 = _findvar2_rx.search(value)
if m1 and m2:
m = m1 if m1.start() < m2.start() else m2
else:
m = m1 if m1 else m2
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_sys_home or _PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
if hasattr(sys.implementation, '_multiarch'):
config_dir_name += '-%s' % sys.implementation._multiarch
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _generate_posix_vars():
"""Generate the Python module containing build-time variables."""
import pprint
vars = {}
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except OSError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise OSError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except OSError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise OSError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['BLDSHARED'] = vars['LDSHARED']
# There's a chicken-and-egg situation on OS X with regards to the
# _sysconfigdata module after the changes introduced by #15298:
# get_config_vars() is called by get_platform() as part of the
# `make pybuilddir.txt` target -- which is a precursor to the
# _sysconfigdata.py module being constructed. Unfortunately,
# get_config_vars() eventually calls _init_posix(), which attempts
# to import _sysconfigdata, which we won't have built yet. In order
# for _init_posix() to work, if we're on Darwin, just mock up the
# _sysconfigdata module manually and populate it with the build vars.
# This is more than sufficient for ensuring the subsequent call to
# get_platform() succeeds.
name = '_sysconfigdata'
if 'darwin' in sys.platform:
import types
module = types.ModuleType(name)
module.build_time_vars = vars
sys.modules[name] = module
pybuilddir = 'build/lib.%s-%s' % (get_platform(), sys.version[:3])
if hasattr(sys, "gettotalrefcount"):
pybuilddir += '-pydebug'
os.makedirs(pybuilddir, exist_ok=True)
destfile = os.path.join(pybuilddir, name + '.py')
with open(destfile, 'w', encoding='utf8') as f:
f.write('# system configuration generated and used by'
' the sysconfig module\n')
f.write('build_time_vars = ')
pprint.pprint(vars, stream=f)
# Create file used for sys.path fixup -- see Modules/getpath.c
with open('pybuilddir.txt', 'w', encoding='ascii') as f:
f.write(pybuilddir)
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see _generate_posix_vars()
from _sysconfigdata import build_time_vars
vars.update(build_time_vars)
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['EXT_SUFFIX'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
import re
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or _PROJECT_BASE, "PC")
else:
inc_dir = _sys_home or _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
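# Usage sketch (hypothetical values): the two helpers above are typically
# combined like
#   with open(get_config_h_filename()) as f:
#       config = parse_config_h(f)
#   config.get('HAVE_UNISTD_H')   # usually 1 on POSIX builds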
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_INSTALL_SCHEMES))
def get_path_names():
"""Return a tuple containing the paths names."""
return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
if expand:
return _expand_vars(scheme, vars)
else:
return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
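# Illustrative only (actual values depend on the installation): under the
# default 'posix_prefix' scheme, get_path('purelib') expands
# '{base}/lib/python{py_version_short}/site-packages' to something like
# '/usr/lib/python3.5/site-packages'.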
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['installed_base'] = _BASE_PREFIX
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name == 'nt':
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# For backward compatibility, see issue19555
SO = _CONFIG_VARS.get('EXT_SUFFIX')
if SO is not None:
_CONFIG_VARS['SO'] = SO
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
_CONFIG_VARS['userbase'] = _getuserbase()
multiarch = get_config_var('MULTIARCH')
if multiarch:
_CONFIG_VARS['multiarchsubdir'] = '/' + multiarch
else:
_CONFIG_VARS['multiarchsubdir'] = ''
# Always convert srcdir to an absolute path
srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE)
if os.name == 'posix':
if _PYTHON_BUILD:
# If srcdir is a relative path (typically '.' or '..')
# then it should be interpreted relative to the directory
# containing Makefile.
base = os.path.dirname(get_makefile_filename())
srcdir = os.path.join(base, srcdir)
else:
# srcdir is not meaningful since the installation is
# spread about the filesystem. We choose the
# directory containing the Makefile since we know it
# exists.
srcdir = os.path.dirname(get_makefile_filename())
_CONFIG_VARS['srcdir'] = _safe_realpath(srcdir)
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_CONFIG_VARS)
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
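# For example, get_config_var('EXT_SUFFIX') typically yields something like
# '.cpython-35m-x86_64-linux-gnu.so' on Linux, and asking for 'SO' returns
# the same value after emitting the DeprecationWarning above.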
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; e.g. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha
return sys.platform
# Set for cross builds explicitly
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because of a
# bootstrap problem. We use a dict to get an error
# if something suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxsize]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
import re
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
import _osx_support
osname, release, machine = _osx_support.get_platform_osx(
get_config_vars(),
osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
if '--generate-posix-vars' in sys.argv:
_generate_posix_vars()
return
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
#!/usr/bin/python
from simple_salesforce import Salesforce
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from pyspark import Row
from pyspark.sql.types import *
from pyspark.sql.functions import udf  # udf() is used below for column-level helpers
from optparse import OptionParser
from pyspark.sql import DataFrameWriter
import json
import re
import os
from datetime import datetime
# *** SPARK-ETL packages
import util
import udf_spark_etl
def main(sc, sqlContext, properties_file, spark_etl_logger):
""" This is main data extraction functionality
Data is extracted from SFDC and loaded into Spark SQL temp tables
"""
startTime = datetime.now()
# Enable logging
spark_etl_logger.info("***** Main process execution started at: "+str(startTime))
# Get app environment variables
d_app_variables = util.get_app_variables()
spark_etl_logger.info("Application environment variables: %s" %(d_app_variables))
spark_etl_logger.info("Processing Spark ETL properties file: %s" %(properties_file))
##### Get table properties defined in respective table ETL config file ####
# Store table properties in local dictionary for servicing the script
#No need to pass SPARK_ETL_CONF_DIR variable as driver script passes file with absolute path
dict_tbl_properties = util.get_json_config('', properties_file)
##### Defined SOQL statement takes precedence over list of source columns #####
##### SOQL statement will be processed and related metadata will be extracted from it
if len(dict_tbl_properties["soql_query"]) > 0:
# Process SOQL query if it is defined in config file
soqlStmt = dict_tbl_properties["soql_query"]
spark_etl_logger.info("Defined SOQL statement: "+ soqlStmt)
# Process list of fields and define schema for creating RDD
schemaCol = re.findall('SELECT\s(.+)\sFROM', dict_tbl_properties["soql_query"], flags=re.IGNORECASE)[0]
spark_etl_logger.info("Columns extracted from SOQL: " + schemaCol)
# Removing extra whitespaces from string elements while converting
schemaList = [rec.strip() for rec in schemaCol.split(',')]
# Convert column names into StructType for RDD
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
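# Illustration with a hypothetical query "SELECT Id, Name FROM Account":
# schemaCol is "Id, Name", schemaList becomes ['Id', 'Name'] and schema is
# StructType([StructField('Id', StringType(), True),
#             StructField('Name', StringType(), True)]).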
# Define source table name - extract from SOQL Query
src_tbl_name = re.findall("FROM\s(\S+)", soqlStmt, flags=re.IGNORECASE)[0]
spark_etl_logger.info("Source table name: " + src_tbl_name)
# Define target table name
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: " + tgt_table_name)
else:
spark_etl_logger.info("SOQL statement is not defined, will process src_table and src_columns properties")
# Constructing SOQL statement from properties provided, converting list to str
soqlStmt = "SELECT " + ', '.join(dict_tbl_properties["src_columns"]) \
+ " FROM " \
+ dict_tbl_properties["src_table"] \
+ " " + dict_tbl_properties["where"] \
+ " " + dict_tbl_properties["limit"]
spark_etl_logger.info("Constructed SOQL statement: %s" %(soqlStmt))
# Process list of fields and define schema for creating RDD
schemaList = dict_tbl_properties["src_columns"]
spark_etl_logger.info("Schema from config file: %s" %(schemaList))
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
# Define source table name
src_tbl_name = dict_tbl_properties["src_table"]
spark_etl_logger.info("Source table name: " + src_tbl_name)
# Define target table name for load into target data storage of your choice
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: ",tgt_table_name)
################### End process table properties defined in table ETL config file ##################
# Get Salesforce connection details from connections json file
spark_etl_logger.info("Processing SFDC connections information file sfdc_connections.json")
d_sfdc_conn = util.get_json_config(d_app_variables['SPARK_ETL_CONN_DIR'], "sfdc_connections.json")
spark_etl_logger.info("SFDC Connections: %s" %(list(d_sfdc_conn.keys())))
# Process SFDC Connection details
spark_etl_logger.info("SFDC Connection details: %s" %(d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]))
# Establish connection to Salesforce. Using Simple-Salesforce package
exec("sf=" + util.get_sfdc_conn(**d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]), globals())
###### Retrieve source table properties - use it to define target table DDL ####
#
# Store object description in list of dictionaries
# This structure returned by Simple-Salesforce
exec("tblDesc = sf."+src_tbl_name+".describe()", globals())
lColProperties = ['name', 'type', 'length', 'precision', 'custom', 'scale']
columnProperties = list()
for line in tblDesc['fields']: # Iterate through the list of dictionaries
# Keep only needed properties listed in lColProperties list and
# columns mapped in config properties file and remove the rest
rec = {k:line[k] for k in (lColProperties) if line["name"] in list(dict_tbl_properties["columns_map"].keys())}
if len(rec) == 0: continue
columnProperties.append(rec)
spark_etl_logger.info("Column properties: %s" %(rec))
# Record table properties in json file
with open(os.path.join(d_app_variables['SPARK_ETL_LOG_DIR'],tgt_table_name+"_schema.json"), "w") as tableMetadata_file:
json.dump(columnProperties, tableMetadata_file)
# Build DDL in order to create table in MySQL db
for record in columnProperties:
spark_etl_logger.info("Column MySQL datatype: " + record["name"]+" Type:"+record["type"]+" New: "+util.get_sfdc_mysql_dt(record["type"], str(record["length"]), str(record["precision"]), str(record["scale"])))
#*********************** Start Data Acquisition **************************#
#
# Extract data from SFDC - run SOQL statement.
# sf.query returns a list of OrderedDict
queryResultRaw = sf.query_all(soqlStmt)
#*********************** End Data Acquisition ****************************#
#********************* Clean up dataset *************************#
# Remove unrelated record metadata provided by SFDC
queryResult = list()
for line in queryResultRaw['records']:
rec = [(k, str(v)) for k, v in line.items() if k != "attributes"]
queryResult.append(rec)
# Create RDD
v_rdd = sc.parallelize(queryResult)
rddElemCount = v_rdd.count()
spark_etl_logger.info("RDD was successfully created")
spark_etl_logger.info("Dataset contains: "+ str(rddElemCount) + " records")
# Create DataFrame from RDD
global sqlDataFrame, sqlDFPK
sqlDataFrame = v_rdd.map(lambda l: Row(**dict(l))).toDF()
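# Each queryResult element is a list of (column, value) tuples, so dict(l)
# rebuilds the column->value mapping, Row(**...) turns it into a Spark SQL
# Row, and toDF() infers the DataFrame columns from those Rows.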
spark_etl_logger.info("Generating PK")
sqlDFPK = udf_spark_etl.generate_pk('WID', sqlDataFrame)
#sqlDFPK = sqlDataFrame.withColumn('WID', monotonicallyIncreasingId()+1)
spark_etl_logger.info("Done generating PK")
spark_etl_logger.info("Created dataframe with extracted data:: ")
sqlDFPK.printSchema()
sqlDFPK.show()
####################### UDF functions #########################
# Create UDFs
#
# logic to handle null values
slen = udf(lambda s: 0 if s is None else len(s), IntegerType())
StrConcat = udf(lambda s: "ADD_SOMETHING"+s, StringType())
####################### End UDF functions #########################
######################## Mapping columns ############################
# Create a dict out of column list in form
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
spark_etl_logger.info("Column mapping: "+k+":"+v)
# Construct command for column mapping
wCol =''
v_dfSQL_col = ''
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
#wCol = wCol + ".withColumn(\'"+v+"\' , "+dfColumnsOrig+"."+k+")"
wCol = wCol + ".withColumnRenamed(\'"+k+"\' , \'"+v+"\')"
v_dfSQL_col = v_dfSQL_col + "\""+v+"\","
dfSQL_col = v_dfSQL_col.rstrip(',')
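# With a hypothetical mapping {'Id': 'ACCOUNT_ID', 'Name': 'ACCOUNT_NAME'},
# wCol becomes ".withColumnRenamed('Id' , 'ACCOUNT_ID')"
# ".withColumnRenamed('Name' , 'ACCOUNT_NAME')", and the exec() below applies
# that chain to sqlDFPK to build dfRemapped.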
spark_etl_logger.info("The following command will be executed: dfRemapped = sqlDFPK %s" %(wCol))
# exec(dfColumnsRenamed+" = "+dfColumnsOrig+wCol, globals())
exec("global dfRemapped; dfRemapped = sqlDFPK"+wCol, globals())
dfRemapped.printSchema()
dfRemapped.show()
######################## End mapping columns ########################
# Generate PK
# Sample
#df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
#df0.select(monotonicallyIncreasingId().alias('id')).collect()
#################### Register DataFrame as Temp Table for SQL operations ####################
spark_etl_logger.info("Registering remapped data frame as Spark SQL temp table")
dfRemapped.registerTempTable(tgt_table_name)
# Run SQL (returns RDD)
rddSQL = sqlContext.sql("SELECT * FROM "+ tgt_table_name)
# Write DataFrame into AWS S3 bucket
print("Serialize DF into S3")
# dfRemapped.repartition(1).write.save("s3n://hive-qs-data/"+tgt_table_name+".json", "json", )
# dfRemapped.write.mode('append').json("s3n://hive-qs-data/"+tgt_table_name)
# rddSQL.rdd.saveAsTextFile(tgt_table_name+".csv")
# dfRemapped.rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
# dfRemapped.repartition(1).rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
print("Done serialize DF into S3")
endTime = datetime.now()
spark_etl_logger.info("***** Main process execution completed at: " + str(endTime))
spark_etl_logger.info("***** Main process execution took: " + str(endTime - startTime))
|
|
#!/usr/bin/python2.7
""" Pi Garage Alert
Author: Richard L. Lynch <rich@richlynch.com>
Description: Emails, tweets, or sends an SMS if a garage door is left open
too long.
Learn more at http://www.richlynch.com/code/pi_garage_alert
"""
##############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2013-2014 Richard L. Lynch <rich@richlynch.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
##############################################################################
import time
from time import strftime
import subprocess
import re
import sys
import json
import logging
from datetime import timedelta
import smtplib
import ssl
import traceback
from email.mime.text import MIMEText
import requests
import tweepy
import RPi.GPIO as GPIO
import httplib2
import sleekxmpp
from sleekxmpp.xmlstream import resolver, cert
from twilio.rest import TwilioRestClient
from twilio.rest.exceptions import TwilioRestException
import redis
sys.path.append('/usr/local/etc')
import pi_garage_alert_config as cfg
##############################################################################
# Jabber support
##############################################################################
# SleekXMPP requires UTF-8
if sys.version_info < (3, 0):
# pylint: disable=no-member
reload(sys)
sys.setdefaultencoding('utf8')
class Jabber(sleekxmpp.ClientXMPP):
"""Interfaces with a Jabber instant messaging service"""
def __init__(self, door_states, time_of_last_state_change):
self.logger = logging.getLogger(__name__)
self.connected = False
# Save references to door states for status queries
self.door_states = door_states
self.time_of_last_state_change = time_of_last_state_change
if not hasattr(cfg, 'JABBER_ID'):
self.logger.debug("Jabber ID not defined - Jabber support disabled")
return
if cfg.JABBER_ID == '':
self.logger.debug("Jabber ID not configured - Jabber support disabled")
return
self.logger.info("Signing into Jabber as %s", cfg.JABBER_ID)
sleekxmpp.ClientXMPP.__init__(self, cfg.JABBER_ID, cfg.JABBER_PASSWORD)
# Register event handlers
self.add_event_handler("session_start", self.handle_session_start)
self.add_event_handler("message", self.handle_message)
self.add_event_handler("ssl_invalid_cert", self.ssl_invalid_cert)
# ctrl-c processing
self.use_signals()
# Setup plugins. Order does not matter.
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0004') # Data Forms
self.register_plugin('xep_0060') # PubSub
self.register_plugin('xep_0199') # XMPP Ping
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# self.ssl_version = ssl.PROTOCOL_SSLv3
# Connect to the XMPP server and start processing XMPP stanzas.
# This will block if the network is down.
if hasattr(cfg, 'JABBER_SERVER') and hasattr(cfg, 'JABBER_PORT'):
# Config file overrode the default server and port
if not self.connect((cfg.JABBER_SERVER, cfg.JABBER_PORT)): # pylint: disable=no-member
return
else:
# Use default server and port from DNS SRV records
if not self.connect():
return
# Start up Jabber threads and return
self.process(block=False)
self.connected = True
def ssl_invalid_cert(self, raw_cert):
"""Handle an invalid certificate from the Jabber server
This may happen if the domain is using Google Apps
for their XMPP server and the XMPP server."""
hosts = resolver.get_SRV(self.boundjid.server, 5222,
'xmpp-client',
resolver=resolver.default_resolver())
domain_uses_google = False
for host, _ in hosts:
if host.lower()[-10:] == 'google.com':
domain_uses_google = True
if domain_uses_google:
try:
if cert.verify('talk.google.com', ssl.PEM_cert_to_DER_cert(raw_cert)):
logging.debug('Google certificate found for %s', self.boundjid.server)
return
except cert.CertificateError:
pass
logging.error("Invalid certificate received for %s", self.boundjid.server)
self.disconnect()
def handle_session_start(self, event):
"""Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Args:
event: An empty dictionary. The session_start
event does not provide any additional
data.
"""
# pylint: disable=unused-argument
self.send_presence()
self.get_roster()
def handle_message(self, msg):
"""Process incoming message stanzas.
Args:
msg: Received message stanza
"""
self.logger.info("Jabber from %s (%s): %s", msg['from'].bare, msg['type'], msg['body'])
# Only handle one-to-one conversations, and only if authorized
# users have been defined
if msg['type'] in ('chat', 'normal') and hasattr(cfg, 'JABBER_AUTHORIZED_IDS'):
# Check if user is authorized
if msg['from'].bare in cfg.JABBER_AUTHORIZED_IDS:
if msg['body'].lower() == 'status':
# Generate status report
states = []
for door in cfg.GARAGE_DOORS:
name = door['name']
state = self.door_states[name]
how_long = time.time() - self.time_of_last_state_change[name]
states.append("%s: %s (%s)" % (name, state, format_duration(how_long)))
response = ' / '.join(states)
else:
# Invalid command received
response = "I don't understand that command. Valid commands are: status"
self.logger.info("Replied to %s: %s", msg['from'], response)
msg.reply(response).send()
else:
self.logger.info("Ignored unauthorized user: %s", msg['from'].bare)
def send_msg(self, recipient, msg):
"""Send jabber message to specified recipient"""
if not self.connected:
self.logger.error("Unable to connect to Jabber - unable to send jabber message!")
return
self.logger.info("Sending Jabber message to %s: %s", recipient, msg)
self.send_message(mto=recipient, mbody=msg)
def terminate(self):
"""Terminate all jabber threads"""
if self.connected:
self.disconnect()
##############################################################################
# Twilio support
##############################################################################
class Twilio(object):
"""Class to connect to and send SMS using Twilio"""
def __init__(self):
self.twilio_client = None
self.logger = logging.getLogger(__name__)
def send_sms(self, recipient, msg):
"""Sends SMS message to specified phone number using Twilio.
Args:
recipient: Phone number to send SMS to.
msg: Message to send. Long messages will automatically be truncated.
"""
# User may not have configured twilio - don't initialize it until it's
# first used
if self.twilio_client is None:
self.logger.info("Initializing Twilio")
if cfg.TWILIO_ACCOUNT == '' or cfg.TWILIO_TOKEN == '':
self.logger.error("Twilio account or token not specified - unable to send SMS!")
else:
self.twilio_client = TwilioRestClient(cfg.TWILIO_ACCOUNT, cfg.TWILIO_TOKEN)
        if self.twilio_client is not None:
self.logger.info("Sending SMS to %s: %s", recipient, msg)
try:
self.twilio_client.sms.messages.create(
to=recipient,
from_=cfg.TWILIO_PHONE_NUMBER,
body=truncate(msg, 140))
except TwilioRestException as ex:
self.logger.error("Unable to send SMS: %s", ex)
except httplib2.ServerNotFoundError as ex:
self.logger.error("Unable to send SMS - internet connectivity issues: %s", ex)
except:
self.logger.error("Exception sending SMS: %s", sys.exc_info()[0])
##############################################################################
# Twitter support
##############################################################################
class Twitter(object):
"""Class to connect to and send DMs/update status on Twitter"""
def __init__(self):
self.twitter_api = None
self.logger = logging.getLogger(__name__)
def connect(self):
"""Initialize Twitter API object.
Args:
None
"""
# User may not have configured twitter - don't initialize it until it's
# first used
if self.twitter_api is None:
self.logger.info("Initializing Twitter")
if cfg.TWITTER_CONSUMER_KEY == '' or cfg.TWITTER_CONSUMER_SECRET == '':
self.logger.error("Twitter consumer key/secret not specified - unable to Tweet!")
elif cfg.TWITTER_ACCESS_KEY == '' or cfg.TWITTER_ACCESS_SECRET == '':
self.logger.error("Twitter access key/secret not specified - unable to Tweet!")
else:
auth = tweepy.OAuthHandler(cfg.TWITTER_CONSUMER_KEY, cfg.TWITTER_CONSUMER_SECRET)
auth.set_access_token(cfg.TWITTER_ACCESS_KEY, cfg.TWITTER_ACCESS_SECRET)
self.twitter_api = tweepy.API(auth)
def direct_msg(self, user, msg):
"""Send direct message to specified Twitter user.
Args:
user: User to send DM to.
msg: Message to send. Long messages will automatically be truncated.
"""
self.connect()
        if self.twitter_api is not None:
# Twitter doesn't like the same msg sent over and over, so add a timestamp
msg = strftime("%Y-%m-%d %H:%M:%S: ") + msg
self.logger.info("Sending twitter DM to %s: %s", user, msg)
try:
self.twitter_api.send_direct_message(user=user, text=truncate(msg, 140))
except tweepy.error.TweepError as ex:
self.logger.error("Unable to send Tweet: %s", ex)
def update_status(self, msg):
"""Update the users's status
Args:
msg: New status to set. Long messages will automatically be truncated.
"""
self.connect()
        if self.twitter_api is not None:
# Twitter doesn't like the same msg sent over and over, so add a timestamp
msg = strftime("%Y-%m-%d %H:%M:%S: ") + msg
self.logger.info("Updating Twitter status to: %s", msg)
try:
self.twitter_api.update_status(status=truncate(msg, 140))
except tweepy.error.TweepError as ex:
self.logger.error("Unable to update Twitter status: %s", ex)
##############################################################################
# Email support
##############################################################################
class Email(object):
"""Class to send emails"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def send_email(self, recipient, subject, msg):
"""Sends an email to the specified email address.
Args:
recipient: Email address to send to.
subject: Email subject.
msg: Body of email to send.
"""
self.logger.info("Sending email to %s: subject = \"%s\", message = \"%s\"", recipient, subject, msg)
msg = MIMEText(msg)
msg['Subject'] = subject
msg['To'] = recipient
msg['From'] = cfg.EMAIL_FROM
try:
mail = smtplib.SMTP(cfg.SMTP_SERVER, cfg.SMTP_PORT)
if cfg.SMTP_USER != '' and cfg.SMTP_PASS != '':
mail.login(cfg.SMTP_USER, cfg.SMTP_PASS)
mail.sendmail(cfg.EMAIL_FROM, recipient, msg.as_string())
mail.quit()
except:
self.logger.error("Exception sending email: %s", sys.exc_info()[0])
##############################################################################
# Pushbullet support
##############################################################################
class Pushbullet(object):
"""Class to send Pushbullet notes"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def send_note(self, access_token, title, body):
"""Sends a note to the specified access token.
Args:
access_token: Access token of the Pushbullet account to send to.
title: Note title
body: Body of the note to send
"""
self.logger.info("Sending Pushbullet note to %s: title = \"%s\", body = \"%s\"", access_token, title, body)
headers = {'Content-type': 'application/json'}
payload = {'type': 'note', 'title': title, 'body': body}
try:
session = requests.Session()
session.auth = (access_token, "")
session.headers.update(headers)
session.post("https://api.pushbullet.com/v2/pushes", data=json.dumps(payload))
except:
self.logger.error("Exception sending note: %s", sys.exc_info()[0])
##############################################################################
# Google Cloud Messaging support
##############################################################################
class GoogleCloudMessaging(object):
"""Class to send GCM notifications"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def send_push(self, state, body):
"""Sends a push notification to the specified topic.
Args:
state: Garage door state as string ("0"|"1")
body: Body of the note to send
"""
status = "1" if state == 'open' else "0"
self.logger.info("Sending GCM push to %s: status = \"%s\", body = \"%s\"", cfg.GCM_TOPIC, status, body)
auth_header = "key=" + cfg.GCM_KEY
headers = {'Content-type': 'application/json', 'Authorization': auth_header}
payload = {'to': cfg.GCM_TOPIC, 'data': {'message': body, 'status': status}}
try:
session = requests.Session()
session.headers.update(headers)
session.post("https://gcm-http.googleapis.com/gcm/send", data=json.dumps(payload))
except:
self.logger.error("Exception sending push: %s", sys.exc_info()[0])
##############################################################################
# Sensor support
##############################################################################
def get_garage_door_state(pin):
"""Returns the state of the garage door on the specified pin as a string
Args:
pin: GPIO pin number.
"""
if GPIO.input(pin): # pylint: disable=no-member
state = 'open'
else:
state = 'closed'
return state
def get_uptime():
"""Returns the uptime of the RPi as a string
"""
with open('/proc/uptime', 'r') as uptime_file:
uptime_seconds = int(float(uptime_file.readline().split()[0]))
uptime_string = str(timedelta(seconds=uptime_seconds))
return uptime_string
def get_gpu_temp():
"""Return the GPU temperature as a Celsius float
"""
cmd = ['vcgencmd', 'measure_temp']
measure_temp_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = measure_temp_proc.communicate()[0]
gpu_temp = 'unknown'
gpu_search = re.search('([0-9.]+)', output)
if gpu_search:
gpu_temp = gpu_search.group(1)
return float(gpu_temp)
def get_cpu_temp():
"""Return the CPU temperature as a Celsius float
"""
cpu_temp = 'unknown'
with open("/sys/class/thermal/thermal_zone0/temp", "r") as temp_file:
cpu_temp = float(temp_file.read()) / 1000.0
return cpu_temp
def rpi_status():
"""Return string summarizing RPi status
"""
return "CPU temp: %.1f, GPU temp: %.1f, Uptime: %s" % (get_gpu_temp(), get_cpu_temp(), get_uptime())
##############################################################################
# Logging and alerts
##############################################################################
def send_alerts(logger, alert_senders, recipients, subject, msg, state, id):
"""Send subject and msg to specified recipients
Args:
recipients: An array of strings of the form type:address
subject: Subject of the alert
msg: Body of the alert
state: The state of the door
"""
# Redis
if cfg.REDIS_SERVER_URL != '':
redis_server = redis.Redis(
host=cfg.REDIS_SERVER_URL,
port=cfg.REDIS_SERVER_PORT,
password=cfg.REDIS_SERVER_PASSWORD)
redis_server.set("garage_door_%s" % id, state)
for recipient in recipients:
if recipient[:6] == 'email:':
alert_senders['Email'].send_email(recipient[6:], subject, msg)
elif recipient[:11] == 'twitter_dm:':
alert_senders['Twitter'].direct_msg(recipient[11:], msg)
elif recipient == 'tweet':
alert_senders['Twitter'].update_status(msg)
elif recipient[:4] == 'sms:':
alert_senders['Twilio'].send_sms(recipient[4:], msg)
elif recipient[:7] == 'jabber:':
alert_senders['Jabber'].send_msg(recipient[7:], msg)
elif recipient[:11] == 'pushbullet:':
alert_senders['Pushbullet'].send_note(recipient[11:], subject, msg)
elif recipient == 'gcm':
alert_senders['Gcm'].send_push(state, msg)
else:
logger.error("Unrecognized recipient type: %s", recipient)
##############################################################################
# Misc support
##############################################################################
def truncate(input_str, length):
"""Truncate string to specified length
Args:
input_str: String to truncate
length: Maximum length of output string
"""
if len(input_str) < (length - 3):
return input_str
return input_str[:(length - 3)] + '...'
def format_duration(duration_sec):
"""Format a duration into a human friendly string"""
days, remainder = divmod(duration_sec, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
ret = ''
if days > 1:
ret += "%d days " % (days)
elif days == 1:
ret += "%d day " % (days)
if hours > 1:
ret += "%d hours " % (hours)
elif hours == 1:
ret += "%d hour " % (hours)
if minutes > 1:
ret += "%d minutes" % (minutes)
if minutes == 1:
ret += "%d minute" % (minutes)
if ret == '':
ret += "%d seconds" % (seconds)
return ret
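# Worked examples of format_duration():
#   format_duration(45)    -> "45 seconds"
#   format_duration(3700)  -> "1 hour 1 minute"
#   format_duration(86520) -> "1 day 2 minutes"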
##############################################################################
# Main functionality
##############################################################################
class PiGarageAlert(object):
"""Class with main function of Pi Garage Alert"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def main(self):
"""Main functionality
"""
try:
# Set up logging
log_fmt = '%(asctime)-15s %(levelname)-8s %(message)s'
log_level = logging.INFO
if sys.stdout.isatty():
# Connected to a real terminal - log to stdout
logging.basicConfig(format=log_fmt, level=log_level)
else:
# Background mode - log to file
logging.basicConfig(format=log_fmt, level=log_level, filename=cfg.LOG_FILENAME)
# Banner
self.logger.info("==========================================================")
self.logger.info("Pi Garage Alert starting")
# Use Raspberry Pi board pin numbers
self.logger.info("Configuring global settings")
GPIO.setmode(GPIO.BOARD)
# Configure the sensor pins as inputs with pull up resistors
for door in cfg.GARAGE_DOORS:
self.logger.info("Configuring pin %d for \"%s\"", door['pin'], door['name'])
GPIO.setup(door['pin'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Last state of each garage door
door_states = dict()
# time.time() of the last time the garage door changed state
time_of_last_state_change = dict()
# Index of the next alert to send for each garage door
alert_states = dict()
# Create alert sending objects
alert_senders = {
"Jabber": Jabber(door_states, time_of_last_state_change),
"Twitter": Twitter(),
"Twilio": Twilio(),
"Email": Email(),
"Pushbullet": Pushbullet(),
"Gcm": GoogleCloudMessaging()
}
# Read initial states
for door in cfg.GARAGE_DOORS:
name = door['name']
state = get_garage_door_state(door['pin'])
door_states[name] = state
time_of_last_state_change[name] = time.time()
alert_states[name] = 0
self.logger.info("Initial state of \"%s\" is %s", name, state)
status_report_countdown = 5
while True:
for door in cfg.GARAGE_DOORS:
index = cfg.GARAGE_DOORS.index(door)
name = door['name']
state = get_garage_door_state(door['pin'])
time_in_state = time.time() - time_of_last_state_change[name]
# Check if the door has changed state
if door_states[name] != state:
door_states[name] = state
time_of_last_state_change[name] = time.time()
self.logger.info("State of \"%s\" changed to %s after %.0f sec", name, state, time_in_state)
# Reset alert when door changes state
if alert_states[name] > 0:
# Use the recipients of the last alert
recipients = door['alerts'][alert_states[name] - 1]['recipients']
send_alerts(self.logger, alert_senders, recipients, name, "%s is now %s" % (name, state), state, index)
alert_states[name] = 0
# Reset time_in_state
time_in_state = 0
# See if there are more alerts
if len(door['alerts']) > alert_states[name]:
# Get info about alert
alert = door['alerts'][alert_states[name]]
# Has the time elapsed and is this the state to trigger the alert?
if time_in_state > alert['time'] and state == alert['state']:
send_alerts(self.logger, alert_senders, alert['recipients'], name, "%s has been %s for %d seconds!" % (name, state, time_in_state), state, index)
alert_states[name] += 1
# Periodically log the status for debug and ensuring RPi doesn't get too hot
status_report_countdown -= 1
if status_report_countdown <= 0:
status_msg = rpi_status()
for name in door_states:
status_msg += ", %s: %s/%d/%d" % (name, door_states[name], alert_states[name], (time.time() - time_of_last_state_change[name]))
self.logger.info(status_msg)
status_report_countdown = 600
# Poll every 1 second
time.sleep(1)
except KeyboardInterrupt:
logging.critical("Terminating due to keyboard interrupt")
except:
logging.critical("Terminating due to unexpected error: %s", sys.exc_info()[0])
logging.critical("%s", traceback.format_exc())
GPIO.cleanup() # pylint: disable=no-member
alert_senders['Jabber'].terminate()
if __name__ == "__main__":
PiGarageAlert().main()
|
|
"""Middleware to handle forwarded data by a reverse proxy."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from ipaddress import IPv4Network, IPv6Network, ip_address
import logging
from types import ModuleType
from typing import Literal
from aiohttp.hdrs import X_FORWARDED_FOR, X_FORWARDED_HOST, X_FORWARDED_PROTO
from aiohttp.web import Application, HTTPBadRequest, Request, StreamResponse, middleware
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_forwarded(
app: Application,
use_x_forwarded_for: bool | None,
trusted_proxies: list[IPv4Network | IPv6Network],
) -> None:
"""Create forwarded middleware for the app.
Process IP addresses, proto and host information in the forwarded for headers.
`X-Forwarded-For: <client>, <proxy1>, <proxy2>`
e.g., `X-Forwarded-For: 203.0.113.195, 70.41.3.18, 150.172.238.178`
We go through the list from the right side, and skip all entries that are in our
trusted proxies list. The first non-trusted IP is used as the client IP. If all
items in the X-Forwarded-For are trusted, including the most left item (client),
the most left item is used. In the latter case, the client connection originated
from an IP that is also listed as a trusted proxy IP or network.
`X-Forwarded-Proto: <client>, <proxy1>, <proxy2>`
e.g., `X-Forwarded-Proto: https, http, http`
OR `X-Forwarded-Proto: https` (one entry, even with multiple proxies)
The X-Forwarded-Proto is determined based on the corresponding entry of the
X-Forwarded-For header that is used/chosen as the client IP. However,
some proxies, for example, Kubernetes NGINX ingress, only retain one element
in the X-Forwarded-Proto header. In that case, we'll just use what we have.
`X-Forwarded-Host: <host>`
e.g., `X-Forwarded-Host: example.com`
If the previous headers are processed successfully, and the X-Forwarded-Host is
present, it will be used.
Additionally:
- If no X-Forwarded-For header is found, the processing of all headers is skipped.
- Throw HTTP 400 status when untrusted connected peer provides
X-Forwarded-For headers.
- If multiple instances of X-Forwarded-For, X-Forwarded-Proto or
X-Forwarded-Host are found, an HTTP 400 status code is thrown.
- If malformed or invalid (IP) data in X-Forwarded-For header is found,
an HTTP 400 status code is thrown.
- The connected client peer on the socket of the incoming connection,
must be trusted for any processing to take place.
    - If the number of elements in X-Forwarded-Proto is neither 1 nor equal to
      the number of elements in X-Forwarded-For, an HTTP 400 status code is
      thrown.
- If an empty X-Forwarded-Host is provided, an HTTP 400 status code is thrown.
- If an empty X-Forwarded-Proto is provided, or an empty element in the list,
an HTTP 400 status code is thrown.
"""
remote: Literal[False] | None | ModuleType = None
@middleware
async def forwarded_middleware(
request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
"""Process forwarded data by a reverse proxy."""
nonlocal remote
if remote is None:
# Initialize remote method
try:
from hass_nabucasa import ( # pylint: disable=import-outside-toplevel
remote,
)
# venv users might have an old version installed if they don't have cloud around anymore
if not hasattr(remote, "is_cloud_request"):
remote = False
except ImportError:
remote = False
# Skip requests from Remote UI
if remote and remote.is_cloud_request.get():
return await handler(request)
# Handle X-Forwarded-For
forwarded_for_headers: list[str] = request.headers.getall(X_FORWARDED_FOR, [])
if not forwarded_for_headers:
# No forwarding headers, continue as normal
return await handler(request)
# Get connected IP
if (
request.transport is None
or request.transport.get_extra_info("peername") is None
):
            # Connected IP isn't retrievable from the request transport, continue
return await handler(request)
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
# We have X-Forwarded-For, but config does not agree
if not use_x_forwarded_for:
_LOGGER.error(
"A request from a reverse proxy was received from %s, but your "
"HTTP integration is not set-up for reverse proxies",
connected_ip,
)
raise HTTPBadRequest
# Ensure the IP of the connected peer is trusted
if not any(connected_ip in trusted_proxy for trusted_proxy in trusted_proxies):
_LOGGER.error(
"Received X-Forwarded-For header from an untrusted proxy %s",
connected_ip,
)
raise HTTPBadRequest
# Multiple X-Forwarded-For headers
if len(forwarded_for_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forwarded-For: %s", forwarded_for_headers
)
raise HTTPBadRequest
# Process X-Forwarded-For from the right side (by reversing the list)
forwarded_for_split = list(reversed(forwarded_for_headers[0].split(",")))
try:
forwarded_for = [ip_address(addr.strip()) for addr in forwarded_for_split]
except ValueError as err:
_LOGGER.error(
"Invalid IP address in X-Forwarded-For: %s", forwarded_for_headers[0]
)
raise HTTPBadRequest from err
overrides: dict[str, str] = {}
# Find the last trusted index in the X-Forwarded-For list
forwarded_for_index = 0
for forwarded_ip in forwarded_for:
if any(forwarded_ip in trusted_proxy for trusted_proxy in trusted_proxies):
forwarded_for_index += 1
continue
overrides["remote"] = str(forwarded_ip)
break
else:
# If all the IP addresses are from trusted networks, take the left-most.
forwarded_for_index = -1
overrides["remote"] = str(forwarded_for[-1])
# Handle X-Forwarded-Proto
forwarded_proto_headers: list[str] = request.headers.getall(
X_FORWARDED_PROTO, []
)
if forwarded_proto_headers:
if len(forwarded_proto_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forward-Proto: %s", forwarded_proto_headers
)
raise HTTPBadRequest
forwarded_proto_split = list(
reversed(forwarded_proto_headers[0].split(","))
)
forwarded_proto = [proto.strip() for proto in forwarded_proto_split]
# Catch empty values
if "" in forwarded_proto:
_LOGGER.error(
"Empty item received in X-Forward-Proto header: %s",
forwarded_proto_headers[0],
)
raise HTTPBadRequest
            # The X-Forwarded-Proto contains either one element, or the same
            # number of elements as X-Forwarded-For
if len(forwarded_proto) not in (1, len(forwarded_for)):
_LOGGER.error(
"Incorrect number of elements in X-Forward-Proto. Expected 1 or %d, got %d: %s",
len(forwarded_for),
len(forwarded_proto),
forwarded_proto_headers[0],
)
raise HTTPBadRequest
# Ideally this should take the scheme corresponding to the entry
# in X-Forwarded-For that was chosen, but some proxies only retain
# one element. In that case, use what we have.
overrides["scheme"] = forwarded_proto[-1]
if len(forwarded_proto) != 1:
overrides["scheme"] = forwarded_proto[forwarded_for_index]
# Handle X-Forwarded-Host
forwarded_host_headers: list[str] = request.headers.getall(X_FORWARDED_HOST, [])
if forwarded_host_headers:
# Multiple X-Forwarded-Host headers
if len(forwarded_host_headers) > 1:
_LOGGER.error(
"Too many headers for X-Forwarded-Host: %s", forwarded_host_headers
)
raise HTTPBadRequest
forwarded_host = forwarded_host_headers[0].strip()
if not forwarded_host:
_LOGGER.error("Empty value received in X-Forward-Host header")
raise HTTPBadRequest
overrides["host"] = forwarded_host
# Done, create a new request based on gathered data.
request = request.clone(**overrides) # type: ignore[arg-type]
return await handler(request)
app.middlewares.append(forwarded_middleware)
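# Minimal usage sketch (hypothetical values; in practice this is wired up by the
# HTTP integration's setup code):
#
#   from aiohttp import web
#   app = web.Application()
#   async_setup_forwarded(app, True, [IPv4Network("192.168.1.0/24")])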
|
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import gevent.local
import random
import md5
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_resolve_endpoint():
test_endpoint = random_ipc_endpoint()
c = zerorpc.Context()
def resolve(endpoint):
if endpoint == 'titi':
return test_endpoint
return endpoint
cnt = c.register_middleware({
'resolve_endpoint': resolve
})
print 'registered_count:', cnt
assert cnt == 1
print 'resolve titi:', c.hook_resolve_endpoint('titi')
assert c.hook_resolve_endpoint('titi') == test_endpoint
print 'resolve toto:', c.hook_resolve_endpoint('toto')
assert c.hook_resolve_endpoint('toto') == 'toto'
class Resolver():
def resolve_endpoint(self, endpoint):
if endpoint == 'toto':
return test_endpoint
return endpoint
cnt = c.register_middleware(Resolver())
print 'registered_count:', cnt
assert cnt == 1
print 'resolve titi:', c.hook_resolve_endpoint('titi')
assert c.hook_resolve_endpoint('titi') == test_endpoint
print 'resolve toto:', c.hook_resolve_endpoint('toto')
assert c.hook_resolve_endpoint('toto') == test_endpoint
c2 = zerorpc.Context()
print 'resolve titi:', c2.hook_resolve_endpoint('titi')
assert c2.hook_resolve_endpoint('titi') == 'titi'
print 'resolve toto:', c2.hook_resolve_endpoint('toto')
assert c2.hook_resolve_endpoint('toto') == 'toto'
def test_resolve_endpoint_events():
test_endpoint = random_ipc_endpoint()
c = zerorpc.Context()
class Resolver():
def resolve_endpoint(self, endpoint):
if endpoint == 'some_service':
return test_endpoint
return endpoint
class Srv(zerorpc.Server):
def hello(self):
print 'heee'
return 'world'
srv = Srv(heartbeat=1, context=c)
with assert_raises(zmq.ZMQError):
srv.bind('some_service')
cnt = c.register_middleware(Resolver())
assert cnt == 1
srv.bind('some_service')
gevent.spawn(srv.run)
client = zerorpc.Client(heartbeat=1, context=c)
client.connect('some_service')
assert client.hello() == 'world'
client.close()
srv.close()
class Tracer:
'''Used by test_task_context_* tests'''
def __init__(self, identity):
self._identity = identity
self._locals = gevent.local.local()
self._log = []
@property
def trace_id(self):
return self._locals.__dict__.get('trace_id', None)
def load_task_context(self, event_header):
self._locals.trace_id = event_header.get('trace_id', None)
print self._identity, 'load_task_context', self.trace_id
self._log.append(('load', self.trace_id))
def get_task_context(self):
if self.trace_id is None:
            # just some ugly code to generate a beautiful little hash.
self._locals.trace_id = '<{0}>'.format(md5.md5(
str(random.random())[3:]
).hexdigest()[0:6].upper())
print self._identity, 'get_task_context! [make a new one]', self.trace_id
self._log.append(('new', self.trace_id))
else:
print self._identity, 'get_task_context! [reuse]', self.trace_id
self._log.append(('reuse', self.trace_id))
return { 'trace_id': self.trace_id }
def test_task_context():
endpoint = random_ipc_endpoint()
srv_ctx = zerorpc.Context()
cli_ctx = zerorpc.Context()
srv_tracer = Tracer('[server]')
srv_ctx.register_middleware(srv_tracer)
cli_tracer = Tracer('[client]')
cli_ctx.register_middleware(cli_tracer)
class Srv:
def echo(self, msg):
return msg
@zerorpc.stream
def stream(self):
yield 42
srv = zerorpc.Server(Srv(), context=srv_ctx)
srv.bind(endpoint)
srv_task = gevent.spawn(srv.run)
c = zerorpc.Client(context=cli_ctx)
c.connect(endpoint)
assert c.echo('hello') == 'hello'
for x in c.stream():
assert x == 42
srv.stop()
srv_task.join()
assert cli_tracer._log == [
('new', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
assert srv_tracer._log == [
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
def test_task_context_relay():
endpoint1 = random_ipc_endpoint()
endpoint2 = random_ipc_endpoint()
srv_ctx = zerorpc.Context()
srv_relay_ctx = zerorpc.Context()
cli_ctx = zerorpc.Context()
srv_tracer = Tracer('[server]')
srv_ctx.register_middleware(srv_tracer)
srv_relay_tracer = Tracer('[server_relay]')
srv_relay_ctx.register_middleware(srv_relay_tracer)
cli_tracer = Tracer('[client]')
cli_ctx.register_middleware(cli_tracer)
class Srv:
def echo(self, msg):
return msg
srv = zerorpc.Server(Srv(), context=srv_ctx)
srv.bind(endpoint1)
srv_task = gevent.spawn(srv.run)
c_relay = zerorpc.Client(context=srv_relay_ctx)
c_relay.connect(endpoint1)
class SrvRelay:
def echo(self, msg):
return c_relay.echo('relay' + msg) + 'relayed'
srv_relay = zerorpc.Server(SrvRelay(), context=srv_relay_ctx)
srv_relay.bind(endpoint2)
srv_relay_task = gevent.spawn(srv_relay.run)
c = zerorpc.Client(context=cli_ctx)
c.connect(endpoint2)
assert c.echo('hello') == 'relayhellorelayed'
srv_relay.stop()
srv.stop()
srv_relay_task.join()
srv_task.join()
assert cli_tracer._log == [
('new', cli_tracer.trace_id),
]
assert srv_relay_tracer._log == [
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
assert srv_tracer._log == [
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
def test_task_context_relay_fork():
endpoint1 = random_ipc_endpoint()
endpoint2 = random_ipc_endpoint()
srv_ctx = zerorpc.Context()
srv_relay_ctx = zerorpc.Context()
cli_ctx = zerorpc.Context()
srv_tracer = Tracer('[server]')
srv_ctx.register_middleware(srv_tracer)
srv_relay_tracer = Tracer('[server_relay]')
srv_relay_ctx.register_middleware(srv_relay_tracer)
cli_tracer = Tracer('[client]')
cli_ctx.register_middleware(cli_tracer)
class Srv:
def echo(self, msg):
return msg
srv = zerorpc.Server(Srv(), context=srv_ctx)
srv.bind(endpoint1)
srv_task = gevent.spawn(srv.run)
c_relay = zerorpc.Client(context=srv_relay_ctx)
c_relay.connect(endpoint1)
class SrvRelay:
def echo(self, msg):
def dothework(msg):
return c_relay.echo(msg) + 'relayed'
g = gevent.spawn(zerorpc.fork_task_context(dothework,
srv_relay_ctx), 'relay' + msg)
print 'relaying in separate task:', g
r = g.get()
print 'back to main task'
return r
srv_relay = zerorpc.Server(SrvRelay(), context=srv_relay_ctx)
srv_relay.bind(endpoint2)
srv_relay_task = gevent.spawn(srv_relay.run)
c = zerorpc.Client(context=cli_ctx)
c.connect(endpoint2)
assert c.echo('hello') == 'relayhellorelayed'
srv_relay.stop()
srv.stop()
srv_relay_task.join()
srv_task.join()
assert cli_tracer._log == [
('new', cli_tracer.trace_id),
]
assert srv_relay_tracer._log == [
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
assert srv_tracer._log == [
('load', cli_tracer.trace_id),
('reuse', cli_tracer.trace_id),
]
def test_task_context_pushpull():
endpoint = random_ipc_endpoint()
puller_ctx = zerorpc.Context()
pusher_ctx = zerorpc.Context()
puller_tracer = Tracer('[puller]')
puller_ctx.register_middleware(puller_tracer)
pusher_tracer = Tracer('[pusher]')
pusher_ctx.register_middleware(pusher_tracer)
trigger = gevent.event.Event()
class Puller:
def echo(self, msg):
trigger.set()
puller = zerorpc.Puller(Puller(), context=puller_ctx)
puller.bind(endpoint)
puller_task = gevent.spawn(puller.run)
c = zerorpc.Pusher(context=pusher_ctx)
c.connect(endpoint)
trigger.clear()
c.echo('hello')
trigger.wait()
puller.stop()
puller_task.join()
assert pusher_tracer._log == [
('new', pusher_tracer.trace_id),
]
assert puller_tracer._log == [
('load', pusher_tracer.trace_id),
]
def test_task_context_pubsub():
endpoint = random_ipc_endpoint()
subscriber_ctx = zerorpc.Context()
publisher_ctx = zerorpc.Context()
subscriber_tracer = Tracer('[subscriber]')
subscriber_ctx.register_middleware(subscriber_tracer)
publisher_tracer = Tracer('[publisher]')
publisher_ctx.register_middleware(publisher_tracer)
trigger = gevent.event.Event()
class Subscriber:
def echo(self, msg):
trigger.set()
subscriber = zerorpc.Subscriber(Subscriber(), context=subscriber_ctx)
subscriber.bind(endpoint)
subscriber_task = gevent.spawn(subscriber.run)
c = zerorpc.Publisher(context=publisher_ctx)
c.connect(endpoint)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine starts
    # reading (the published messages will go to /dev/null until then).
for attempt in xrange(0, 10):
c.echo('pub...')
if trigger.wait(0.2):
break
subscriber.stop()
subscriber_task.join()
assert publisher_tracer._log == [
('new', publisher_tracer.trace_id),
]
assert subscriber_tracer._log == [
('load', publisher_tracer.trace_id),
]
class InspectExceptionMiddleware(Tracer):
def __init__(self, barrier=None):
self.called = False
self._barrier = barrier
Tracer.__init__(self, identity='[server]')
def server_inspect_exception(self, request_event, reply_event, task_context, exc_info):
assert 'trace_id' in task_context
assert request_event.name == 'echo'
if self._barrier: # Push/Pull
assert reply_event is None
else: # Req/Rep or Req/Stream
assert reply_event.name == 'ERR'
exc_type, exc_value, exc_traceback = exc_info
self.called = True
if self._barrier:
self._barrier.set()
class Srv(object):
def echo(self, msg):
raise RuntimeError(msg)
@zerorpc.stream
def echoes(self, msg):
raise RuntimeError(msg)
def test_server_inspect_exception_middleware():
endpoint = random_ipc_endpoint()
middleware = InspectExceptionMiddleware()
ctx = zerorpc.Context()
ctx.register_middleware(middleware)
module = Srv()
server = zerorpc.Server(module, context=ctx)
server.bind(endpoint)
gevent.spawn(server.run)
client = zerorpc.Client()
client.connect(endpoint)
try:
client.echo('This is a test which should call the InspectExceptionMiddleware')
except zerorpc.exceptions.RemoteError as ex:
assert ex.name == 'RuntimeError'
client.close()
server.close()
assert middleware.called is True
def test_server_inspect_exception_middleware_puller():
endpoint = random_ipc_endpoint()
barrier = gevent.event.Event()
middleware = InspectExceptionMiddleware(barrier)
ctx = zerorpc.Context()
ctx.register_middleware(middleware)
module = Srv()
server = zerorpc.Puller(module, context=ctx)
server.bind(endpoint)
gevent.spawn(server.run)
client = zerorpc.Pusher()
client.connect(endpoint)
barrier.clear()
client.echo('This is a test which should call the InspectExceptionMiddleware')
barrier.wait(timeout=2)
client.close()
server.close()
assert middleware.called is True
def test_server_inspect_exception_middleware_stream():
endpoint = random_ipc_endpoint()
middleware = InspectExceptionMiddleware()
ctx = zerorpc.Context()
ctx.register_middleware(middleware)
module = Srv()
server = zerorpc.Server(module, context=ctx)
server.bind(endpoint)
gevent.spawn(server.run)
client = zerorpc.Client()
client.connect(endpoint)
try:
client.echo('This is a test which should call the InspectExceptionMiddleware')
except zerorpc.exceptions.RemoteError as ex:
assert ex.name == 'RuntimeError'
client.close()
server.close()
assert middleware.called is True
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import mock
from oslotest import base as test_base
from oslo.utils import netutils
class NetworkUtilsTest(test_base.BaseTestCase):
def test_no_host(self):
result = netutils.urlsplit('http://')
self.assertEqual('', result.netloc)
self.assertEqual(None, result.port)
self.assertEqual(None, result.hostname)
self.assertEqual('http', result.scheme)
def test_parse_host_port(self):
self.assertEqual(('server01', 80),
netutils.parse_host_port('server01:80'))
self.assertEqual(('server01', None),
netutils.parse_host_port('server01'))
self.assertEqual(('server01', 1234),
netutils.parse_host_port('server01',
default_port=1234))
self.assertEqual(('::1', 80),
netutils.parse_host_port('[::1]:80'))
self.assertEqual(('::1', None),
netutils.parse_host_port('[::1]'))
self.assertEqual(('::1', 1234),
netutils.parse_host_port('[::1]',
default_port=1234))
self.assertEqual(('2001:db8:85a3::8a2e:370:7334', 1234),
netutils.parse_host_port(
'2001:db8:85a3::8a2e:370:7334',
default_port=1234))
def test_urlsplit(self):
result = netutils.urlsplit('rpc://myhost?someparam#somefragment')
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'myhost')
self.assertEqual(result.path, '')
self.assertEqual(result.query, 'someparam')
self.assertEqual(result.fragment, 'somefragment')
result = netutils.urlsplit(
'rpc://myhost/mypath?someparam#somefragment',
allow_fragments=False)
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'myhost')
self.assertEqual(result.path, '/mypath')
self.assertEqual(result.query, 'someparam#somefragment')
self.assertEqual(result.fragment, '')
result = netutils.urlsplit(
'rpc://user:pass@myhost/mypath?someparam#somefragment',
allow_fragments=False)
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'user:pass@myhost')
self.assertEqual(result.path, '/mypath')
self.assertEqual(result.query, 'someparam#somefragment')
self.assertEqual(result.fragment, '')
def test_urlsplit_ipv6(self):
ipv6_url = 'http://[::1]:443/v2.0/'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'http')
self.assertEqual(result.netloc, '[::1]:443')
self.assertEqual(result.path, '/v2.0/')
self.assertEqual(result.hostname, '::1')
self.assertEqual(result.port, 443)
ipv6_url = 'http://user:pass@[::1]/v2.0/'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'http')
self.assertEqual(result.netloc, 'user:pass@[::1]')
self.assertEqual(result.path, '/v2.0/')
self.assertEqual(result.hostname, '::1')
self.assertEqual(result.port, None)
ipv6_url = 'https://[2001:db8:85a3::8a2e:370:7334]:1234/v2.0/xy?ab#12'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'https')
self.assertEqual(result.netloc, '[2001:db8:85a3::8a2e:370:7334]:1234')
self.assertEqual(result.path, '/v2.0/xy')
self.assertEqual(result.hostname, '2001:db8:85a3::8a2e:370:7334')
self.assertEqual(result.port, 1234)
self.assertEqual(result.query, 'ab')
self.assertEqual(result.fragment, '12')
def test_urlsplit_params(self):
test_url = "http://localhost/?a=b&c=d"
result = netutils.urlsplit(test_url)
self.assertEqual({'a': 'b', 'c': 'd'}, result.params())
self.assertEqual({'a': 'b', 'c': 'd'}, result.params(collapse=False))
test_url = "http://localhost/?a=b&a=c&a=d"
result = netutils.urlsplit(test_url)
self.assertEqual({'a': 'd'}, result.params())
self.assertEqual({'a': ['b', 'c', 'd']}, result.params(collapse=False))
test_url = "http://localhost"
result = netutils.urlsplit(test_url)
self.assertEqual({}, result.params())
test_url = "http://localhost?"
result = netutils.urlsplit(test_url)
self.assertEqual({}, result.params())
def test_set_tcp_keepalive(self):
mock_sock = mock.Mock()
netutils.set_tcp_keepalive(mock_sock, True, 100, 10, 5)
calls = [
mock.call.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, True),
]
if hasattr(socket, 'TCP_KEEPIDLE'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE, 100)
]
if hasattr(socket, 'TCP_KEEPINTVL'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL, 10),
]
if hasattr(socket, 'TCP_KEEPCNT'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPCNT, 5)
]
mock_sock.assert_has_calls(calls)
mock_sock.reset_mock()
netutils.set_tcp_keepalive(mock_sock, False)
self.assertEqual(1, len(mock_sock.mock_calls))
def test_is_valid_ipv4(self):
self.assertTrue(netutils.is_valid_ipv4('42.42.42.42'))
self.assertFalse(netutils.is_valid_ipv4('-1.11.11.11'))
self.assertFalse(netutils.is_valid_ipv4(''))
def test_is_valid_ipv6(self):
self.assertTrue(netutils.is_valid_ipv6('::1'))
self.assertFalse(netutils.is_valid_ipv6(
'1fff::a88:85a3::172.31.128.1'))
self.assertFalse(netutils.is_valid_ipv6(''))
def test_is_valid_ip(self):
self.assertTrue(netutils.is_valid_ip('127.0.0.1'))
self.assertTrue(netutils.is_valid_ip('2001:db8::ff00:42:8329'))
self.assertFalse(netutils.is_valid_ip('256.0.0.0'))
self.assertFalse(netutils.is_valid_ip('::1.2.3.'))
self.assertFalse(netutils.is_valid_ip(''))
def test_valid_port(self):
valid_inputs = [1, '1', 2, '3', '5', 8, 13, 21,
'80', '3246', '65535']
for input_str in valid_inputs:
self.assertTrue(netutils.is_valid_port(input_str))
def test_valid_port_fail(self):
invalid_inputs = ['-32768', '0', 0, '65536', 528491, '528491',
'528.491', 'thirty-seven', None]
for input_str in invalid_inputs:
self.assertFalse(netutils.is_valid_port(input_str))
def test_get_my_ip(self):
sock_attrs = {
'return_value.getsockname.return_value': ['1.2.3.4', '']}
with mock.patch('socket.socket', **sock_attrs):
addr = netutils.get_my_ipv4()
self.assertEqual(addr, '1.2.3.4')
@mock.patch('socket.socket')
@mock.patch('oslo_utils.netutils._get_my_ipv4_address')
def test_get_my_ip_socket_error(self, ip, mock_socket):
mock_socket.side_effect = socket.error
ip.return_value = '1.2.3.4'
addr = netutils.get_my_ipv4()
self.assertEqual(addr, '1.2.3.4')
|
|
__author__ = 'Antonio Segura Cano'
import os
import re
import numpy as np
# We'll create an FSMachine class
# The FSMachine class groups the helpers this program uses to generate FSMs
# A pack of random FSMs can be generated with FSMachine.random()
# KISS2 headers include the following information:
#
# .i # number of inputs
# .o # number of outputs
# .p # number of products
# .s # number of states used
# .r # RESET state [optional]
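#
# A minimal (hypothetical) KISS2 example matching the format parsed below; each
# transition line reads "input current_state next_state output":
#
#   .i 1
#   .o 1
#   .s 2
#   .p 4
#   .r s0
#   0 s0 s0 0
#   1 s0 s1 1
#   0 s1 s0 0
#   1 s1 s1 1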
class FSMachine:
""" FSMachine class """
def __init__(self, n=10):
"""
        :param n: Number of Finite State Machines to create in the pack (default: 10)
        :return: FSMachine pack ready to use.
"""
self.n = n
def random(self, seed="seed", min=1, max=8, states=10):
"""
        :param seed: Seed string used to generate random FSMs (default: "seed")
        :param min: The minimum number of inputs or outputs in the FSM (inclusive)
        :param max: The maximum number of inputs or outputs in the FSM (inclusive)
        :param states: Number of states in each generated FSM (default: 10)
:return: A pack of random FSMs
"""
np.random.seed(int(seed, 36))
npri = np.random.random_integers
for fsm in range(self.n):
numinput = npri(min, max)
numoutput = npri(min, max)
stateslist = ['s'+str(i) for i in range(states)]
for state in stateslist:
for premise in range(2**numinput):
input = fix_size(bin(premise)[2:], numinput)
o = npri(2**numoutput) - 1
output = fix_size(bin(o)[2:], numoutput)
nextstate = npri(stateslist.__len__()) - 1
print input + ' ' + state + ' ' + stateslist[nextstate] + ' ' + output
# Util functions
def kiss2png(filepath):
    """Render a KISS2 description as a PNG state diagram using Graphviz dot."""
infile = open(filepath, 'r')
outfile = open("./temp.txt", 'a')
outfile.write("digraph g{\n\t")
metadata = {}
nline = 1
verifystates = {}
resetstate = ""
for line in infile:
pattern = re.compile("^.[ioprs]")
p = pattern.findall(line)
chunksline = line.split()
writemem = ''
if p:
key = chunksline[0].replace(".", "")
val = chunksline[1]
metadata[key] = val
if key == "r":
resetstate = val
else:
lenc = chunksline.__len__()
if lenc != 4:
if lenc == 0:
continue
log(filepath, nline)
break
else:
if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
log(filepath, nline)
break
else:
currentstate = chunksline[1]
if not resetstate:
resetstate = currentstate
# if not verifystates.has_key(currentstate):
if currentstate not in verifystates:
verifystates[currentstate] = 1
else:
verifystates[currentstate] += 1
writemem += currentstate + '->' + chunksline[2] + \
' [label="' + chunksline[0] + ' ' + chunksline[3] + '"];\n\t'
outfile.write(writemem)
nline += 1
outfile.write("\r}")
infile.close()
outfile.close()
ok = True
for state in verifystates:
mypow = 2**int(metadata["i"])
if verifystates[state] != mypow:
ok = False
log(filepath, nline)
break
print resetstate
if ok:
os.system("dot temp.txt -o result.png -Tpng && rm temp.txt")
def treatment_size(s, l):
return s.__len__() == int(l)
def fix_size(s, l):
r = s
if s.__len__() != l:
r = fix_size("0"+s, l)
return r
def log(filepath, numline):
print "Format kiss2 wrong at line " + numline.__str__()
os.system('(date "+DATE: %Y-%m-%d%nTIME: %H:%M" && echo "' +
filepath + ' wrong at line '+numline.__str__() + '") >> ../logs/error.log')
def wild_state(s1, s2):
    # Print True when s1 and s2 differ in exactly one position, False otherwise
n, i = 0, 0
r = True
for letter in s1:
if letter != s2[i]:
n += 1
if 1 < n:
r = False
break
i += 1
if n == 0:
r = False
print r
def contains(l, n, *args):
    # Return l[n] when the key is present, otherwise the first optional
    # default passed in *args (or "" when no default is given)
    res = args[0] if args else ""
    if n in l:
        res = l[n]
    return res
class FSM:
"""FSM:
s0=[{i:xx,o:xx,s:xx}]
"""
def __init__(self, states = False):
"""
:return: FSM object initialized
"""
self.defined = False
self.states = {}
self.reset = ""
if states:
if type(states) is str:
infile = open(states, 'r')
pattern = re.compile("^.[ioprs]")
for line in infile:
p = pattern.findall(line)
chunksline = line.split()
if not chunksline:
continue
if p:
key = chunksline[0].replace(".", "")
val = chunksline[1]
if key == "r":
self.reset = val
else:
astate = chunksline[1]
if astate not in self.states:
self.states[astate] = []
self.states[astate].append((chunksline[2],chunksline[0],chunksline[3]))
else:
self.states = states
if not self.reset:
self.reset = self.states.iterkeys().next()
def build(self, function, **kwargs):
pass
def tokiss2(self):
pass
def toimage(self):
if not self.defined:
print "You must initialize a FSM "
else:
print "OK"
def toimage2(self, filepath):
infile = open(filepath, 'r')
outfile = open("./temp.txt", 'a')
outfile.write("digraph g{\n\t")
metadata = {}
nline = 1
verifystates = {}
resetstate = ""
for line in infile:
pattern = re.compile("^.[ioprs]")
p = pattern.findall(line)
chunksline = line.split()
writemem = ''
if p:
key = chunksline[0].replace(".", "")
val = chunksline[1]
metadata[key] = val
if key == "r":
resetstate = val
else:
lenc = chunksline.__len__()
if lenc != 4:
if lenc == 0:
continue
log(filepath, nline)
break
else:
if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
log(filepath, nline)
break
else:
currentstate = chunksline[1]
if not resetstate:
resetstate = currentstate
# if not verifystates.has_key(currentstate):
if currentstate not in verifystates:
verifystates[currentstate] = 1
else:
verifystates[currentstate] += 1
writemem += currentstate + '->' + chunksline[2] + \
' [label="' + chunksline[0] + ' ' + chunksline[3] + '"];\n\t'
outfile.write(writemem)
nline += 1
outfile.write("\r}")
infile.close()
outfile.close()
ok = True
for state in verifystates:
mypow = 2**int(metadata["i"])
if verifystates[state] != mypow:
ok = False
log(filepath, nline)
break
print resetstate
if ok:
os.system("dot temp.txt -o result.png -Tpng && rm temp.txt")
def verify(data):
ok = True
if type(data) == str:
infile = open(data, 'r')
nline = 1
metadata = {}
verifystates = {}
for line in infile:
pattern = re.compile("^.[ioprs]")
p = pattern.findall(line)
chunksline = line.split()
if p:
key = chunksline[0].replace(".", "")
val = chunksline[1]
metadata[key] = val
else:
lenc = chunksline.__len__()
if lenc != 4:
if lenc == 0:
continue
log(data, nline)
break
else:
if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
log(data, nline)
break
else:
currentstate = chunksline[1]
if currentstate not in verifystates:
verifystates[currentstate] = 1
else:
verifystates[currentstate] += 1
nline += 1
infile.close()
for state in verifystates:
mypow = 2**int(metadata["i"])
if verifystates[state] != mypow:
ok = False
log(data, nline)
break
if type(data) == dict:
print "Diccionario"
return ok
# x = FSM("../res/testkiss2.kiss2")
# verify(x.states)
def obtainwild(s):
    counter = 0
    for letter in s:
        if letter == "*":
            counter += 1
    # Convert to binary each value from 0 up to range(counter)
pass
obtainwild("0*1*")
# i2 o1 s8 p32
# random -> the range of inputs goes over [n, m], uniformly distributed
# bell-curve value with mu and sigma
# mu, sigma = 0, 0.1 # mean and standard deviation
# >>> s = np.random.normal(mu, sigma, 1000)
#
# completely specified
# incompletely specified (representation)
|
|
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
__revision__ = "$Id: sysconfig.py 38201 2005-01-06 23:16:03Z jackjansen $"
import os
import re
import string
import sys
from errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
argv0_path = os.path.dirname(os.path.abspath(sys.executable))
landmark = os.path.join(argv0_path, "Modules", "Setup")
python_build = os.path.isfile(landmark)
del argv0_path, landmark
def get_python_version ():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
base = os.path.dirname(os.path.abspath(sys.executable))
if plat_specific:
inc_dir = base
else:
inc_dir = os.path.join(base, "Include")
if not os.path.exists(inc_dir):
inc_dir = os.path.join(os.path.dirname(base), "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + sys.version[:3])
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "mac":
if plat_specific:
return os.path.join(prefix, "Mac", "Include")
else:
return os.path.join(prefix, "Include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if sys.version < "2.2":
return prefix
else:
                return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "mac":
if plat_specific:
if standard_lib:
return os.path.join(prefix, "Lib", "lib-dynload")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(PREFIX, "Lib")
else:
return os.path.join(PREFIX, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, basecflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', 'CCSHARED', 'LDSHARED', 'SO')
if os.environ.has_key('CC'):
cc = os.environ['CC']
if os.environ.has_key('CXX'):
cxx = os.environ['CXX']
if os.environ.has_key('LDSHARED'):
ldshared = os.environ['LDSHARED']
if os.environ.has_key('CPP'):
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if os.environ.has_key('LDFLAGS'):
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if basecflags:
opt = basecflags + ' ' + opt
if os.environ.has_key('CFLAGS'):
opt = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if os.environ.has_key('CPPFLAGS'):
cpp = cpp + ' ' + os.environ['CPPFLAGS']
opt = opt + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + opt
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
inc_dir = os.curdir
else:
inc_dir = get_python_inc(plat_specific=1)
if sys.version < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
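# Illustration of parse_config_h() on hypothetical pyconfig.h lines:
#   '#define HAVE_UNISTD_H 1\n'              -> {'HAVE_UNISTD_H': 1}
#   '/* #undef HAVE_LARGEFILE_SUPPORT */\n'  -> {'HAVE_LARGEFILE_SUPPORT': 0}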
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
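# Illustration of what these patterns match (hypothetical Makefile fragments):
#   "CC=     gcc"   -> _variable_rx groups ('CC', 'gcc')
#   "$(CC)"         -> _findvar1_rx captures 'CC'
#   "${prefix}"     -> _findvar2_rx captures 'prefix'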
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = string.strip(v)
if "$" in v:
notdone[n] = v
else:
try: v = int(v)
except ValueError: pass
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
if done.has_key(n):
after = value[m.end():]
value = value[:m.start()] + str(done[n]) + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = string.strip(value)
else:
done[name] = value
del notdone[name]
elif notdone.has_key(n):
# get it on a subsequent round
pass
else:
done[n] = ""
after = value[m.end():]
value = value[:m.start()] + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = string.strip(value)
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# save the results in the global dictionary
g.update(done)
return g
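# Illustration: a Makefile containing the (hypothetical) lines
#   prefix= /usr/local
#   BINDIR= $(prefix)/bin
# is parsed into {'prefix': '/usr/local', 'BINDIR': '/usr/local/bin'},
# with the $(prefix) reference interpolated by the loop above.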
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
    's' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while 1:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
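# Illustration (hypothetical values):
#   expand_makefile_vars("$(srcdir)/setup.py", {"srcdir": "."}) -> "./setup.py"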
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
# load the installed Makefile:
try:
filename = get_makefile_filename()
parse_makefile(filename, g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# On MacOSX we need to check the setting of the environment variable
# MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
# it needs to be compatible.
# If it isn't set we set it to the configure-time value
if sys.platform == 'darwin' and g.has_key('CONFIGURE_MACOSX_DEPLOYMENT_TARGET'):
cfg_target = g['CONFIGURE_MACOSX_DEPLOYMENT_TARGET']
cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
if cur_target == '':
cur_target = cfg_target
os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
if cfg_target != cur_target:
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
% (cur_target, cfg_target))
raise DistutilsPlatformError(my_msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if python_build:
g['LDSHARED'] = g['BLDSHARED']
elif sys.version < '2.1':
# The following two branches are for 1.5.2 compatibility.
if sys.platform == 'aix4': # what about AIX 3.x ?
# Linker script is in the config directory, not in Modules as the
# Makefile says.
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
elif sys.platform == 'beos':
# Linker script is in the config directory. In the Makefile it is
# relative to the srcdir, which after installation no longer makes
# sense.
python_lib = get_python_lib(standard_lib=1)
linkerscript_path = string.split(g['LDSHARED'])[0]
linkerscript_name = os.path.basename(linkerscript_path)
linkerscript = os.path.join(python_lib, 'config',
linkerscript_name)
# XXX this isn't the right place to do this: adding the Python
# library to the link, if needed, should be in the "build_ext"
# command. (It's also needed for non-MS compilers on Windows, and
# it's taken care of for them by the 'build_ext.get_libraries()'
# method.)
g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
(linkerscript, PREFIX, sys.version[0:3]))
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def _init_mac():
"""Initialize the module as appropriate for Macintosh systems"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
import MacOS
if not hasattr(MacOS, 'runtimemodel'):
g['SO'] = '.ppc.slb'
else:
g['SO'] = '.%s.slb' % MacOS.runtimemodel
# XXX are these used anywhere?
g['install_lib'] = os.path.join(EXEC_PREFIX, "Lib")
g['install_platlib'] = os.path.join(EXEC_PREFIX, "Mac", "Lib")
# These are used by the extension module build
g['srcdir'] = ':'
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
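# Usage sketch (returned values are platform dependent; shown only as an illustration):
#   get_config_var('prefix')        # normalized sys.prefix, e.g. '/usr'
#   get_config_vars('CC', 'OPT')    # e.g. ['gcc', '-DNDEBUG -g -O3']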
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
import argparse
import fnmatch
import logging
import json
import os
import sys
import time
import unittest
from collections import OrderedDict
from autothreadharness.harness_case import HarnessCase
from autothreadharness.open_thread_controller import OpenThreadController
from autothreadharness import settings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
"""Logger: The global logger"""
logger.setLevel(logging.INFO)
RESUME_SCRIPT_PATH = "%appdata%\\Microsoft\\Windows\\Start Menu\\Programs\\" "Startup\\continue_harness.bat"
class SimpleTestResult(unittest.TestResult):
executions = 0
def __init__(self, path, auto_reboot_args=None, keep_explorer=False, add_all_devices=False):
"""Record test results in json file
Args:
path (str): File path to record the results
auto_reboot (bool): Whether reboot when harness die
"""
super(SimpleTestResult, self).__init__()
self.path = path
self.auto_reboot_args = auto_reboot_args
self.result = json.load(open(self.path, 'r'))
self.log_handler = None
self.started = None
self.keep_explorer = keep_explorer
self.add_all_devices = add_all_devices
SimpleTestResult.executions += 1
logger.info('Initial state is %s', json.dumps(self.result, indent=2))
def startTest(self, test):
logger.info(
'\n========================================\n%s\n========================================',
test.__class__.__name__,
)
test.add_all_devices = self.add_all_devices
# create start up script if auto reboot enabled
if self.auto_reboot_args:
test.auto_reboot = True
os.system('echo %s > "%s"' %
(' '.join(self.auto_reboot_args + ['-c', test.__class__.__name__]), RESUME_SCRIPT_PATH))
# record start timestamp
self.started = time.strftime('%Y-%m-%dT%H:%M:%S')
os.system('mkdir %s' % test.result_dir)
self.log_handler = logging.FileHandler('%s\\auto-%s.log' % (test.result_dir, time.strftime('%Y%m%d%H%M%S')))
self.log_handler.setLevel(logging.DEBUG)
self.log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(self.log_handler)
def add_result(self, test, passed, error=None):
"""Record test result into json file
Args:
test (TestCase): The test just run
passed (bool): Whether the case is passed
"""
fails = self.result.get(test.__class__.__name__, {}).get('fails', 0)
if passed is False:
fails += 1
self.result[str(test.__class__.__name__)] = {
'started': self.started,
'stopped': time.strftime('%Y-%m-%dT%H:%M:%S'),
'passed': passed,
'fails': fails,
'error': error,
'executions': SimpleTestResult.executions,
}
if self.auto_reboot_args:
os.system('del "%s"' % RESUME_SCRIPT_PATH)
json.dump(OrderedDict(sorted(self.result.items(), key=lambda t: t[0])), open(self.path, 'w'), indent=2)
# save logs
logger.removeHandler(self.log_handler)
self.log_handler.close()
self.log_handler = None
time.sleep(2)
# close explorers
if not self.keep_explorer:
os.system('taskkill /f /im explorer.exe && start explorer.exe')
def addSuccess(self, test):
logger.info('case[%s] pass', test.__class__.__name__)
super(SimpleTestResult, self).addSuccess(test)
self.add_result(test, True)
def addFailure(self, test, err):
logger.warning('case[%s] fail', test.__class__.__name__)
super(SimpleTestResult, self).addFailure(test, err)
self.add_result(test, False)
def addError(self, test, err):
logger.error('case[%s] error', test.__class__.__name__, exc_info=err)
if err and err[0] is SystemExit:
if self.auto_reboot_args:
logger.warning('rebooting..')
os.system('shutdown /r /t 1')
else:
logger.warning('exiting..')
sys.exit(1)
super(SimpleTestResult, self).addError(test, err)
self.add_result(test, None, str(err[1]))
def list_devices(names=None, continue_from=None, **kwargs):
"""List devices in settings file and print versions"""
if not names:
names = [device for device, _type in settings.GOLDEN_DEVICES if _type == 'OpenThread']
if continue_from:
continue_from = names.index(continue_from)
else:
continue_from = 0
for port in names[continue_from:]:
try:
with OpenThreadController(port) as otc:
print('%s: %s' % (port, otc.version))
except BaseException:
logger.exception('failed to get version of %s' % port)
def discover(
names=None,
    pattern='*.py',
skip='efp',
dry_run=False,
denylist=None,
name_greps=None,
manual_reset=False,
delete_history=False,
max_devices=0,
continue_from=None,
result_file='./result.json',
auto_reboot=False,
keep_explorer=False,
add_all_devices=False,
):
"""Discover all test cases and skip those passed
Args:
pattern (str): Pattern to match case modules, refer python's unittest
documentation for more details
skip (str): types cases to skip
"""
if not os.path.exists(settings.OUTPUT_PATH):
os.mkdir(settings.OUTPUT_PATH)
if delete_history:
os.system('del history.json')
if denylist:
try:
excludes = [line.strip('\n') for line in open(denylist, 'r').readlines() if not line.startswith('#')]
except BaseException:
logger.exception('Failed to open test case denylist file')
raise
else:
excludes = []
log = None
if os.path.isfile(result_file):
try:
log = json.load(open(result_file, 'r'))
except BaseException:
logger.exception('Failed to open result file')
if not log:
log = {}
json.dump(log, open(result_file, 'w'), indent=2)
new_th = False
harness_info = ConfigParser.ConfigParser()
harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
if harness_info.has_option('Thread_Harness_Info', 'Version') and harness_info.has_option(
'Thread_Harness_Info', 'Mode'):
harness_version = harness_info.get('Thread_Harness_Info', 'Version').rsplit(' ', 1)[1]
harness_mode = harness_info.get('Thread_Harness_Info', 'Mode')
if harness_mode == 'External' and harness_version > '1.4.0':
new_th = True
if harness_mode == 'Internal' and harness_version > '49.4':
new_th = True
suite = unittest.TestSuite()
if new_th:
discovered = unittest.defaultTestLoader.discover('cases', pattern)
else:
discovered = unittest.defaultTestLoader.discover('cases_R140', pattern)
if names and continue_from:
names = names[names.index(continue_from):]
for s1 in discovered:
for s2 in s1:
for case in s2:
if case.__class__ is HarnessCase:
continue
case_name = str(case.__class__.__name__)
# grep name
if name_greps and not any(fnmatch.fnmatch(case_name, name_grep) for name_grep in name_greps):
logger.info('case[%s] skipped by name greps', case_name)
continue
# allowlist
if len(names) and case_name not in names:
logger.info('case[%s] skipped', case_name)
continue
# skip cases
if case_name in log:
if ((log[case_name]['passed'] and ('p' in skip)) or
(log[case_name]['passed'] is False and ('f' in skip)) or (log[case_name]['passed'] is None and
('e' in skip))):
logger.warning('case[%s] skipped for its status[%s]', case_name, log[case_name]['passed'])
continue
# continue from
if continue_from:
if continue_from != case_name:
logger.warning('case[%s] skipped for continue from[%s]', case_name, continue_from)
continue
else:
continue_from = None
# denylist
if case_name in excludes:
logger.warning('case[%s] skipped for denylist', case_name)
continue
# max devices
if max_devices and case.golden_devices_required > max_devices:
logger.warning('case[%s] skipped for exceeding max golden devices allowed[%d]', case_name,
max_devices)
continue
suite.addTest(case)
logger.info('case[%s] added', case_name)
if auto_reboot:
argv = []
argv.append('"%s"' % os.sep.join([os.getcwd(), 'start.bat']))
argv.extend(['-p', pattern])
argv.extend(['-k', skip])
argv.extend(['-o', result_file])
argv.append('-a')
if manual_reset:
argv.append('-m')
if delete_history:
argv.append('-d')
auto_reboot_args = argv + names
else:
auto_reboot_args = None
if os.path.isfile(RESUME_SCRIPT_PATH):
os.system('del "%s"' % RESUME_SCRIPT_PATH)
# manual reset
if manual_reset:
settings.PDU_CONTROLLER_TYPE = 'MANUAL_PDU_CONTROLLER'
settings.PDU_CONTROLLER_OPEN_PARAMS = {}
settings.PDU_CONTROLLER_REBOOT_PARAMS = {}
result = SimpleTestResult(result_file, auto_reboot_args, keep_explorer, add_all_devices)
for case in suite:
logger.info(case.__class__.__name__)
if dry_run:
return
suite.run(result)
return result
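# Usage sketch (flag values are illustrative; names=[] means every discovered case):
#   discover(names=[], dry_run=True)   # only log the cases that would run
#   discover(names=[], skip='p')       # rerun everything that has not passed yet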
def main():
parser = argparse.ArgumentParser(description='Thread harness test case runner')
parser.add_argument('--auto-reboot',
'-a',
action='store_true',
default=False,
help='restart system when harness service die')
parser.add_argument('names',
metavar='NAME',
type=str,
nargs='*',
default=None,
help='test case name, omit to test all')
parser.add_argument('--denylist',
'-b',
metavar='DENYLIST_FILE',
type=str,
help='file to list test cases to skip',
default=None)
parser.add_argument('--continue-from', '-c', type=str, default=None, help='first case to test')
parser.add_argument('--delete-history', '-d', action='store_true', default=False, help='clear history on startup')
parser.add_argument('--keep-explorer',
'-e',
action='store_true',
default=False,
help='do not restart explorer.exe at the end')
parser.add_argument('--name-greps', '-g', action='append', default=None, help='grep case by names')
parser.add_argument('--list-file', '-i', type=str, default=None, help='file to list cases names to test')
parser.add_argument(
'--skip',
'-k',
metavar='SKIP',
type=str,
help='type of results to skip. e for error, f for fail, p for pass.',
default='',
)
parser.add_argument('--list-devices', '-l', action='store_true', default=False, help='list devices')
parser.add_argument('--manual-reset', '-m', action='store_true', default=False, help='reset devices manually')
parser.add_argument('--dry-run', '-n', action='store_true', default=False, help='just show what to run')
parser.add_argument(
'--result-file',
'-o',
type=str,
default=settings.OUTPUT_PATH + '\\result.json',
help='file to store and read current status',
)
parser.add_argument('--pattern',
'-p',
metavar='PATTERN',
type=str,
help='file name pattern, default to "*.py"',
default='*.py')
parser.add_argument('--rerun-fails', '-r', type=int, default=0, help='number of times to rerun failed test cases')
parser.add_argument('--add-all-devices',
'-t',
action='store_true',
default=False,
help='add all devices to the test bed')
parser.add_argument('--max-devices', '-u', type=int, default=0, help='max golden devices allowed')
args = vars(parser.parse_args())
if args['list_file']:
try:
names = [line.strip('\n') for line in open(args['list_file'], 'r').readlines() if not line.startswith('#')]
except BaseException:
logger.exception('Failed to open test case list file')
raise
else:
args['names'] = args['names'] + names
args.pop('list_file')
if args.pop('list_devices', False):
list_devices(**args)
return
rerun_fails = args.pop('rerun_fails')
result = discover(**args)
if rerun_fails > 0:
for i in range(rerun_fails):
failed_names = {name for name in result.result if result.result[name]['passed'] is False}
if not failed_names:
break
logger.info('Rerunning failed test cases')
logger.info('Rerun #{}:'.format(i + 1))
result = discover(
names=failed_names,
pattern=args['pattern'],
skip='',
result_file=args['result_file'],
auto_reboot=args['auto_reboot'],
keep_explorer=args['keep_explorer'],
add_all_devices=args['add_all_devices'],
)
if __name__ == '__main__':
main()
|
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import numpy as np
import pytest
import cirq
def assert_optimizes(
before: cirq.Circuit,
expected: cirq.Circuit,
optimizer: Optional[Callable[[cirq.Circuit], None]] = None,
deprecated_msg: str = "Use cirq.merge_k_qubit_unitaries",
):
with cirq.testing.assert_deprecated(deprecated_msg, deadline='v1.0'):
if optimizer is None:
optimizer = cirq.MergeSingleQubitGates().optimize_circuit
optimizer(before)
# Ignore differences that would be caught by follow-up optimizations.
followup_transformers = [cirq.drop_negligible_operations, cirq.drop_empty_moments]
for transform in followup_transformers:
before = transform(before) # type: ignore # error: "object" not callable
expected = transform(expected) # type: ignore # error: "object" not callable
assert before == expected, f'BEFORE:\n{before}\nEXPECTED:\n{expected}'
def test_leaves_singleton():
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit([cirq.Moment([cirq.X(q)])])
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(c, cirq.Circuit([cirq.Moment([cirq.X(q)])]))
def test_not_both():
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
with pytest.raises(ValueError):
_ = cirq.MergeSingleQubitGates(
synthesizer=lambda *args: None, rewriter=lambda *args: None
)
def test_combines_sequence():
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
c = cirq.Circuit(cirq.X(q) ** 0.5, cirq.Z(q) ** 0.5, cirq.X(q) ** -0.5)
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 3
assert list(opt_summary.clear_qubits) == [q]
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate, cirq.MatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]), cirq.unitary(cirq.Y ** 0.5), atol=1e-7
)
def test_removes_identity_sequence():
q = cirq.NamedQubit('q')
assert_optimizes(
before=cirq.Circuit(
[
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
]
),
expected=cirq.Circuit(),
)
def test_stopped_at_2qubit():
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit(
[
cirq.Moment([cirq.Z(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.X(q)]),
cirq.Moment([cirq.H(q)]),
cirq.Moment([cirq.CZ(q, q2)]),
cirq.Moment([cirq.H(q)]),
]
)
opt_summary = m.optimization_at(c, 0, c.operation_at(q, 0))
assert opt_summary.clear_span == 4
assert list(opt_summary.clear_qubits) == [q]
if len(opt_summary.new_operations) != 0:
assert len(opt_summary.new_operations) == 1
assert isinstance(opt_summary.new_operations[0].gate, cirq.MatrixGate)
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(opt_summary.new_operations[0]), np.eye(2), atol=1e-7
)
def test_ignores_2qubit_target():
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
m = cirq.MergeSingleQubitGates()
q = cirq.NamedQubit('q')
q2 = cirq.NamedQubit('q2')
c = cirq.Circuit(
[
cirq.Moment([cirq.CZ(q, q2)]),
]
)
m.optimization_at(c, 0, c.operation_at(q, 0))
cirq.testing.assert_same_circuits(c, cirq.Circuit([cirq.Moment([cirq.CZ(q, q2)])]))
def test_ignore_unsupported_gate():
class UnsupportedDummy(cirq.SingleQubitGate):
pass
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(
UnsupportedDummy()(q0),
)
c_orig = cirq.Circuit(circuit)
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
cirq.MergeSingleQubitGates().optimize_circuit(circuit)
assert circuit == c_orig
def test_rewrite():
q0 = cirq.LineQubit(0)
q1 = cirq.LineQubit(1)
circuit = cirq.Circuit(
cirq.X(q0),
cirq.X(q1),
cirq.Y(q0),
cirq.CZ(q0, q1),
cirq.Y(q1),
)
with cirq.testing.assert_deprecated("Use cirq.merge_k_qubit_unitaries", deadline='v1.0'):
cirq.MergeSingleQubitGates(rewriter=lambda ops: cirq.H(ops[0].qubits[0])).optimize_circuit(
circuit
)
circuit = cirq.drop_empty_moments(circuit)
cirq.testing.assert_same_circuits(
circuit,
cirq.Circuit(
cirq.H(q0),
cirq.H(q1),
cirq.CZ(q0, q1),
cirq.H(q1),
),
)
def test_merge_single_qubit_gates_into_phased_x_z():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit(
cirq.X(a),
cirq.Y(b) ** 0.5,
cirq.CZ(a, b),
cirq.H(a),
cirq.Z(a),
),
expected=cirq.Circuit(
cirq.PhasedXPowGate(phase_exponent=1)(a),
cirq.Y(b) ** 0.5,
cirq.CZ(a, b),
(cirq.PhasedXPowGate(phase_exponent=-0.5)(a)) ** 0.5,
),
optimizer=cirq.merge_single_qubit_gates_into_phased_x_z,
deprecated_msg="Use cirq.merge_single_qubit_gates_to_phased_x_and_z",
)
def test_merge_single_qubit_gates_into_phxz():
def phxz(a, x, z):
return cirq.PhasedXZGate(
axis_phase_exponent=a,
x_exponent=x,
z_exponent=z,
)
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit(
cirq.X(a),
cirq.Y(b) ** 0.5,
cirq.CZ(a, b),
cirq.H(a),
cirq.Z(a),
),
expected=cirq.Circuit(
phxz(-1, 1, 0).on(a),
phxz(0.5, 0.5, 0).on(b),
cirq.CZ(a, b),
phxz(-0.5, 0.5, 0).on(a),
),
optimizer=cirq.merge_single_qubit_gates_into_phxz,
deprecated_msg="Use cirq.merge_single_qubit_gates_to_phxz",
)
|
|
"""sympify -- convert objects SymPy internal format"""
# from basic import Basic, BasicType, S
# from numbers import Integer, Real
import decimal
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %s" % (self.expr,)
return "Sympify of expression '%s' failed, because of exception being raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc))
def sympify(a, locals=None, convert_xor=True):
"""Converts an arbitrary expression to a type that can be used
    inside sympy. For example, it will convert Python ints into
    instances of sympy.Rational, floats into instances of sympy.Real,
    etc. It is also able to coerce symbolic expressions which
    inherit from Basic. This can be useful in cooperation with SAGE.
It currently accepts as arguments:
- any object defined in sympy (except maybe matrices [TODO])
- standard numeric python types: int, long, float, Decimal
- strings (like "0.09" or "2e-19")
- booleans (will leave them unchanged)
If the argument is already a type that sympy understands, it will do
    nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
>>> from sympy import *
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
"""
# XXX instead of duplicating _sympify it would be better to call _sympify
# directly from here, but a lot of SymPy still calls sympify (no '_') and
    # this will add unnecessary overhead.
#
# When everything settles, let's refactor this.
# -- kirr
if locals is None:
locals = {}
if isinstance(a, Basic):
return a
if isinstance(a, BasicType):
return a
elif isinstance(a, bool):
return a
elif isinstance(a, (int, long)):
return Integer(a)
elif isinstance(a, (float, decimal.Decimal)):
return Real(a)
elif isinstance(a, complex):
real, imag = map(sympify, (a.real, a.imag))
ireal, iimag = int(real), int(imag)
if ireal + iimag*1j == a:
return ireal + iimag*S.ImaginaryUnit
return real + S.ImaginaryUnit * imag
elif isinstance(a, (list,tuple,set)):
return type(a)([sympify(x) for x in a])
# let's see if 'a' implements conversion methods such as '_sympy_' or
# '__int__', that returns a SymPy (by definition) or SymPy compatible
# expression, so we just use it
for methname, conv in [
('_sympy_',None),
('__float__', Real),
('__int__', Integer),
]:
meth = getattr(a, methname, None)
if meth is None:
continue
# we have to be careful -- calling Class.__int__() almost always is not
# a good idea
try:
v = meth()
except TypeError:
continue
if conv is not None:
v = conv(v)
return v
else:
# XXX this is here because of cyclic-import issues
from sympy.matrices import Matrix
if isinstance(a, Matrix):
raise NotImplementedError('matrix support')
if not isinstance(a, str):
# At this point we were given an arbitrary expression
# which does not inherit from Basic and doesn't implement
# _sympy_ (which is a canonical and robust way to convert
# anything to SymPy expression).
#
# As a last chance, we try to take "a"'s normal form via str()
# and try to parse it. If it fails, then we have no luck and
# return an exception
a = str(a)
if convert_xor:
a = a.replace('^','**')
import ast_parser
return ast_parser.parse_expr(a, locals)
raise SympifyError("%r is NOT a valid SymPy expression" % a)
def _sympify(a):
"""short version of sympify for internal usage
When adding and comparing symbolic expressions, it is unwise to allow
e.g. strings to mixin. On the other hand Python integers and floats are
allowed.
So we don't use full-featured sympify in __add__ and __eq__ methods, but
instead use this small-crafted function there instead.
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> x + 1
1 + x
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
if isinstance(a, Basic):
return a
if isinstance(a, BasicType):
return a
elif isinstance(a, (int, long)):
return Integer(a)
elif isinstance(a, (float, decimal.Decimal)):
return Real(a)
elif isinstance(a, complex):
real, imag = map(sympify, (a.real, a.imag))
ireal, iimag = int(real), int(imag)
if ireal + iimag*1j == a:
return ireal + iimag*S.ImaginaryUnit
return real + S.ImaginaryUnit * imag
# let's see if 'a' implements conversion methods such as '_sympy_' or
# '__int__', that returns a SymPy (by definition) or SymPy compatible
# expression, so we just use it
for methname, conv in [
('_sympy_',None),
('__float__', Real),
('__int__', Integer),
]:
meth = getattr(a, methname, None)
if meth is None:
continue
# we have to be careful -- calling Class.__int__() almost always is not
# a good idea
try:
v = meth()
except TypeError:
continue
if conv is not None:
v = conv(v)
return v
raise SympifyError("%r is NOT a valid SymPy expression" % (a,))
from numbers import Integer, Real
from basic import Basic, BasicType, S
|
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.entities import MPolygon
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
# MPolygon is similar to Hatch
@pytest.fixture
def entity():
return MPolygon.from_text(MPOLYGON_NO_FILL)
def test_if_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert "MPOLYGON" in ENTITY_CLASSES
def test_default_init():
entity = MPolygon()
assert entity.dxftype() == "MPOLYGON"
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = MPolygon.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": 7, # color of boundary paths!
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
assert entity.dxf.version == 1
assert entity.dxf.solid_fill == 0
assert entity.dxf.fill_color == ezdxf.const.BYLAYER
def test_fill_properties_without_solid_filling():
entity = MPolygon()
entity.dxf.solid_fill = 0
assert entity.has_solid_fill is False
assert entity.has_pattern_fill is True
def test_fill_properties_with_solid_filling():
entity = MPolygon()
entity.dxf.solid_fill = 1
assert entity.has_solid_fill is True
assert entity.has_pattern_fill is False
def test_load_from_text(entity):
assert entity.dxf.layer == "mpolygons"
assert entity.dxf.color == 1
assert entity.dxf.version == 1
assert entity.dxf.solid_fill == 0
assert len(entity.paths) == 2
assert entity.dxf.pattern_name == ""
def test_write_dxf_no_fill(entity):
entity = MPolygon.from_text(MPOLYGON_NO_FILL)
result = TagCollector.dxftags(entity, dxfversion=ezdxf.const.DXF2000)
expected = basic_tags_from_text(MPOLYGON_NO_FILL)
assert result == expected
assert result.get_first_value(71) == 0 # pattern fill
tags = list(result.pop_tags([52, 41, 77, 78]))
assert tags == [
        (52, 0),  # pattern_angle tag must be present
        (41, 1),  # pattern_scale tag must be present
        (77, 0),  # pattern_double tag must be present
        (78, 0),  # pattern length tag must be present
], "required pattern tags are not in expected order"
assert (
result.has_tag(450) is False
), "gradient data is not supported for DXF R2000"
def test_write_dxf_r2004_no_fill_requires_basic_gradient_data(entity):
result = TagCollector.dxftags(entity, dxfversion=ezdxf.const.DXF2004)
tags = list(result.pop_tags([450, 451, 460, 461, 452, 462, 453, 470]))
assert tags == [
(450, 0), # kind = solid fill
(451, 0), # reserved for the future
(460, 0), # angle in radians
(461, 0), # centered
(452, 0), # one color
(462, 0), # tint
(453, 0), # number of colors
(470, ""), # gradient name
], "required gradient tags are not in expected order"
def test_write_dxf_with_fill(entity):
entity.dxf.solid_fill = 1
entity.dxf.fill_color = 163
result = TagCollector.dxftags(entity, dxfversion=ezdxf.const.DXF2000)
assert result.get_first_value(71) == 1 # solid_fill
assert (
result.has_tag(52) is False
), "pattern_angle tag should not be presents"
assert (
result.has_tag(41) is False
), "pattern_scale tag should not be presents"
assert (
result.has_tag(77) is False
), "pattern_double tag should not be presents"
assert (
result.has_tag(78) is False
), "pattern length tag should not be presents"
assert (
result.has_tag(63) is False
), "fill color tag is not supported for DXF R2000"
def test_write_dxf_R2004_with_fill(entity):
entity.dxf.solid_fill = 1
entity.dxf.fill_color = 163
result = TagCollector.dxftags(entity, dxfversion=ezdxf.const.DXF2004)
assert result.get_first_value(63) == 163, "missing fill color tag"
tags = list(result.pop_tags([450, 451, 460, 461, 452, 462, 453, 470]))
assert tags == [
(450, 0), # kind = solid fill
(451, 0), # reserved for the future
(460, 0), # angle in radians
(461, 0), # centered
(452, 0), # one color
(462, 0), # tint
(453, 0), # number of colors
(470, ""), # gradient name
], "required gradient tags are not in expected order"
def test_write_correct_polyline_path_tag_order(entity):
result = TagCollector.dxftags(entity)
tags = list(result.pop_tags([92, 72, 73]))
# 92 = path type 2: polyline path
# 73 = is_closed
# 72 = has_bulge
# The group codes 73 and 72 are swapped in comparison to HATCH
# contains 2 polyline paths
assert tags == [(92, 2), (73, 0), (72, 0), (92, 2), (73, 0), (72, 0)]
def test_write_dxf_with_pattern_fill(entity):
entity.set_pattern_fill("ANSI33", color=7, scale=0.01)
result = TagCollector.dxftags(entity, dxfversion=ezdxf.const.DXF2000)
assert result.has_tag(75) is False, "hatch style tag not supported?!"
def test_do_not_export_mpolygon_with_edge_paths(entity):
    # Edge paths are not supported by MPOLYGON as far as I know!
entity.paths.add_edge_path()
assert entity.preprocess_export(TagCollector()) is False
MPOLYGON_NO_FILL = """0
MPOLYGON
5
ABBA
330
DEAD
100
AcDbEntity
8
mpolygons
62
1
100
AcDbMPolygon
70
1
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
71
0
91
2
92
2
73
0
72
0
93
5
10
-85.2
20
35.04
10
-85.2
20
35.04
10
-85.2
20
35.04
10
-85.2
20
35.04
10
-85.2
20
35.04
92
2
73
0
72
0
93
4
10
-85.2
20
35.04
10
-85.2
20
35.04
10
-85.2
20
35.04
10
-85.2
20
35.04
76
0
52
0.0
41
1.0
77
0
78
0
11
0.0
21
0.0
99
0
"""
if __name__ == "__main__":
pytest.main([__file__])
|
|
# -*- coding: utf-8 -*-
"""
Public Python API to create CMS contents.
WARNING: None of the functions defined in this module checks for permissions.
You must implement the necessary permission checks in your own code before
calling these methods!
"""
import datetime
from django.contrib.sites.models import Site
from django.core.exceptions import FieldError
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.utils import six
from cms.admin.forms import save_permissions
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models.pagemodel import Page
from cms.models.permissionmodels import (PageUser, PagePermission,
GlobalPagePermission, ACCESS_PAGE_AND_DESCENDANTS)
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils import copy_plugins
from cms.utils.compat.dj import get_user_model
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_list
from cms.utils.permissions import _thread_locals
from menus.menu_pool import menu_pool
#===============================================================================
# Constants
#===============================================================================
VISIBILITY_ALL = None
VISIBILITY_USERS = 1
VISIBILITY_STAFF = 2
#===============================================================================
# Helpers/Internals
#===============================================================================
def _generate_valid_slug(source, parent, language):
"""
Generate a valid slug for a page from source for the given language.
Parent is passed so we can make sure the slug is unique for this level in
the page tree.
"""
if parent:
qs = Title.objects.filter(language=language, page__parent=parent)
else:
qs = Title.objects.filter(language=language, page__parent__isnull=True)
used = qs.values_list('slug', flat=True)
baseslug = slugify(source)
slug = baseslug
i = 1
while slug in used:
slug = '%s-%s' % (baseslug, i)
i += 1
return slug
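# Illustration: for source "My Page" this returns "my-page" when unused at this
# tree level, otherwise "my-page-1", "my-page-2", ... (hypothetical titles).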
def _verify_apphook(apphook, namespace):
"""
Verifies the apphook given is valid and returns the normalized form (name)
"""
apphook_pool.discover_apps()
if hasattr(apphook, '__module__') and issubclass(apphook, CMSApp):
try:
assert apphook in apphook_pool.apps.values()
except AssertionError:
print(apphook_pool.apps.values())
raise
return apphook.__name__
elif isinstance(apphook, six.string_types):
try:
assert apphook in apphook_pool.apps
except AssertionError:
print(apphook_pool.apps.values())
raise
apphook_name = apphook
else:
raise TypeError("apphook must be string or CMSApp instance")
if apphook_pool.apps[apphook_name].app_name and not namespace:
raise ValidationError('apphook with app_name must define a namespace')
return apphook_name
def _verify_plugin_type(plugin_type):
"""
Verifies the given plugin_type is valid and returns a tuple of
(plugin_model, plugin_type)
"""
if (hasattr(plugin_type, '__module__') and
issubclass(plugin_type, CMSPluginBase)):
plugin_pool.set_plugin_meta()
plugin_model = plugin_type.model
assert plugin_type in plugin_pool.plugins.values()
plugin_type = plugin_type.__name__
elif isinstance(plugin_type, six.string_types):
try:
plugin_model = plugin_pool.get_plugin(plugin_type).model
except KeyError:
raise TypeError(
'plugin_type must be CMSPluginBase subclass or string'
)
else:
raise TypeError('plugin_type must be CMSPluginBase subclass or string')
return plugin_model, plugin_type
#===============================================================================
# Public API
#===============================================================================
def create_page(title, template, language, menu_title=None, slug=None,
apphook=None, apphook_namespace=None, redirect=None, meta_description=None,
created_by='python-api', parent=None,
publication_date=None, publication_end_date=None,
in_navigation=False, soft_root=False, reverse_id=None,
navigation_extenders=None, published=False, site=None,
login_required=False, limit_visibility_in_menu=VISIBILITY_ALL,
position="last-child", overwrite_url=None, xframe_options=Page.X_FRAME_OPTIONS_INHERIT):
"""
    Create a CMS Page and its title for the given language
See docs/extending_cms/api_reference.rst for more info
"""
# ugly permissions hack
if created_by and isinstance(created_by, get_user_model()):
_thread_locals.user = created_by
created_by = getattr(created_by, get_user_model().USERNAME_FIELD)
else:
_thread_locals.user = None
# validate template
if not template == TEMPLATE_INHERITANCE_MAGIC:
assert template in [tpl[0] for tpl in get_cms_setting('TEMPLATES')]
get_template(template)
# validate site
if not site:
site = Site.objects.get_current()
else:
assert isinstance(site, Site)
# validate language:
assert language in get_language_list(site), get_cms_setting('LANGUAGES').get(site.pk)
# set default slug:
if not slug:
slug = _generate_valid_slug(title, parent, language)
# validate parent
if parent:
assert isinstance(parent, Page)
parent = Page.objects.get(pk=parent.pk)
# validate publication date
if publication_date:
assert isinstance(publication_date, datetime.date)
# validate publication end date
if publication_end_date:
assert isinstance(publication_end_date, datetime.date)
if navigation_extenders:
raw_menus = menu_pool.get_menus_by_attribute("cms_enabled", True)
menus = [menu[0] for menu in raw_menus]
assert navigation_extenders in menus
# validate menu visibility
accepted_limitations = (VISIBILITY_ALL, VISIBILITY_USERS, VISIBILITY_STAFF)
assert limit_visibility_in_menu in accepted_limitations
# validate position
assert position in ('last-child', 'first-child', 'left', 'right')
if parent:
if position in ('last-child', 'first-child'):
parent_id = parent.pk
else:
parent_id = parent.parent_id
else:
parent_id = None
# validate and normalize apphook
if apphook:
application_urls = _verify_apphook(apphook, apphook_namespace)
else:
application_urls = None
if reverse_id:
if Page.objects.drafts().filter(reverse_id=reverse_id, site=site).count():
raise FieldError('A page with the reverse_id="%s" already exist.' % reverse_id)
page = Page(
created_by=created_by,
changed_by=created_by,
parent_id=parent_id,
publication_date=publication_date,
publication_end_date=publication_end_date,
in_navigation=in_navigation,
soft_root=soft_root,
reverse_id=reverse_id,
navigation_extenders=navigation_extenders,
template=template,
application_urls=application_urls,
application_namespace=apphook_namespace,
site=site,
login_required=login_required,
limit_visibility_in_menu=limit_visibility_in_menu,
xframe_options=xframe_options,
)
page.add_root(instance=page)
page = page.reload()
if parent:
page.move(target=parent, pos=position)
page = page.reload()
create_title(
language=language,
title=title,
menu_title=menu_title,
slug=slug,
redirect=redirect,
meta_description=meta_description,
page=page,
overwrite_url=overwrite_url,
)
if published:
page.publish(language)
del _thread_locals.user
return page.reload()
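# Minimal usage sketch (illustrative values; 'base.html' is assumed to be listed
# in CMS_TEMPLATES, and permissions must be checked by the caller):
#   home = create_page('Home', 'base.html', 'en', in_navigation=True)
#   about = create_page('About', 'base.html', 'en', parent=home, published=True)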
def create_title(language, title, page, menu_title=None, slug=None,
redirect=None, meta_description=None,
parent=None, overwrite_url=None):
"""
Create a title.
Parent is only used if slug=None.
See docs/extending_cms/api_reference.rst for more info
"""
# validate page
assert isinstance(page, Page)
# validate language:
assert language in get_language_list(page.site_id)
# set default slug:
if not slug:
slug = _generate_valid_slug(title, parent, language)
title = Title.objects.create(
language=language,
title=title,
menu_title=menu_title,
slug=slug,
redirect=redirect,
meta_description=meta_description,
page=page
)
if overwrite_url:
title.has_url_overwrite = True
title.path = overwrite_url
title.save()
return title
def add_plugin(placeholder, plugin_type, language, position='last-child',
target=None, **data):
"""
Add a plugin to a placeholder
See docs/extending_cms/api_reference.rst for more info
"""
# validate placeholder
assert isinstance(placeholder, Placeholder)
# validate and normalize plugin type
plugin_model, plugin_type = _verify_plugin_type(plugin_type)
if target:
if position == 'last-child':
if CMSPlugin.node_order_by:
position = 'sorted-child'
new_pos = CMSPlugin.objects.filter(parent=target).count()
parent_id = target.pk
elif position == 'first-child':
new_pos = 0
if CMSPlugin.node_order_by:
position = 'sorted-child'
parent_id = target.pk
elif position == 'left':
new_pos = target.position
if CMSPlugin.node_order_by:
position = 'sorted-sibling'
parent_id = target.parent_id
elif position == 'right':
new_pos = target.position + 1
if CMSPlugin.node_order_by:
position = 'sorted-sibling'
parent_id = target.parent_id
else:
raise Exception('position not supported: %s' % position)
if position == 'last-child' or position == 'first-child':
qs = CMSPlugin.objects.filter(language=language, parent=target, position__gte=new_pos,
placeholder=placeholder)
else:
qs = CMSPlugin.objects.filter(language=language, parent=target.parent_id, position__gte=new_pos,
placeholder=placeholder)
for pl in qs:
pl.position += 1
pl.save()
else:
if position == 'last-child':
new_pos = CMSPlugin.objects.filter(language=language, parent__isnull=True, placeholder=placeholder).count()
else:
new_pos = 0
for pl in CMSPlugin.objects.filter(language=language, parent__isnull=True, position__gte=new_pos,
placeholder=placeholder):
pl.position += 1
pl.save()
parent_id = None
plugin_base = CMSPlugin(
plugin_type=plugin_type,
placeholder=placeholder,
position=new_pos,
language=language,
parent_id=parent_id,
)
plugin_base.add_root(instance=plugin_base)
if target:
plugin_base.move(target, pos=position)
plugin_base = CMSPlugin.objects.get(pk=plugin_base.pk)
plugin = plugin_model(**data)
plugin_base.set_base_attr(plugin)
plugin.save()
return plugin
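# Usage sketch (plugin type and slot are illustrative; 'TextPlugin' comes from the
# djangocms-text-ckeditor package and may not be installed):
#   placeholder = page.placeholders.get(slot='content')
#   text = add_plugin(placeholder, 'TextPlugin', 'en', body='<p>Hello</p>')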
def create_page_user(created_by, user,
can_add_page=True, can_view_page=True,
can_change_page=True, can_delete_page=True,
can_recover_page=True, can_add_pageuser=True,
can_change_pageuser=True, can_delete_pageuser=True,
can_add_pagepermission=True,
can_change_pagepermission=True,
can_delete_pagepermission=True, grant_all=False):
"""
Creates a page user.
See docs/extending_cms/api_reference.rst for more info
"""
if grant_all:
# just be lazy
return create_page_user(created_by, user, True, True, True, True,
True, True, True, True, True, True, True)
# validate created_by
assert isinstance(created_by, get_user_model())
data = {
'can_add_page': can_add_page,
'can_view_page': can_view_page,
'can_change_page': can_change_page,
'can_delete_page': can_delete_page,
'can_recover_page': can_recover_page,
'can_add_pageuser': can_add_pageuser,
'can_change_pageuser': can_change_pageuser,
'can_delete_pageuser': can_delete_pageuser,
'can_add_pagepermission': can_add_pagepermission,
'can_change_pagepermission': can_change_pagepermission,
'can_delete_pagepermission': can_delete_pagepermission,
}
user.is_staff = True
user.is_active = True
page_user = PageUser(created_by=created_by)
for field in [f.name for f in get_user_model()._meta.local_fields]:
setattr(page_user, field, getattr(user, field))
user.save()
page_user.save()
save_permissions(data, page_user)
return user
def assign_user_to_page(page, user, grant_on=ACCESS_PAGE_AND_DESCENDANTS,
can_add=False, can_change=False, can_delete=False,
can_change_advanced_settings=False, can_publish=False,
can_change_permissions=False, can_move_page=False,
can_recover_page=True, can_view=False,
grant_all=False, global_permission=False):
"""
    Assigns the given user to the page and grants the requested permissions.
See docs/extending_cms/api_reference.rst for more info
"""
grant_all = grant_all and not global_permission
data = {
'can_add': can_add or grant_all,
'can_change': can_change or grant_all,
'can_delete': can_delete or grant_all,
'can_change_advanced_settings': can_change_advanced_settings or grant_all,
'can_publish': can_publish or grant_all,
'can_change_permissions': can_change_permissions or grant_all,
'can_move_page': can_move_page or grant_all,
'can_view': can_view or grant_all,
}
page_permission = PagePermission(page=page, user=user,
grant_on=grant_on, **data)
page_permission.save()
if global_permission:
page_permission = GlobalPagePermission(
user=user, can_recover_page=can_recover_page, **data)
page_permission.save()
page_permission.sites.add(Site.objects.get_current())
return page_permission
def publish_page(page, user, language):
"""
Publish a page. This sets `page.published` to `True` and calls publish()
which does the actual publishing.
See docs/extending_cms/api_reference.rst for more info
"""
page = page.reload()
class FakeRequest(object):
def __init__(self, user):
self.user = user
request = FakeRequest(user)
if not page.has_publish_permission(request):
raise PermissionDenied()
page.publish(language)
return page.reload()
def get_page_draft(page):
"""
    Returns the draft version of a page, regardless of whether the passed-in
    page is a published version or a draft version.
    :param page: the page for which to get the draft version
    :type page: :class:`cms.models.pagemodel.Page` instance
    :return: draft version of the page
    :rtype: :class:`cms.models.pagemodel.Page` instance
"""
if page:
if page.publisher_is_draft:
return page
else:
return page.publisher_draft
else:
return None
def copy_plugins_to_language(page, source_language, target_language,
only_empty=True):
"""
Copy the plugins to another language in the same page for all the page
placeholders.
By default plugins are copied only if placeholder has no plugin for the
target language; use ``only_empty=False`` to change this.
    .. warning:: This function skips permission checks
:param page: the page to copy
:type page: :class:`cms.models.pagemodel.Page` instance
:param string source_language: The source language code,
must be in :setting:`django:LANGUAGES`
    :param string target_language: The target language code,
     must be in :setting:`django:LANGUAGES`
    :param bool only_empty: if False, plugins are copied even if
     plugins exist in the target language (on a placeholder basis).
:return int: number of copied plugins
"""
copied = 0
placeholders = page.get_placeholders()
for placeholder in placeholders:
        # if only_empty is True we check whether the placeholder already has
        # plugins for the target language and skip it if it does
if not only_empty or not placeholder.cmsplugin_set.filter(language=target_language).exists():
plugins = list(
placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
copied_plugins = copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
copied += len(copied_plugins)
return copied
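# Usage sketch (language codes are illustrative and must be configured in LANGUAGES):
#   copied = copy_plugins_to_language(page, 'en', 'de')
#   copied_all = copy_plugins_to_language(page, 'en', 'de', only_empty=False)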
|
|
# -*- coding: utf8 -*-
import json
import logging
from enum import Enum
from typing import List, Dict, Any, NamedTuple, Optional
import requests
__original_author__ = "enginebai"
logger = logging.getLogger(__name__)
# send message fields
RECIPIENT_FIELD = "recipient"
MESSAGE_FIELD = "message"
ATTACHMENT_FIELD = "attachment"
TYPE_FIELD = "type"
TEMPLATE_TYPE_FIELD = "template_type"
TEXT_FIELD = "text"
TITLE_FIELD = "title"
SUBTITLE_FIELD = "subtitle"
IMAGE_FIELD = "image_url"
BUTTONS_FIELD = "buttons"
PAYLOAD_FIELD = "payload"
URL_FIELD = "url"
ELEMENTS_FIELD = "elements"
QUICK_REPLIES_FIELD = "quick_replies"
CONTENT_TYPE_FIELD = "content_type"
# received message fields
POSTBACK_FIELD = "postback"
class Recipient(Enum):
PHONE_NUMBER = "phone_number"
ID = "id"
class MessageType(Enum):
TEXT = "text"
ATTACHMENT = "attachment"
class AttachmentType(Enum):
IMAGE = "image"
TEMPLATE = "template"
class TemplateType(Enum):
GENERIC = "generic"
BUTTON = "button"
RECEIPT = "receipt"
class ButtonType(Enum):
WEB_URL = "web_url"
POSTBACK = "postback"
class ContentType(Enum):
TEXT = "text"
LOCATION = "location"
class ActionButton:
def __init__(self, button_type, title, url=None, payload=None):
self.button_type = button_type
self.title = title
self.url = url
self.payload = payload
def to_dict(self):
button_dict = dict()
button_dict[TYPE_FIELD] = self.button_type.value
if self.title:
button_dict[TITLE_FIELD] = self.title
if self.url is not None:
button_dict[URL_FIELD] = self.url
if self.payload is not None:
button_dict[PAYLOAD_FIELD] = self.payload
return button_dict
class GenericElement:
def __init__(self, title, subtitle, image_url, buttons):
self.title = title
self.subtitle = subtitle
self.image_url = image_url
self.buttons = buttons
def to_dict(self):
element_dict = dict()
if self.title:
element_dict[TITLE_FIELD] = self.title
if self.subtitle:
element_dict[SUBTITLE_FIELD] = self.subtitle
if self.image_url:
element_dict[IMAGE_FIELD] = self.image_url
        # Serialize each button into its dict representation.
        buttons = [button.to_dict() for button in self.buttons]
        element_dict[BUTTONS_FIELD] = buttons
return element_dict
class QuickReply:
def __init__(self, title, payload, image_url=None, content_type=ContentType.TEXT):
self.title = title
self.payload = payload
self.image_url = image_url
self.content_type = content_type
def to_dict(self):
reply_dict = dict()
reply_dict[CONTENT_TYPE_FIELD] = self.content_type.value
if self.title:
reply_dict[TITLE_FIELD] = self.title
reply_dict[PAYLOAD_FIELD] = self.payload
if self.image_url is not None:
reply_dict[IMAGE_FIELD] = self.image_url
logger.debug('Reply dict: {}'.format(reply_dict))
return reply_dict
class FacebookMessageType(Enum):
# Note: ``echo`` needs a value distinct from ``received``; giving both the
# value "message" would make ``echo`` an Enum alias of ``received`` and the
# two dispatcher entries in FacebookMessage.DISPATCHERS would collide.
# Echoes still arrive under the "message" key and are told apart below via
# the ``is_echo`` flag.
received = "message"
delivered = "delivery"
read = "read"
echo = "echo"
postback = "postback"
def recognize_message_type(message: Dict[str, Any]) -> FacebookMessageType:
guess = None
for msg_type in (FacebookMessageType.received, FacebookMessageType.delivered,
FacebookMessageType.read, FacebookMessageType.postback):
if message.get(msg_type.value):
guess = msg_type
if guess is FacebookMessageType.received and message['message'].get('is_echo', False):
guess = FacebookMessageType.echo
return guess
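# Illustrative check (added for clarity, not part of the original module).
# The payload below is a trimmed, hypothetical example of a text-message
# event posted to the webhook: it carries a "message" key without an
# ``is_echo`` flag, so it is classified as ``received``.
def _example_recognize():
    event = {
        "sender": {"id": "111"},
        "recipient": {"id": "222"},
        "timestamp": 1458692752478,
        "message": {"mid": "mid.example", "text": "hello"},
    }
    return recognize_message_type(event)  # FacebookMessageType.received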
class FacebookEntity:
USER_FIELDS = ['first_name', 'last_name', 'profile_pic', 'locale',
'timezone', 'gender', 'is_payment_enabled', 'last_ad_referral']
def __init__(self, user: Dict[str, Any]):
self.id = user.get('id', None)
# User data.
self.first_name = None
self.last_name = None
self.profile_pic = None
self.locale = None
self.timezone = None
self.gender = None
self.is_payment_enabled = None
self.last_ad_referral = None
def hydrate_user_from_api(self, data: Dict[str, Any]):
for key, value in data.items():
setattr(self, key, value) # Question to myself: json.loads should do the job and perform conversion, right?
def __bool__(self):
return self.id is not None
Coordinates = NamedTuple('Coordinates', [
('lat', float),
('long', float),
])
# FIXME: Add support for generic, list, open graph, receipt, airline stuff.
class FacebookTemplate:
def __init__(self, template: Dict[str, Any]):
self.type = template.get('template_type')
self.buttons = [
ActionButton(button.get('type'), title=button.get('title'), url=button.get('url'),
payload=button.get('payload'))
for button in template.get('buttons', [])
]
class FacebookFallback:
def __init__(self, fallback: Dict[str, Any]):
self.title = fallback.get('title')
self.url = fallback.get('url')
self.payload = fallback.get('payload')
class FacebookAttachment:
def __init__(self, attachment: Dict[str, Any]):
self.type = attachment['type']
if self.type in ('image', 'audio', 'video', 'file'):
self.payload = attachment['payload']['url']
elif self.type == 'location':
self.payload = Coordinates(lat=float(attachment['payload']['lat']),
long=float(attachment['payload']['long']))
elif self.type == 'template':
self.payload = FacebookTemplate(attachment['payload'])
elif self.type == 'fallback':
self.payload = FacebookFallback(attachment)
class FacebookReferralSource(Enum):
m_me = 'SHORTLINK'
ad_referral = 'ADS'
parametric_messenger_code = 'MESSENGER_CODE'
discover_tab = 'DISCOVER_TAB'
def recognize_referral_source(referral: Dict[str, Any]) -> FacebookReferralSource:
return FacebookReferralSource(referral.get('source'))
class FacebookPostbackReferral:
def __init__(self, referral: Optional[Dict[str, Any]]):
if referral:
self.referral_source = recognize_referral_source(referral)
else:
self.referral_source = None
def __bool__(self):
return self.referral_source is not None
class FacebookMessage:
# Filled when the first instance is created.
DISPATCHERS = {}
initialized = False
def __init__(self, message: Dict[str, Any]):
self.type = recognize_message_type(message)
if not self.__class__.initialized:
self.__class__.DISPATCHERS = {
FacebookMessageType.received: FacebookMessage._process_received,
FacebookMessageType.delivered: FacebookMessage._process_delivered,
FacebookMessageType.read: FacebookMessage._process_read,
FacebookMessageType.echo: FacebookMessage._process_echo,
FacebookMessageType.postback: FacebookMessage._process_postback,
}
self.__class__.initialized = True
self.DISPATCHERS = self.__class__.DISPATCHERS
self._message = message
self.sender = FacebookEntity(message.get('sender'))
self.recipient = FacebookEntity(message.get('recipient'))
self.timestamp = message.get('timestamp')
# Message / Received.
self.mid = None
self.text = None
self.quick_reply_payload = None
self.attachments = []
# Echo
self.metadata = None
# Delivered
self.mids = []
self.watermark = None
self.seq = None
# Postback
self.postback_payload = None
self.referral = None
self.DISPATCHERS[self.type](self)
def _process_received(self):
message = self._message['message']
self.mid = message.get('mid')
self.text = message.get('text')
self.quick_reply_payload = message.get('quick_reply', {}).get('payload', None)
for attachment in message.get('attachments', []):
self.attachments.append(FacebookAttachment(attachment))
def _process_delivered(self):
message = self._message['delivery']
self.mids.extend(message.get('mids', []))
self.watermark = message['watermark'] # Always present per FB docs.
self.seq = message.get('seq')
def _process_read(self):
message = self._message['read']
self.watermark = message['watermark']
self.seq = message.get('seq')
def _process_echo(self):
message = self._message['message']
self.app_id = message.get('app_id')
self.metadata = message.get('metadata')
self.mid = message.get('mid')
self.text = message.get('text')
for attachment in message.get('attachments', []):
self.attachments.append(FacebookAttachment(attachment))
def _process_postback(self):
message = self._message['postback']
self.postback_payload = message['payload']
self.referral = FacebookPostbackReferral(message.get('referral'))
class FacebookEntry:
def __init__(self, entry: Dict[str, Any]):
self.id = entry.get('id')
self.changed_fields = entry.get('changed_fields', [])
self.changes = entry.get('changes', [])
self.timestamp = entry.get('timestamp')
self.messages = self.process_messages(entry['messaging'])
def process_messages(self, entries: List[Dict[str, Any]]) -> List[FacebookMessage]:
messages = []
for message in entries:
messages.append(
FacebookMessage(message)
)
return messages
class Messager:
BASE_URL = "https://graph.facebook.com/v2.9/{}"
def __init__(self, access_token):
self.access_token = access_token
self.session = requests.Session()
self.session.headers.update({
'Content-Type': 'application/json'
})
self.session.params = {
'access_token': self.access_token
}
def subscribe_to_page(self):
return self.session.post(self.BASE_URL.format("me/subscribed_apps"))
def set_greeting_text(self, text):
data = {"setting_type": "greeting", "greeting": {"text": text}}
return self.session.post(self.BASE_URL.format("me/thread_settings"),
data=json.dumps(data))
def set_get_started_button_payload(self, payload):
data = {"setting_type": "call_to_actions", "thread_state": "new_thread",
"call_to_actions": [{"payload": payload}]}
return self.session.post(self.BASE_URL.format("me/thread_settings"),
data=json.dumps(data))
def send_text(self, user_id, text):
self._send({RECIPIENT_FIELD: self._build_recipient(user_id),
MESSAGE_FIELD: {MessageType.TEXT.value: text}})
def send_image(self, user_id, image):
self._send({RECIPIENT_FIELD: self._build_recipient(user_id),
MESSAGE_FIELD: {
ATTACHMENT_FIELD: {
TYPE_FIELD: AttachmentType.IMAGE.value,
PAYLOAD_FIELD: {
URL_FIELD: image
}
}
}})
def send_buttons(self, user_id, title, button_list):
buttons = [button.to_dict() for button in button_list]
self._send({RECIPIENT_FIELD: self._build_recipient(user_id),
MESSAGE_FIELD: {
ATTACHMENT_FIELD: {
TYPE_FIELD: AttachmentType.TEMPLATE.value,
PAYLOAD_FIELD: {
TEMPLATE_TYPE_FIELD: TemplateType.BUTTON.value,
TEXT_FIELD: title,
BUTTONS_FIELD: buttons
}
}
}})
def send_generic(self, user_id, element_list):
elements = [element.to_dict() for element in element_list]
self._send({RECIPIENT_FIELD: self._build_recipient(user_id),
MESSAGE_FIELD: {
ATTACHMENT_FIELD: {
TYPE_FIELD: AttachmentType.TEMPLATE.value,
PAYLOAD_FIELD: {
TEMPLATE_TYPE_FIELD: TemplateType.GENERIC.value,
ELEMENTS_FIELD: elements
}
}
}})
def send_quick_replies(self, user_id, title, reply_list):
replies = [reply.to_dict() for reply in reply_list]
self._send({RECIPIENT_FIELD: self._build_recipient(user_id),
MESSAGE_FIELD: {
TEXT_FIELD: title,
QUICK_REPLIES_FIELD: replies
}})
def typing(self, user_id, on=True):
data = {RECIPIENT_FIELD: {"id": user_id}, "sender_action": "typing_on" if on else "typing_off"}
return self.session.post(self.BASE_URL.format("me/messages"), data=json.dumps(data))
def fetch_user(self, user_id, fields: Optional[List[str]] = None) -> FacebookEntity:
if fields is None:
fields = FacebookEntity.USER_FIELDS
# FacebookEntity expects a dict-like payload, so wrap the bare id.
entity = FacebookEntity({'id': user_id})
resp = self.session.get(
self.BASE_URL.format(user_id),
params={
'fields': ','.join(fields)
}
)
resp.raise_for_status()
entity.hydrate_user_from_api(resp.json())
return entity
@staticmethod
def unserialize_received_request(object_type: str, json_entries: Dict[str, Any]) -> List[FacebookMessage]:
if json_entries['object'] != object_type:
raise RuntimeError('This request is not of object type {!r}'.format(object_type))
messages = []
for entry in json_entries['entry']:
fb_entry = FacebookEntry(entry)
messages.extend(fb_entry.messages)
return messages
@staticmethod
def _build_recipient(user_id):
return {Recipient.ID.value: user_id}
def _send(self, message_data):
post_message_url = self.BASE_URL.format("me/messages")
response_message = json.dumps(message_data)
logger.debug('Message: {}'.format(response_message))
req = self.session.post(post_message_url,
data=response_message)
logger.info("[{status}/{reason}/{text}] Reply to {recipient}: {content}".format(
status=req.status_code,
reason=req.reason,
text=req.text,
recipient=message_data[RECIPIENT_FIELD],
content=message_data[MESSAGE_FIELD]))
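# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# ``access_token`` and ``user_id`` are placeholders; sending real messages
# requires a valid page access token and a recipient who has messaged the page.
# ---------------------------------------------------------------------------
def _example_send(access_token, user_id):
    messager = Messager(access_token)
    messager.send_text(user_id, "Hello from the bot")
    buttons = [
        ActionButton(ButtonType.WEB_URL, "Open site", url="https://example.com"),
        ActionButton(ButtonType.POSTBACK, "Say hi", payload="SAY_HI"),
    ]
    messager.send_buttons(user_id, "What next?", buttons)
    replies = [QuickReply("Yes", "YES"), QuickReply("No", "NO")]
    messager.send_quick_replies(user_id, "Pick one", replies)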
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from heat.common import identifier
from heat.common import template_format
from heat.engine import environment
from heat.engine.resources.aws.cfn.wait_condition_handle \
import WaitConditionHandle
from heat.engine.resources.aws.ec2 import instance
from heat.engine.resources.openstack.nova.server import Server
from heat.engine.scheduler import TaskRunner
from heat.engine import service
from heat.engine import stack as stk
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests import utils
TEST_TEMPLATE_METADATA = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"S1": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"files" : {
"/tmp/random_file" : {
"content" : { "Fn::Join" : ["", [
"s2-ip=", {"Fn::GetAtt": ["S2", "PublicIp"]}
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
TEST_TEMPLATE_WAIT_CONDITION = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a WaitCondition.",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"WH" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"S1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : { "Fn::Join" : [ "", [ "#!/bin/bash -v\n",
"echo ",
{ "Ref" : "WH" },
"\n" ] ] }
}
},
"WC" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn": "S1",
"Properties" : {
"Handle" : {"Ref" : "WH"},
"Timeout" : "5"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"test" : {"Fn::GetAtt": ["WC", "Data"]}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
TEST_TEMPLATE_SERVER = '''
heat_template_version: 2013-05-23
resources:
instance1:
type: OS::Nova::Server
metadata: {"template_data": {get_attr: [instance2, first_address]}}
properties:
image: cirros-0.3.2-x86_64-disk
flavor: m1.small
key_name: stack_key
instance2:
type: OS::Nova::Server
metadata: {'apples': 'pears'}
properties:
image: cirros-0.3.2-x86_64-disk
flavor: m1.small
key_name: stack_key
'''
class MetadataRefreshTests(common.HeatTestCase):
@mock.patch.object(instance.Instance, 'handle_create')
@mock.patch.object(instance.Instance, 'check_create_complete')
@mock.patch.object(instance.Instance, 'FnGetAtt')
def test_FnGetAtt_metadata_updated(self, mock_get,
mock_check, mock_handle):
"""Tests that metadata gets updated when FnGetAtt return changes."""
# Setup
temp = template_format.parse(TEST_TEMPLATE_METADATA)
template = tmpl.Template(temp,
env=environment.Environment({}))
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test_stack', template, disable_rollback=True)
stack.store()
self.stub_ImageConstraint_validate()
self.stub_KeypairConstraint_validate()
self.stub_FlavorConstraint_validate()
# Configure FnGetAtt to return different values on subsequent calls
mock_get.side_effect = [
'10.0.0.1',
'10.0.0.2',
]
# Initial resolution of the metadata
stack.create()
# Sanity check on S2
s2 = stack['S2']
self.assertEqual((s2.CREATE, s2.COMPLETE), s2.state)
# Verify S1 is using the initial value from S2
s1 = stack['S1']
content = self._get_metadata_content(s1.metadata_get())
self.assertEqual('s2-ip=10.0.0.1', content)
# Run metadata update to pick up the new value from S2
s1.metadata_update()
s2.metadata_update()
# Verify the updated value is correct in S1
content = self._get_metadata_content(s1.metadata_get())
self.assertEqual('s2-ip=10.0.0.2', content)
# Verify outgoing calls
mock_get.assert_has_calls([
mock.call('PublicIp'),
mock.call('PublicIp')])
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
@staticmethod
def _get_metadata_content(m):
tmp = m['AWS::CloudFormation::Init']['config']['files']
return tmp['/tmp/random_file']['content']
class WaitConditionMetadataUpdateTests(common.HeatTestCase):
def setUp(self):
super(WaitConditionMetadataUpdateTests, self).setUp()
self.man = service.EngineService('a-host', 'a-topic')
self.man.create_periodic_tasks()
@mock.patch.object(instance.Instance, 'handle_create')
@mock.patch.object(instance.Instance, 'check_create_complete')
@mock.patch.object(instance.Instance, 'is_service_available')
@mock.patch.object(TaskRunner, '_sleep')
@mock.patch.object(WaitConditionHandle, 'identifier')
def test_wait_metadata(self, mock_identifier, mock_sleep, mock_available,
mock_check, mock_handle):
"""Tests a wait condition metadata update after a signal call."""
# Setup Stack
temp = template_format.parse(TEST_TEMPLATE_WAIT_CONDITION)
template = tmpl.Template(temp)
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test-stack', template, disable_rollback=True)
stack.store()
self.stub_ImageConstraint_validate()
self.stub_KeypairConstraint_validate()
self.stub_FlavorConstraint_validate()
res_id = identifier.ResourceIdentifier('test_tenant_id', stack.name,
stack.id, '', 'WH')
mock_identifier.return_value = res_id
watch = stack['WC']
inst = stack['S2']
# Setup Sleep Behavior
self.run_empty = True
def check_empty(sleep_time):
self.assertEqual('{}', watch.FnGetAtt('Data'))
self.assertIsNone(inst.metadata_get()['test'])
def update_metadata(unique_id, data, reason):
self.man.resource_signal(ctx,
dict(stack.identifier()),
'WH',
{'Data': data, 'Reason': reason,
'Status': 'SUCCESS',
'UniqueId': unique_id},
sync_call=True)
def post_success(sleep_time):
update_metadata('123', 'foo', 'bar')
def side_effect_popper(sleep_time):
if self.run_empty:
self.run_empty = False
check_empty(sleep_time)
else:
post_success(sleep_time)
mock_sleep.side_effect = side_effect_popper
# Test Initial Creation
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
self.assertEqual('{"123": "foo"}', watch.FnGetAtt('Data'))
self.assertEqual('{"123": "foo"}', inst.metadata_get()['test'])
# Test Update
update_metadata('456', 'blarg', 'wibble')
self.assertEqual({'123': 'foo', '456': 'blarg'},
jsonutils.loads(watch.FnGetAtt('Data')))
self.assertEqual('{"123": "foo"}',
inst.metadata_get()['test'])
self.assertEqual(
{'123': 'foo', '456': 'blarg'},
jsonutils.loads(inst.metadata_get(refresh=True)['test']))
# Verify outgoing calls
self.assertTrue(mock_available.call_count > 0)
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
class MetadataRefreshServerTests(common.HeatTestCase):
@mock.patch.object(Server, 'handle_create')
@mock.patch.object(Server, 'check_create_complete')
@mock.patch.object(Server, 'FnGetAtt')
def test_FnGetAtt_metadata_update(self, mock_get, mock_check, mock_handle):
temp = template_format.parse(TEST_TEMPLATE_SERVER)
template = tmpl.Template(temp,
env=environment.Environment({}))
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test-stack', template, disable_rollback=True)
stack.store()
self.stub_ImageConstraint_validate()
self.stub_KeypairConstraint_validate()
self.stub_FlavorConstraint_validate()
# Note dummy addresses are from TEST-NET-1 ref rfc5737
mock_get.side_effect = ['192.0.2.1', '192.0.2.2', '192.0.2.2']
# Test
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
s1 = stack['instance1']
md = s1.metadata_get()
self.assertEqual({u'template_data': '192.0.2.1'}, md)
# Now set some metadata via the resource, like is done by
# _populate_deployments_metadata. This should be persisted over
# calls to metadata_update()
new_md = {u'template_data': '192.0.2.2', 'set_by_rsrc': 'orange'}
s1.metadata_set(new_md)
md = s1.metadata_get(refresh=True)
self.assertEqual(new_md, md)
s1.metadata_update()
md = s1.metadata_get(refresh=True)
self.assertEqual(new_md, md)
# Verify outgoing calls
mock_get.assert_has_calls([
mock.call('first_address'),
mock.call('first_address')])
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
|
|
from django.test import TestCase
from dwitter.templatetags.insert_magic_links import insert_magic_links
class DweetTestCase(TestCase):
def test_insert_magic_links_bypasses_html(self):
self.assertEqual(
'prefix <h1>content</h1> suffix',
insert_magic_links('prefix <h1>content</h1> suffix')
)
# user
def test_insert_magic_links_replaces_user_with_valid_characters(self):
self.assertEqual(
'<a href="/u/a1_.@+-">u/a1_.@+-</a>',
insert_magic_links('u/a1_.@+-')
)
def test_insert_magic_links_bypasses_user_with_invalid_characters(self):
self.assertEqual(
'u/a1$',
insert_magic_links('u/a1$')
)
def test_insert_magic_links_replaces_standalone_user(self):
self.assertEqual(
'<a href="/u/a">u/a</a>',
insert_magic_links('u/a')
)
def test_insert_magic_links_replaces_user_at_start_of_string(self):
self.assertEqual(
'<a href="/u/a">u/a</a> suffix',
insert_magic_links('u/a suffix')
)
def test_insert_magic_links_replaces_user_at_end_of_string(self):
self.assertEqual(
'prefix <a href="/u/a">u/a</a>',
insert_magic_links('prefix u/a')
)
def test_insert_magic_links_replaces_user_at_middle_of_string(self):
self.assertEqual(
'prefix <a href="/u/a">u/a</a> suffix',
insert_magic_links('prefix u/a suffix')
)
def test_insert_magic_links_bypasses_user_prefixed_by_non_space(self):
self.assertEqual(
'prefixu/a suffix',
insert_magic_links('prefixu/a suffix')
)
def test_insert_magic_links_bypasses_user_suffixed_by_non_space(self):
self.assertEqual(
'prefix u/a/suffix',
insert_magic_links('prefix u/a/suffix')
)
def test_insert_magic_links_replaces_user_suffixed_by_slash(self):
self.assertEqual(
'prefix <a href="/u/a">u/a</a> prefix/u/a',
insert_magic_links('prefix /u/a prefix/u/a')
)
def test_insert_magic_links_replaces_user_inside_parentheses(self):
self.assertEqual(
'(<a href="/u/a">u/a</a>)',
insert_magic_links('(u/a)')
)
# dweet
def test_insert_magic_links_replaces_dweet_with_valid_characters(self):
self.assertEqual(
'<a href="/d/1234567890">d/1234567890</a>',
insert_magic_links('d/1234567890')
)
def test_insert_magic_links_bypasses_dweet_with_invalid_characters(self):
self.assertEqual(
'd/1a',
insert_magic_links('d/1a')
)
def test_insert_magic_links_replaces_standalone_dweet(self):
self.assertEqual(
'<a href="/d/1">d/1</a>',
insert_magic_links('d/1')
)
def test_insert_magic_links_replaces_dweet_at_start_of_string(self):
self.assertEqual(
'<a href="/d/1">d/1</a> suffix',
insert_magic_links('d/1 suffix')
)
def test_insert_magic_links_replaces_dweet_at_end_of_string(self):
self.assertEqual(
'prefix <a href="/d/1">d/1</a>',
insert_magic_links('prefix d/1')
)
def test_insert_magic_links_replaces_dweet_at_middle_of_string(self):
self.assertEqual(
'prefix <a href="/d/1">d/1</a> suffix',
insert_magic_links('prefix d/1 suffix')
)
def test_insert_magic_links_bypasses_dweet_prefixed_by_non_space(self):
self.assertEqual(
'prefixd/1 suffix',
insert_magic_links('prefixd/1 suffix')
)
def test_insert_magic_links_bypasses_dweet_suffixed_by_non_space(self):
self.assertEqual(
'prefix d/1/suffix',
insert_magic_links('prefix d/1/suffix')
)
def test_insert_magic_links_replaces_dweet_suffixed_by_slash(self):
self.assertEqual(
'prefix <a href="/d/1">d/1</a> prefix/d/1',
insert_magic_links('prefix /d/1 prefix/d/1')
)
def test_insert_magic_links_replaces_dweet_in_parentheses(self):
self.assertEqual(
'(<a href="/d/1">d/1</a>)',
insert_magic_links('(d/1)')
)
def test_insert_magic_replaces_basic_hashtag(self):
self.assertEqual(
'<a href="/h/test">#test</a>',
insert_magic_links('#test')
)
def test_insert_magic_replaces_prefix_hashtag(self):
self.assertEqual(
'prefix <a href="/h/test">#test</a>',
insert_magic_links('prefix #test')
)
def test_insert_magic_replaces_hashtag_prefix_no_space(self):
self.assertEqual(
'prefix<a href="/h/test">#test</a>',
insert_magic_links('prefix#test')
)
def test_insert_magic_replaces_hashtag_paren(self):
self.assertEqual(
'prefix(<a href="/h/test">#test</a>)',
insert_magic_links('prefix(#test)')
)
def test_insert_magic_replaces_hashtag_underscore(self):
self.assertEqual(
'Dwitter is just <a href="/h/amazing_underscore">#amazing_underscore</a>, right?',
insert_magic_links('Dwitter is just #amazing_underscore, right?')
)
def test_insert_magic_replaces_hashtag_illegal_hyphen(self):
self.assertEqual(
'Dwitter is just <a href="/h/amaze">#amaze</a>-balls, right?',
insert_magic_links('Dwitter is just #amaze-balls, right?')
)
def test_insert_magic_hashtag_not_start_with_digit(self):
self.assertEqual(
'Dwitter is just #1337 or <a href="/h/super1337">#super1337</a>?',
insert_magic_links('Dwitter is just #1337 or #super1337?')
)
def test_insert_magic_single_character_hashtag(self):
self.assertEqual(
'<a href="/h/s">#s</a>',
insert_magic_links('#s')
)
self.assertEqual(
'<a href="/h/S">#S</a>',
insert_magic_links('#S')
)
self.assertEqual(
'#1', # Start with digit not legal
insert_magic_links('#1')
)
def test_insert_magic_link_anchor_not_hashtag(self):
self.assertEqual(
'<a href="/h/start">#start</a> '
'https://www.example.com/page#anchor '
'<a href="/h/end">#end</a>',
insert_magic_links('#start https://www.example.com/page#anchor #end')
)
# mixed
def test_insert_magic_links_mixed(self):
self.assertEqual(
'<a href="/u/john">u/john</a> remixed '
'<a href="/d/123">d/123</a> by '
'<a href="/u/jane">u/jane</a>',
insert_magic_links('u/john remixed d/123 by /u/jane')
)
def test_insert_magic_links_mixed_hashtag(self):
self.assertEqual(
'<a href="/h/awesome">#awesome</a> '
'<a href="/u/john">u/john</a> remixed '
'<a href="/h/amazing">#amazing</a> '
'<a href="/d/123">d/123</a> by '
'<a href="/u/jane">u/jane</a>'
'<a href="/h/yey">#yey</a>',
insert_magic_links('#awesome u/john remixed #amazing d/123 by /u/jane#yey')
)
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "pointcloud.hoverlabel"
_path_str = "pointcloud.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.pointcloud.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.pointcloud.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pointcloud.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
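# Usage sketch (added for illustration, not part of the generated module):
# a hover-label font can be built either from keyword arguments or from an
# equivalent dict passed as ``arg``; both forms below describe the same font.
def _example_font():
    font_a = Font(color="crimson", family="Courier New", size=12)
    font_b = Font(arg={"color": "crimson", "family": "Courier New", "size": 12})
    return font_a, font_b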
|
|
import os, sys, logging, re
import code
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtCore import Qt
from copper import hou
from .base_panel import BasePanel
from .python_syntax_highlighter import PythonHighlighter
logger = logging.getLogger(__name__)
class PythonShellPanel(BasePanel):
def __init__(self):
BasePanel.__init__(self)
self.python_shell_widget = PythonShellWidget(self)
self.syntax = PythonHighlighter(self.python_shell_widget.document())
self.addWidget(self.python_shell_widget)
@classmethod
def panelTypeName(cls):
return "Python Shell"
@classmethod
def hasNetworkControls(cls):
return False
class PythonShellWidget(QtWidgets.QTextEdit):
class InteractiveInterpreter(code.InteractiveInterpreter):
def __init__(self, locals):
code.InteractiveInterpreter.__init__(self, locals)
def runIt(self, command):
logger.debug("run cmd: %s" % command)
code.InteractiveInterpreter.runsource(self, command)
def __init__(self, parent):
QtWidgets.QTextEdit.__init__(self, parent)
self.setObjectName("PythonShellWidget")
sys.stdout = self
sys.stderr = self
self.refreshMarker = False # to change back to >>> from ...
self.multiLine = False # code spans more than one line
self.command = '' # command to be ran
self.printBanner() # print sys info
self.marker() # make the >>> or ... marker
self.history = [] # list of commands entered
self.historyIndex = -1
self.interpreterLocals = {"hou": hou}
# initialize the interpreter with our locals
self.initInterpreter(locals())
def printBanner(self):
self.write(sys.version)
self.write(' on ' + sys.platform + '\n')
self.write('CopperFX python interpreter on PyQt ' + QtCore.PYQT_VERSION_STR + '\n')
msg = 'Type !hist for a history view and !hist(n) history index recall'
self.write(msg + '\n')
def marker(self):
if self.multiLine:
self.insertPlainText('... ')
else:
self.insertPlainText('>>> ')
def initInterpreter(self, interpreterLocals=None, parent=None):
if interpreterLocals:
# when we pass in locals, we don't want it to be named "self"
# so we rename it with the name of the class that did the passing
# and reinsert the locals back into the interpreter dictionary
selfName = interpreterLocals['self'].__class__.__name__
interpreterLocalVars = interpreterLocals.pop('self')
self.interpreterLocals[selfName] = interpreterLocalVars
else:
self.interpreterLocals = interpreterLocals
self.interpreter = self.InteractiveInterpreter(self.interpreterLocals)
def updateInterpreterLocals(self, newLocals):
className = newLocals.__class__.__name__
self.interpreterLocals[className] = newLocals
def write(self, line):
self.insertPlainText(line)
self.ensureCursorVisible()
def clearCurrentBlock(self):
# block being current row
length = len(self.document().lastBlock().text()[4:])
if length == 0:
return None
else:
# delete the typed characters one at a time; use range (xrange does not exist on Python 3)
for _ in range(length):
    self.textCursor().deletePreviousChar()
return True
def recallHistory(self):
# used when using the arrow keys to scroll through history
self.clearCurrentBlock()
if self.historyIndex != -1:
self.insertPlainText(self.history[self.historyIndex])
return True
def customCommands(self, command):
if command == '!hist': # display history
self.append('') # move down one line
# vars that are in the command are prefixed with ____CC and deleted
# once the command is done so they don't show up in dir()
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
for i, x in enumerate(history):
iSize = len(str(i))
delta = len(str(len(history))) - iSize
line = ' ' * delta + '%i: %s' % (i, x) + '\n'
self.write(line)
self.updateInterpreterLocals(backup)
self.marker()
return True
if re.match(r'!hist\(\d+\)', command): # recall command from history
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
index = int(command[6:-1])
self.clearCurrentBlock()
command = history[index]
if command[-1] == ':':
self.multiLine = True
self.write(command)
self.updateInterpreterLocals(backup)
return True
return False
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
# proper exit
self.interpreter.runIt('exit()')
if event.key() == Qt.Key_Down:
if self.historyIndex == len(self.history):
self.historyIndex -= 1
try:
if self.historyIndex > -1:
self.historyIndex -= 1
self.recallHistory()
else:
self.clearCurrentBlock()
except:
pass
return None
if event.key() == Qt.Key_Up:
try:
if len(self.history) - 1 > self.historyIndex:
self.historyIndex += 1
self.recallHistory()
else:
self.historyIndex = len(self.history)
except:
pass
return None
if event.key() == Qt.Key_Home:
# set cursor to position 4 in current block. 4 because that's where
# the marker stops
blockLength = len(self.document().lastBlock().text()[4:])
lineLength = len(self.document().toPlainText())
position = lineLength - blockLength
textCursor = self.textCursor()
textCursor.setPosition(position)
self.setTextCursor(textCursor)
return None
if event.key() in [Qt.Key_Left, Qt.Key_Backspace]:
# don't allow deletion of marker
if self.textCursor().positionInBlock() == 4:
return None
if event.key() in [Qt.Key_Return, Qt.Key_Enter]:
# set cursor to end of line to avoid line splitting
textCursor = self.textCursor()
position = len(self.document().toPlainText())
textCursor.setPosition(position)
self.setTextCursor(textCursor)
line = str(self.document().lastBlock().text())[4:] # remove marker
line = line.rstrip()
self.historyIndex = -1
if self.customCommands(line):
return None
else:
try:
line[-1]
self.haveLine = True
if line[-1] == ':':
self.multiLine = True
self.history.insert(0, line)
except:
self.haveLine = False
if self.haveLine and self.multiLine: # multi line command
self.command += line + '\n' # + command and line
self.append('') # move down one line
self.marker() # handle marker style
return None
if self.haveLine and not self.multiLine: # one line command
self.command = line # line is the command
self.append('') # move down one line
self.interpreter.runIt(self.command)
self.command = '' # clear command
self.marker() # handle marker style
return None
if self.multiLine and not self.haveLine: # multi line done
self.append('') # move down one line
self.interpreter.runIt(self.command)
self.command = '' # clear command
self.multiLine = False # back to single line
self.marker() # handle marker style
return None
if not self.haveLine and not self.multiLine: # just enter
self.append('')
self.marker()
return None
return None
# allow all other key events
super(PythonShellWidget, self).keyPressEvent(event)
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for quadratic_radial_distortion."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.camera import quadratic_radial_distortion
from tensorflow_graphics.util import test_case
RANDOM_TESTS_NUM_IMAGES = 10
RANDOM_TESTS_HEIGHT = 8
RANDOM_TESTS_WIDTH = 8
RADII_SHAPE = (RANDOM_TESTS_NUM_IMAGES, RANDOM_TESTS_HEIGHT, RANDOM_TESTS_WIDTH)
COEFFICIENT_SHAPE = (RANDOM_TESTS_NUM_IMAGES,)
def _get_random_radii():
return np.random.rand(*RADII_SHAPE).astype('float32')
def _get_zeros_radii():
return np.zeros(shape=RADII_SHAPE).astype('float32')
def _get_ones_radii():
return np.ones(shape=RADII_SHAPE).astype('float32')
def _get_random_coefficient():
return np.random.rand(*COEFFICIENT_SHAPE).astype('float32')
def _get_zeros_coefficient():
return np.zeros(shape=COEFFICIENT_SHAPE).astype('float32')
def _get_ones_coefficient():
return np.ones(shape=COEFFICIENT_SHAPE).astype('float32')
def _make_shape_compatible(coefficients):
return np.expand_dims(np.expand_dims(coefficients, axis=-1), axis=-1)
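# Illustrative call (added for clarity, not part of the original tests):
# ``distortion_factor`` broadcasts one coefficient per image over that
# image's map of squared radii; the shapes below mirror the module constants.
def _example_distortion_call():
    squared_radii = _get_ones_radii()        # shape (10, 8, 8)
    coefficients = _get_zeros_coefficient()  # shape (10,)
    # With zero coefficients the factor is 1 everywhere and no pixel overflows.
    return quadratic_radial_distortion.distortion_factor(squared_radii, coefficients)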
class QuadraticRadialDistortionTest(test_case.TestCase):
def test_distortion_factor_random_positive_distortion_coefficient(self):
"""Tests that distortion_factor produces the expected outputs."""
squared_radii = _get_random_radii() * 2.0
distortion_coefficient = _get_random_coefficient() * 2.0
distortion, mask = quadratic_radial_distortion.distortion_factor(
squared_radii, distortion_coefficient)
distortion_coefficient = _make_shape_compatible(distortion_coefficient)
with self.subTest(name='distortion'):
self.assertAllClose(1.0 + distortion_coefficient * squared_radii,
distortion)
# No overflow when distortion_coefficient >= 0.0.
with self.subTest(name='mask'):
self.assertAllInSet(mask, (False,))
def test_distortion_factor_preset_zero_distortion_coefficient(self):
"""Tests distortion_factor at zero distortion coefficient."""
squared_radii = _get_random_radii() * 2.0
distortion, mask = quadratic_radial_distortion.distortion_factor(
squared_radii, 0.0)
with self.subTest(name='distortion'):
self.assertAllClose(tf.ones_like(squared_radii), distortion)
# No overflow when distortion_coefficient = 0.0.
with self.subTest(name='mask'):
self.assertAllInSet(mask, (False,))
def test_distortion_factor_random_negative_distortion_coefficient(self):
"""Tests that distortion_factor produces the expected outputs."""
squared_radii = _get_random_radii() * 2.0
distortion_coefficient = _get_random_coefficient() * -0.2
distortion, mask = quadratic_radial_distortion.distortion_factor(
squared_radii, distortion_coefficient)
distortion_coefficient = _make_shape_compatible(distortion_coefficient)
max_squared_radii = -1.0 / 3.0 / distortion_coefficient
expected_overflow_mask = squared_radii > max_squared_radii
valid_mask = np.logical_not(expected_overflow_mask)
# We assert correctness of the mask, and of all the pixels that are not in
# overflow.
actual_distortion_when_valid = self.evaluate(distortion)[valid_mask]
expected_distortion_when_valid = (
1.0 + distortion_coefficient * squared_radii)[valid_mask]
with self.subTest(name='distortion'):
self.assertAllClose(expected_distortion_when_valid,
actual_distortion_when_valid)
with self.subTest(name='mask'):
self.assertAllEqual(expected_overflow_mask, mask)
def test_distortion_factor_preset_zero_radius(self):
"""Tests distortion_factor at the corner case of zero radius."""
squared_radii = _get_zeros_radii()
distortion_coefficient = _get_random_coefficient() - 0.5
distortion, mask = quadratic_radial_distortion.distortion_factor(
squared_radii, distortion_coefficient)
with self.subTest(name='distortion'):
self.assertAllClose(np.ones_like(squared_radii), distortion)
with self.subTest(name='mask'):
self.assertAllInSet(mask, (False,))
@parameterized.parameters(quadratic_radial_distortion.distortion_factor,
quadratic_radial_distortion.undistortion_factor)
def test_both_negative_radius_exception_raised(self, distortion_function):
"""Tests that an exception is raised when the squared radius is negative."""
squared_radii = _get_zeros_radii() - 0.5
distortion_coefficient = _get_random_coefficient() - 0.5
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(distortion_function(squared_radii, distortion_coefficient))
@parameterized.parameters((2, 2e-3), (3, 1e-8))
def test_undistortion_factor_random_positive_distortion_coefficient(
self, num_iterations, tolerance):
"""Tests that undistortion_factor produces the expected outputs."""
distorted_squared_radii = _get_random_radii() * 2.0
distortion_coefficient = _get_random_coefficient() * 0.2
undistortion, mask = quadratic_radial_distortion.undistortion_factor(
distorted_squared_radii, distortion_coefficient, num_iterations)
distortion_coefficient = _make_shape_compatible(distortion_coefficient)
undistorted_squared_radii = tf.square(
undistortion) * distorted_squared_radii
# We distort again the undistorted radii and compare to the original
# distorted_squared_radii.
redistorted_squared_radii = tf.square(
1.0 + distortion_coefficient *
undistorted_squared_radii) * undistorted_squared_radii
with self.subTest(name='distortion'):
self.assertAllClose(
distorted_squared_radii, redistorted_squared_radii, atol=tolerance)
# Positive distortion_coefficients never overflow.
with self.subTest(name='mask'):
self.assertAllInSet(mask, (False,))
@parameterized.parameters((2, 2e-2), (3, 6e-3), (4, 6e-4))
def test_undistortion_factor_random_negative_distortion_coefficient(
self, num_iterations, tolerance):
"""Tests that undistortion_factor produces the expected outputs."""
distorted_squared_radii = _get_random_radii() * 2.0
distortion_coefficient = _get_random_coefficient() * -0.2
undistortion, mask = quadratic_radial_distortion.undistortion_factor(
distorted_squared_radii, distortion_coefficient, num_iterations)
distortion_coefficient = _make_shape_compatible(distortion_coefficient)
undistorted_squared_radii = tf.square(
undistortion) * distorted_squared_radii
# See explanation in the implementation comments for this formula.
expected_overflow_mask = (
distorted_squared_radii * distortion_coefficient + 4.0 / 27.0 < 0)
redistorted_squared_radii = tf.square(
1.0 + distortion_coefficient *
undistorted_squared_radii) * undistorted_squared_radii
valid_mask = np.logical_not(expected_overflow_mask)
redistorted_squared_radii_when_valid = self.evaluate(
redistorted_squared_radii)[valid_mask]
distorted_squared_radii_when_valid = distorted_squared_radii[valid_mask]
with self.subTest(name='distortion'):
self.assertAllClose(
distorted_squared_radii_when_valid,
redistorted_squared_radii_when_valid,
rtol=tolerance,
atol=tolerance)
# We assert correctness of the mask, and of all the pixels that are not in
# overflow, distorting again the undistorted radii and comparing to the
# original distorted_squared_radii.
with self.subTest(name='mask'):
self.assertAllEqual(expected_overflow_mask, mask)
def test_undistortion_factor_zero_distortion_coefficient(self):
"""Tests undistortion_factor at zero distortion coefficient."""
squared_radii = _get_random_radii() * 2.0
undistortion, mask = quadratic_radial_distortion.undistortion_factor(
squared_radii, 0.0)
with self.subTest(name='distortion'):
self.assertAllClose(tf.ones_like(squared_radii), undistortion)
# No overflow when distortion_coefficient = 0.0.
with self.subTest(name='mask'):
self.assertAllEqual(np.zeros_like(squared_radii), mask)
@parameterized.parameters(
('must have a rank greater than 1', (2,), (2, 1)),
('Not all batch dimensions are broadcast-compatible', (2, 2, 2), (3,)),
('Not all batch dimensions are broadcast-compatible', (2, 2, 2), (3, 3)),
)
def test_distortion_factor_shape_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_raised(
func=quadratic_radial_distortion.distortion_factor,
error_msg=error_msg,
shapes=shapes)
@parameterized.parameters(
((2, 2), ()),
((1, 2, 2), (2,)),
((2, 2, 2), (2,)),
((2, 2), (2, 2)),
((2, 2, 2), (1, 2)),
((2, 3, 4), (1,)),
((2, 3, 4), (1, 1)),
((2, 3, 4), (2,)),
)
def test_distortion_factor_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_not_raised(
func=quadratic_radial_distortion.distortion_factor, shapes=shapes)
@parameterized.parameters(
('must have a rank greater than 1', (2,), (2, 1)),
('Not all batch dimensions are broadcast-compatible', (2, 2, 2), (3,)),
('Not all batch dimensions are broadcast-compatible', (2, 2, 2), (3, 3)),
)
def test_undistortion_factor_shape_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_raised(
func=quadratic_radial_distortion.undistortion_factor,
error_msg=error_msg,
shapes=shapes)
@parameterized.parameters(
((2, 2), ()),
((1, 2, 2), (2,)),
((2, 2, 2), (2,)),
((2, 2), (2, 2)),
((2, 2, 2), (1, 2)),
((2, 3, 4), (1,)),
((2, 3, 4), (1, 1)),
((2, 3, 4), (2,)),
)
def test_undistortion_factor_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are raised."""
self.assert_exception_is_not_raised(
func=quadratic_radial_distortion.undistortion_factor, shapes=shapes)
@parameterized.parameters(quadratic_radial_distortion.distortion_factor,
quadratic_radial_distortion.undistortion_factor)
def test_both_radial_jacobian(self, distortion_function):
"""Test the Jacobians with respect to squared radii."""
squared_radii = _get_random_radii().astype(np.float64) * 0.5
distortion_coefficients = _get_random_coefficient().astype(np.float64) * 0.5
distortion_coefficients -= 0.25
def distortion_fn(squared_radii):
distortion, _ = distortion_function(squared_radii,
distortion_coefficients)
return distortion
self.assert_jacobian_is_correct_fn(
distortion_fn, [squared_radii], delta=1e-7, atol=1e-3)
@parameterized.parameters(quadratic_radial_distortion.distortion_factor,
quadratic_radial_distortion.undistortion_factor)
def test_both_distortion_coefficient_jacobian(self, distortion_function):
"""Test the Jacobians with respect to distortion coefficients."""
squared_radii = _get_random_radii().astype(np.float64) * 0.5
distortion_coefficients = _get_random_coefficient().astype(np.float64) * 0.5
distortion_coefficients -= 0.25
def distortion_fn(distortion_coefficients):
distortion, _ = distortion_function(squared_radii,
distortion_coefficients)
return distortion
self.assert_jacobian_is_correct_fn(
distortion_fn, [distortion_coefficients], delta=1e-7, atol=1e-3)
if __name__ == '__main__':
test_case.main()
|
|
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from pandac.PandaModules import *
import random
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class HoodMgr(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('HoodMgr')
ToontownCentralInitialDropPoints = (
[-90.7, -60, 0.025, 102.575, 0, 0],
[-91.4, -40.5, -3.948, 125.763, 0, 0],
[-107.8, -17.8, -1.937, 149.456, 0, 0],
[-108.7, 12.8, -1.767, 158.756, 0, 0],
[-42.1, -22.8, -1.328, -248.1, 0, 0],
[-35.2, -60.2, 0.025, -265.639, 0, 0]
)
ToontownCentralHQDropPoints = (
[-43.5, 42.6, -0.55, -100.454, 0, 0],
[-53.0, 12.5, -2.948, 281.502, 0, 0],
[-40.3, -18.5, -0.913, -56.674, 0, 0],
[-1.9, -37.0, 0.025, -23.43, 0, 0],
[1.9, -5.9, 4, -37.941, 0, 0]
)
ToontownCentralTunnelDropPoints = (
[-28.3, 40.1, 0.25, 17.25, 0, 0],
[-63.75, 58.96, -0.5, -23.75, 0, 0],
[-106.93, 17.66, -2.2, 99, 0, 0],
[-116.0, -21.5, -0.038, 50, 0, 0],
[74.88, -115, 2.53, -224.41, 0, 0],
[30.488, -101.5, 2.53, -179.23, 0, 0]
)
dropPoints = {
ToontownGlobals.DonaldsDock: (
[-28, -2.5, 5.8, 120, 0, 0],
[-22, 13, 5.8, 155.6, 0, 0],
[67, 47, 5.7, 134.7, 0, 0],
[62, 19, 5.7, 97, 0, 0],
[66, -27, 5.7, 80.5, 0, 0],
[-114, -7, 5.7, -97, 0, 0],
[-108, 36, 5.7, -153.8, 0, 0],
[-116, -46, 5.7, -70.1, 0, 0],
[-63, -79, 5.7, -41.2, 0, 0],
[-2, -79, 5.7, 57.4, 0, 0],
[-38, -78, 5.7, 9.1, 0, 0]
),
ToontownGlobals.ToontownCentral: (
[-60, -8, 1.3, -90, 0, 0],
[-66, -9, 1.3, -274, 0, 0],
[17, -28, 4.1, -44, 0, 0],
[87.7, -22, 4, 66, 0, 0],
[-9.6, 61.1, 0, 132, 0, 0],
[-109.0, -2.5, -1.656, -90, 0, 0],
[-35.4, -81.3, 0.5, -4, 0, 0],
[-103, 72, 0, -141, 0, 0],
[93.5, -148.4, 2.5, 43, 0, 0],
[25, 123.4, 2.55, 272, 0, 0],
[48, 39, 4, 201, 0, 0],
[-80, -61, 0.1, -265, 0, 0],
[-46.875, 43.68, -1.05, 124, 0, 0],
[34, -105, 2.55, 45, 0, 0],
[16, -75, 2.55, 56, 0, 0],
[-27, -56, 0.1, 45, 0, 0],
[100, 27, 4.1, 150, 0, 0],
[-70, 4.6, -1.9, 90, 0, 0],
[-130.7, 50, 0.55, -111, 0, 0]
),
ToontownGlobals.TheBrrrgh: (
[35, -32, 6.2, 138, 0, 0],
[26, -105, 6.2, -339, 0, 0],
[-29, -139, 6.2, -385, 0, 0],
[-79, -123, 6.2, -369, 0, 0],
[-114, -86, 3, -54, 0, 0],
[-136, 9, 6.2, -125, 0, 0],
[-75, 92, 6.2, -187, 0, 0],
[-7, 75, 6.2, -187, 0, 0],
[-106, -42, 8.6, -111, 0, 0],
[-116, -44, 8.3, -20, 0, 0]
),
ToontownGlobals.MinniesMelodyland: (
[86, 44, -13.5, 121.1, 0, 0],
[88, -8, -13.5, 91, 0, 0],
[92, -76, -13.5, 62.5, 0, 0],
[53, -112, 6.5, 65.8, 0, 0],
[-69, -71, 6.5, -67.2, 0, 0],
[-75, 21, 6.5, -100.9, 0, 0],
[-21, 72, 6.5, -129.5, 0, 0],
[56, 72, 6.5, 138.2, 0, 0],
[-41, 47, 6.5, -98.9, 0, 0]
),
ToontownGlobals.DaisyGardens: (
[0, 0, 0, -10.5, 0, 0],
[76, 35, 1.1, -30.2, 0, 0],
[97, 106, 0, 51.4, 0, 0],
[51, 180, 10, 22.6, 0, 0],
[-14, 203, 10, 85.6, 0, 0],
[-58, 158, 10, -146.9, 0, 0],
[-86, 128, 0, -178.9, 0, 0],
[-64, 65, 0, 17.7, 0, 0],
[-13, 39, 0, -15.7, 0, 0],
[-12, 193, 0, -112.4, 0, 0],
[87, 128, 0, 45.4, 0, 0]
),
ToontownGlobals.DonaldsDreamland: (
[77, 91, 0, 124.4, 0, 0],
[29, 92, 0, -154.5, 0, 0],
[-28, 49, -16.4, -142, 0, 0],
[21, 40, -16, -65.1, 0, 0],
[48, 27, -15.4, -161, 0, 0],
[-2, -22, -15.2, -132.1, 0, 0],
[-92, -88, 0, -116.3, 0, 0],
[-56, -93, 0, -21.5, 0, 0],
[20, -88, 0, -123.4, 0, 0],
[76, -90, 0, 11, 0, 0]
),
ToontownGlobals.GoofySpeedway: (
[-0.7, 62, 0.08, 182, 0, 0],
[-1, -30, 0.06, 183, 0, 0],
[-13, -120, 0, 307, 0, 0],
[16.4, -120, 0, 65, 0, 0],
[-0.5, -90, 0, 182, 0, 0],
[-30, -25, -0.373, 326, 0, 0],
[29, -17, -0.373, 32, 0, 0]
),
ToontownGlobals.GolfZone: (
[-49.6, 102, 0, 162, 0, 0],
[-22.8, 36.6, 0, 157.5, 0, 0],
[40, 51, 0, 185, 0, 0],
[48.3, 122.2, 0, 192, 0, 0],
[106.3, 69.2, 0, 133, 0, 0],
[-81.5, 47.2, 0, 183, 0, 0],
[-80.5, -84.2, 0, 284, 0, 0],
[73, -111, 0, 354, 0, 0]
),
ToontownGlobals.OutdoorZone: (
[-165.8, 108, 0.025, 252, 0, 0],
[21, 130, 0.16, 170, 0, 0],
[93, 78.5, 0.23, 112, 0, 0],
[79, -1.6, 0.75, 163, 0, 0],
[10, 33, 5.32, 130.379, 0, 0],
[-200, -42, 0.025, 317.543, 0, 0],
[-21, -65, 0.335, -18, 0, 0],
[23, 68.5, 4.51, -22.808, 0, 0]
),
ToontownGlobals.Tutorial: (
[130.9, -8.6, -1.3, 105.5, 0, 0],
),
ToontownGlobals.SellbotHQ: (
[64, -128, 0.26, 36, 0, 0],
[9, -140, 0.26, 0, 0, 0],
[-82, -112, 0.26, -127, 0, 0],
[-73, -213, 0.26, -23, 0, 0],
[-20, -243, 0.26, -9, 0, 0],
[79, -208, 0.26, 43, 0, 0]
),
ToontownGlobals.CashbotHQ: (
[102, -437, -23.439, 0, 0, 0],
[124, -437, -23.439, 0, 0, 0],
[110, -446, -23.439, 0, 0, 0],
[132, -446, -23.439, 0, 0, 0]
),
ToontownGlobals.LawbotHQ: (
[77.5, 129.13, -68.4, -166.6, 0, 0],
[-57.7, 80.75, -68.4, -139.2, 0, 0],
[203.3, 46.36, -68.4, -213.37, 0, 0],
[88.2, -336.52, -68.4, -720.4, 0, 0],
[232.77, -305.33, -68.4, -651, 0, 0],
[-20.16, -345.76, -68.4, -777.98, 0, 0]
)
}
DefaultDropPoint = [0, 0, 0, 0, 0, 0]
hoodName2Id = {
'dd': ToontownGlobals.DonaldsDock,
'tt': ToontownGlobals.ToontownCentral,
'br': ToontownGlobals.TheBrrrgh,
'mm': ToontownGlobals.MinniesMelodyland,
'dg': ToontownGlobals.DaisyGardens,
'oz': ToontownGlobals.OutdoorZone,
'ff': ToontownGlobals.FunnyFarm,
'gs': ToontownGlobals.GoofySpeedway,
'dl': ToontownGlobals.DonaldsDreamland,
'bosshq': ToontownGlobals.BossbotHQ,
'sellhq': ToontownGlobals.SellbotHQ,
'cashhq': ToontownGlobals.CashbotHQ,
'lawhq': ToontownGlobals.LawbotHQ,
'gz': ToontownGlobals.GolfZone
}
hoodId2Name = {
ToontownGlobals.DonaldsDock: 'dd',
ToontownGlobals.ToontownCentral: 'tt',
ToontownGlobals.Tutorial: 'tt',
ToontownGlobals.TheBrrrgh: 'br',
ToontownGlobals.MinniesMelodyland: 'mm',
ToontownGlobals.DaisyGardens: 'dg',
ToontownGlobals.OutdoorZone: 'oz',
ToontownGlobals.FunnyFarm: 'ff',
ToontownGlobals.GoofySpeedway: 'gs',
ToontownGlobals.DonaldsDreamland: 'dl',
ToontownGlobals.BossbotHQ: 'bosshq',
ToontownGlobals.SellbotHQ: 'sellhq',
ToontownGlobals.CashbotHQ: 'cashhq',
ToontownGlobals.LawbotHQ: 'lawhq',
ToontownGlobals.GolfZone: 'gz'
}
dbgDropMode = 0
currentDropPoint = 0
def __init__(self, cr):
self.cr = cr
def getDropPoint(self, dropPointList):
if self.dbgDropMode == 0:
return random.choice(dropPointList)
else:
droppnt = self.currentDropPoint % len(dropPointList)
self.currentDropPoint = (self.currentDropPoint + 1) % len(dropPointList)
return dropPointList[droppnt]
def getAvailableZones(self):
if base.launcher is None:
return self.getZonesInPhase(4) + self.getZonesInPhase(6) + self.getZonesInPhase(8) + self.getZonesInPhase(9) + self.getZonesInPhase(10) + self.getZonesInPhase(11) + self.getZonesInPhase(12) + self.getZonesInPhase(13)
else:
zones = []
for phase in set(ToontownGlobals.phaseMap.values()):
if base.launcher.getPhaseComplete(phase):
zones = zones + self.getZonesInPhase(phase)
return zones
def getZonesInPhase(self, phase):
p = []
for i in ToontownGlobals.phaseMap.items():
if i[1] == phase:
p.append(i[0])
return p
def getPhaseFromHood(self, hoodId):
hoodId = ZoneUtil.getCanonicalHoodId(hoodId)
return ToontownGlobals.phaseMap[hoodId]
def getPlaygroundCenterFromId(self, hoodId):
dropPointList = self.dropPoints.get(hoodId, None)
if dropPointList:
return self.getDropPoint(dropPointList)
else:
self.notify.warning('getPlaygroundCenterFromId: No such hood id as: ' + str(hoodId))
return self.DefaultDropPoint
def getIdFromName(self, hoodName):
id = self.hoodName2Id.get(hoodName)
if id:
return id
else:
self.notify.error('No such hood name as: %s' % hoodName)
def getNameFromId(self, hoodId):
name = self.hoodId2Name.get(hoodId)
if name:
return name
else:
self.notify.error('No such hood id as: %s' % hoodId)
def getFullnameFromId(self, hoodId):
hoodId = ZoneUtil.getCanonicalZoneId(hoodId)
return ToontownGlobals.hoodNameMap[hoodId][-1]
def addLinkTunnelHooks(self, hoodPart, nodeList, currentZoneId):
tunnelOriginList = []
for i in nodeList:
linkTunnelNPC = i.findAllMatches('**/linktunnel*')
for p in xrange(linkTunnelNPC.getNumPaths()):
linkTunnel = linkTunnelNPC.getPath(p)
name = linkTunnel.getName()
nameParts = name.split('_')
hoodStr = nameParts[1]
zoneStr = nameParts[2]
hoodId = self.getIdFromName(hoodStr)
zoneId = int(zoneStr)
hoodId = ZoneUtil.getTrueZoneId(hoodId, currentZoneId)
zoneId = ZoneUtil.getTrueZoneId(zoneId, currentZoneId)
linkSphere = linkTunnel.find('**/tunnel_trigger')
if linkSphere.isEmpty():
linkSphere = linkTunnel.find('**/tunnel_sphere')
if not linkSphere.isEmpty():
cnode = linkSphere.node()
cnode.setName('tunnel_trigger_' + hoodStr + '_' + zoneStr)
cnode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.GhostBitmask)
else:
linkSphere = linkTunnel.find('**/tunnel_trigger_' + hoodStr + '_' + zoneStr)
if linkSphere.isEmpty():
self.notify.error('tunnel_trigger not found')
tunnelOrigin = linkTunnel.find('**/tunnel_origin')
if tunnelOrigin.isEmpty():
self.notify.error('tunnel_origin not found')
tunnelOriginPlaceHolder = render.attachNewNode('toph_' + hoodStr + '_' + zoneStr)
tunnelOriginList.append(tunnelOriginPlaceHolder)
tunnelOriginPlaceHolder.setPos(tunnelOrigin.getPos(render))
tunnelOriginPlaceHolder.setHpr(tunnelOrigin.getHpr(render))
hood = base.localAvatar.cr.playGame.hood
if ZoneUtil.tutorialDict:
how = 'teleportIn'
tutorialFlag = 1
else:
how = 'tunnelIn'
tutorialFlag = 0
hoodPart.accept('enter' + linkSphere.getName(), hoodPart.handleEnterTunnel, [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': how,
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'tunnelOrigin': tunnelOriginPlaceHolder,
'tutorial': tutorialFlag}])
return tunnelOriginList
def extractGroupName(self, groupFullName):
return groupFullName.split(':', 1)[0]
def makeLinkTunnelName(self, hoodId, currentZone):
return '**/toph_' + self.getNameFromId(hoodId) + '_' + str(currentZone)
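# --- Added illustration (not part of the original file) ---------------------
# A minimal, self-contained sketch of the deterministic branch used by
# getDropPoint() when dbgDropMode is enabled: successive calls walk the drop
# point list in order and wrap around, making spawn locations reproducible.
def _debugDropPointSequence(dropPointList, numCalls):
    points, current = [], 0
    for _ in range(numCalls):
        points.append(dropPointList[current % len(dropPointList)])
        current = (current + 1) % len(dropPointList)
    return points
# e.g. _debugDropPointSequence(['a', 'b', 'c'], 4) -> ['a', 'b', 'c', 'a']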
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'res/designer/problems/ventilationwidget.ui'
#
# Created: Tue Feb 16 12:16:15 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_VentilationWidget(object):
def setupUi(self, VentilationWidget):
VentilationWidget.setObjectName("VentilationWidget")
VentilationWidget.resize(720, 976)
self.verticalLayout = QtGui.QVBoxLayout(VentilationWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtGui.QTabWidget(VentilationWidget)
self.tabWidget.setObjectName("tabWidget")
self.tabExecutable = QtGui.QWidget()
self.tabExecutable.setObjectName("tabExecutable")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tabExecutable)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.groupBox = QtGui.QGroupBox(self.tabExecutable)
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.checkBoxInBuiltExecutable = QtGui.QCheckBox(self.groupBox)
self.checkBoxInBuiltExecutable.setObjectName("checkBoxInBuiltExecutable")
self.gridLayout.addWidget(self.checkBoxInBuiltExecutable, 0, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.groupBox)
self.label_19.setObjectName("label_19")
self.gridLayout.addWidget(self.label_19, 1, 0, 1, 1)
self.lineEditExecutable = QtGui.QLineEdit(self.groupBox)
self.lineEditExecutable.setObjectName("lineEditExecutable")
self.gridLayout.addWidget(self.lineEditExecutable, 1, 1, 1, 1)
self.pushButtonChooseExecutable = QtGui.QPushButton(self.groupBox)
self.pushButtonChooseExecutable.setObjectName("pushButtonChooseExecutable")
self.gridLayout.addWidget(self.pushButtonChooseExecutable, 1, 2, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox)
spacerItem = QtGui.QSpacerItem(20, 766, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.tabWidget.addTab(self.tabExecutable, "")
self.tabFileInputs = QtGui.QWidget()
self.tabFileInputs.setObjectName("tabFileInputs")
self.verticalLayout_6 = QtGui.QVBoxLayout(self.tabFileInputs)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.groupBox_2 = QtGui.QGroupBox(self.tabFileInputs)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_22 = QtGui.QLabel(self.groupBox_2)
self.label_22.setObjectName("label_22")
self.horizontalLayout_3.addWidget(self.label_22)
self.lineEditIpNode = QtGui.QLineEdit(self.groupBox_2)
self.lineEditIpNode.setObjectName("lineEditIpNode")
self.horizontalLayout_3.addWidget(self.lineEditIpNode)
self.pushButtonChooseIpNode = QtGui.QPushButton(self.groupBox_2)
self.pushButtonChooseIpNode.setObjectName("pushButtonChooseIpNode")
self.horizontalLayout_3.addWidget(self.pushButtonChooseIpNode)
self.gridLayout_2.addLayout(self.horizontalLayout_3, 1, 0, 1, 1)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_21 = QtGui.QLabel(self.groupBox_2)
self.label_21.setObjectName("label_21")
self.horizontalLayout_5.addWidget(self.label_21)
self.lineEditIpField = QtGui.QLineEdit(self.groupBox_2)
self.lineEditIpField.setObjectName("lineEditIpField")
self.horizontalLayout_5.addWidget(self.lineEditIpField)
self.pushButtonChooseIpField = QtGui.QPushButton(self.groupBox_2)
self.pushButtonChooseIpField.setObjectName("pushButtonChooseIpField")
self.horizontalLayout_5.addWidget(self.pushButtonChooseIpField)
self.gridLayout_2.addLayout(self.horizontalLayout_5, 3, 0, 1, 1)
self.checkBoxInBuiltTree = QtGui.QCheckBox(self.groupBox_2)
self.checkBoxInBuiltTree.setObjectName("checkBoxInBuiltTree")
self.gridLayout_2.addWidget(self.checkBoxInBuiltTree, 0, 0, 1, 1)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_23 = QtGui.QLabel(self.groupBox_2)
self.label_23.setObjectName("label_23")
self.horizontalLayout_4.addWidget(self.label_23)
self.lineEditIpElem = QtGui.QLineEdit(self.groupBox_2)
self.lineEditIpElem.setObjectName("lineEditIpElem")
self.horizontalLayout_4.addWidget(self.lineEditIpElem)
self.pushButtonChooseIpElem = QtGui.QPushButton(self.groupBox_2)
self.pushButtonChooseIpElem.setObjectName("pushButtonChooseIpElem")
self.horizontalLayout_4.addWidget(self.pushButtonChooseIpElem)
self.gridLayout_2.addLayout(self.horizontalLayout_4, 2, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem1, 6, 0, 1, 1)
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.label_33 = QtGui.QLabel(self.groupBox_2)
self.label_33.setObjectName("label_33")
self.horizontalLayout_14.addWidget(self.label_33)
self.lineEditIpMesh = QtGui.QLineEdit(self.groupBox_2)
self.lineEditIpMesh.setObjectName("lineEditIpMesh")
self.horizontalLayout_14.addWidget(self.lineEditIpMesh)
self.pushButtonChooseIpMesh = QtGui.QPushButton(self.groupBox_2)
self.pushButtonChooseIpMesh.setObjectName("pushButtonChooseIpMesh")
self.horizontalLayout_14.addWidget(self.pushButtonChooseIpMesh)
self.gridLayout_2.addLayout(self.horizontalLayout_14, 4, 0, 1, 1)
self.verticalLayout_6.addWidget(self.groupBox_2)
self.groupBox_3 = QtGui.QGroupBox(self.tabFileInputs)
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.checkBoxInBuiltFlow = QtGui.QCheckBox(self.groupBox_3)
self.checkBoxInBuiltFlow.setObjectName("checkBoxInBuiltFlow")
self.verticalLayout_4.addWidget(self.checkBoxInBuiltFlow)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_25 = QtGui.QLabel(self.groupBox_3)
self.label_25.setObjectName("label_25")
self.horizontalLayout_2.addWidget(self.label_25)
self.lineEditFlow = QtGui.QLineEdit(self.groupBox_3)
self.lineEditFlow.setObjectName("lineEditFlow")
self.horizontalLayout_2.addWidget(self.lineEditFlow)
self.pushButtonChooseFlow = QtGui.QPushButton(self.groupBox_3)
self.pushButtonChooseFlow.setObjectName("pushButtonChooseFlow")
self.horizontalLayout_2.addWidget(self.pushButtonChooseFlow)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem2)
self.verticalLayout_6.addWidget(self.groupBox_3)
self.groupBox_4 = QtGui.QGroupBox(self.tabFileInputs)
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_26 = QtGui.QLabel(self.groupBox_4)
self.label_26.setObjectName("label_26")
self.horizontalLayout.addWidget(self.label_26)
self.lineEditTerminalExNode = QtGui.QLineEdit(self.groupBox_4)
self.lineEditTerminalExNode.setObjectName("lineEditTerminalExNode")
self.horizontalLayout.addWidget(self.lineEditTerminalExNode)
self.pushButtonChooseTerminalExNode = QtGui.QPushButton(self.groupBox_4)
self.pushButtonChooseTerminalExNode.setObjectName("pushButtonChooseTerminalExNode")
self.horizontalLayout.addWidget(self.pushButtonChooseTerminalExNode)
self.verticalLayout_5.addLayout(self.horizontalLayout)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_24 = QtGui.QLabel(self.groupBox_4)
self.label_24.setObjectName("label_24")
self.horizontalLayout_7.addWidget(self.label_24)
self.lineEditTreeExElem = QtGui.QLineEdit(self.groupBox_4)
self.lineEditTreeExElem.setObjectName("lineEditTreeExElem")
self.horizontalLayout_7.addWidget(self.lineEditTreeExElem)
self.pushButtonChooseTreeExElem = QtGui.QPushButton(self.groupBox_4)
self.pushButtonChooseTreeExElem.setObjectName("pushButtonChooseTreeExElem")
self.horizontalLayout_7.addWidget(self.pushButtonChooseTreeExElem)
self.verticalLayout_5.addLayout(self.horizontalLayout_7)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_20 = QtGui.QLabel(self.groupBox_4)
self.label_20.setObjectName("label_20")
self.horizontalLayout_6.addWidget(self.label_20)
self.lineEditTreeExNode = QtGui.QLineEdit(self.groupBox_4)
self.lineEditTreeExNode.setObjectName("lineEditTreeExNode")
self.horizontalLayout_6.addWidget(self.lineEditTreeExNode)
self.pushButtonChooseTreeExNode = QtGui.QPushButton(self.groupBox_4)
self.pushButtonChooseTreeExNode.setObjectName("pushButtonChooseTreeExNode")
self.horizontalLayout_6.addWidget(self.pushButtonChooseTreeExNode)
self.verticalLayout_5.addLayout(self.horizontalLayout_6)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_27 = QtGui.QLabel(self.groupBox_4)
self.label_27.setObjectName("label_27")
self.horizontalLayout_8.addWidget(self.label_27)
self.lineEditVentilationExElem = QtGui.QLineEdit(self.groupBox_4)
self.lineEditVentilationExElem.setObjectName("lineEditVentilationExElem")
self.horizontalLayout_8.addWidget(self.lineEditVentilationExElem)
self.pushButtonChooseVentilationExElem = QtGui.QPushButton(self.groupBox_4)
self.pushButtonChooseVentilationExElem.setObjectName("pushButtonChooseVentilationExElem")
self.horizontalLayout_8.addWidget(self.pushButtonChooseVentilationExElem)
self.verticalLayout_5.addLayout(self.horizontalLayout_8)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.label_32 = QtGui.QLabel(self.groupBox_4)
self.label_32.setObjectName("label_32")
self.horizontalLayout_13.addWidget(self.label_32)
self.lineEditRadiusExElem = QtGui.QLineEdit(self.groupBox_4)
self.lineEditRadiusExElem.setObjectName("lineEditRadiusExElem")
self.horizontalLayout_13.addWidget(self.lineEditRadiusExElem)
self.pushButtonChooseRadiusExElem = QtGui.QPushButton(self.groupBox_4)
self.pushButtonChooseRadiusExElem.setObjectName("pushButtonChooseRadiusExElem")
self.horizontalLayout_13.addWidget(self.pushButtonChooseRadiusExElem)
self.verticalLayout_5.addLayout(self.horizontalLayout_13)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem3)
self.verticalLayout_6.addWidget(self.groupBox_4)
spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem4)
self.tabWidget.addTab(self.tabFileInputs, "")
self.tabParameters = QtGui.QWidget()
self.tabParameters.setObjectName("tabParameters")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tabParameters)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBoxMainParameters = QtGui.QGroupBox(self.tabParameters)
self.groupBoxMainParameters.setObjectName("groupBoxMainParameters")
self.formLayout = QtGui.QFormLayout(self.groupBoxMainParameters)
self.formLayout.setObjectName("formLayout")
self.label = QtGui.QLabel(self.groupBoxMainParameters)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.spinBoxNumberOfBreaths = QtGui.QSpinBox(self.groupBoxMainParameters)
self.spinBoxNumberOfBreaths.setMinimum(1)
self.spinBoxNumberOfBreaths.setProperty("value", 5)
self.spinBoxNumberOfBreaths.setObjectName("spinBoxNumberOfBreaths")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.spinBoxNumberOfBreaths)
self.label_2 = QtGui.QLabel(self.groupBoxMainParameters)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.spinBoxNumberOfIterations = QtGui.QSpinBox(self.groupBoxMainParameters)
self.spinBoxNumberOfIterations.setMinimum(1)
self.spinBoxNumberOfIterations.setMaximum(999999999)
self.spinBoxNumberOfIterations.setSingleStep(100)
self.spinBoxNumberOfIterations.setProperty("value", 200)
self.spinBoxNumberOfIterations.setObjectName("spinBoxNumberOfIterations")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.spinBoxNumberOfIterations)
self.label_3 = QtGui.QLabel(self.groupBoxMainParameters)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3)
self.doubleSpinBoxTimeStep = ScientificDoubleSpinBox(self.groupBoxMainParameters)
self.doubleSpinBoxTimeStep.setObjectName("doubleSpinBoxTimeStep")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxTimeStep)
self.label_4 = QtGui.QLabel(self.groupBoxMainParameters)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_4)
self.doubleSpinBoxErrorTolerance = ScientificDoubleSpinBox(self.groupBoxMainParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxErrorTolerance.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxErrorTolerance.setSizePolicy(sizePolicy)
self.doubleSpinBoxErrorTolerance.setObjectName("doubleSpinBoxErrorTolerance")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxErrorTolerance)
self.verticalLayout_2.addWidget(self.groupBoxMainParameters)
self.groupBoxFlowParameters = QtGui.QGroupBox(self.tabParameters)
self.groupBoxFlowParameters.setObjectName("groupBoxFlowParameters")
self.formLayout_2 = QtGui.QFormLayout(self.groupBoxFlowParameters)
self.formLayout_2.setObjectName("formLayout_2")
self.label_5 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_5.setObjectName("label_5")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_5)
self.doubleSpinBoxFRC = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxFRC.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxFRC.setSizePolicy(sizePolicy)
self.doubleSpinBoxFRC.setObjectName("doubleSpinBoxFRC")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxFRC)
self.label_6 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_6.setObjectName("label_6")
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_6)
self.doubleSpinBoxConstrict = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxConstrict.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxConstrict.setSizePolicy(sizePolicy)
self.doubleSpinBoxConstrict.setObjectName("doubleSpinBoxConstrict")
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxConstrict)
self.label_7 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_7.setObjectName("label_7")
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_7)
self.doubleSpinBoxTInterval = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxTInterval.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxTInterval.setSizePolicy(sizePolicy)
self.doubleSpinBoxTInterval.setObjectName("doubleSpinBoxTInterval")
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxTInterval)
self.label_8 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_8.setObjectName("label_8")
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_8)
self.spinBoxGdirn = QtGui.QSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxGdirn.sizePolicy().hasHeightForWidth())
self.spinBoxGdirn.setSizePolicy(sizePolicy)
self.spinBoxGdirn.setMinimum(1)
self.spinBoxGdirn.setObjectName("spinBoxGdirn")
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.spinBoxGdirn)
self.label_9 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_9.setObjectName("label_9")
self.formLayout_2.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_9)
self.doubleSpinBoxPressIn = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxPressIn.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxPressIn.setSizePolicy(sizePolicy)
self.doubleSpinBoxPressIn.setObjectName("doubleSpinBoxPressIn")
self.formLayout_2.setWidget(8, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxPressIn)
self.label_10 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_10.setObjectName("label_10")
self.formLayout_2.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_10)
self.doubleSpinBoxCOV = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxCOV.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxCOV.setSizePolicy(sizePolicy)
self.doubleSpinBoxCOV.setObjectName("doubleSpinBoxCOV")
self.formLayout_2.setWidget(10, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxCOV)
self.label_11 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_11.setObjectName("label_11")
self.formLayout_2.setWidget(12, QtGui.QFormLayout.LabelRole, self.label_11)
self.doubleSpinBoxRMaxMean = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxRMaxMean.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxRMaxMean.setSizePolicy(sizePolicy)
self.doubleSpinBoxRMaxMean.setObjectName("doubleSpinBoxRMaxMean")
self.formLayout_2.setWidget(12, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxRMaxMean)
self.label_12 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_12.setObjectName("label_12")
self.formLayout_2.setWidget(14, QtGui.QFormLayout.LabelRole, self.label_12)
self.doubleSpinBoxRMinMean = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxRMinMean.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxRMinMean.setSizePolicy(sizePolicy)
self.doubleSpinBoxRMinMean.setObjectName("doubleSpinBoxRMinMean")
self.formLayout_2.setWidget(14, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxRMinMean)
self.label_13 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_13.setObjectName("label_13")
self.formLayout_2.setWidget(16, QtGui.QFormLayout.LabelRole, self.label_13)
self.doubleSpinBoxIERatio = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxIERatio.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxIERatio.setSizePolicy(sizePolicy)
self.doubleSpinBoxIERatio.setObjectName("doubleSpinBoxIERatio")
self.formLayout_2.setWidget(16, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxIERatio)
self.label_18 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_18.setObjectName("label_18")
self.formLayout_2.setWidget(18, QtGui.QFormLayout.LabelRole, self.label_18)
self.doubleSpinBoxRefVolume = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxRefVolume.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxRefVolume.setSizePolicy(sizePolicy)
self.doubleSpinBoxRefVolume.setObjectName("doubleSpinBoxRefVolume")
self.formLayout_2.setWidget(18, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxRefVolume)
self.label_14 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_14.setObjectName("label_14")
self.formLayout_2.setWidget(20, QtGui.QFormLayout.LabelRole, self.label_14)
self.doubleSpinBoxVolumeTarget = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxVolumeTarget.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxVolumeTarget.setSizePolicy(sizePolicy)
self.doubleSpinBoxVolumeTarget.setObjectName("doubleSpinBoxVolumeTarget")
self.formLayout_2.setWidget(20, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxVolumeTarget)
self.label_15 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_15.setObjectName("label_15")
self.formLayout_2.setWidget(22, QtGui.QFormLayout.LabelRole, self.label_15)
self.doubleSpinBoxPMusStep = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxPMusStep.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxPMusStep.setSizePolicy(sizePolicy)
self.doubleSpinBoxPMusStep.setMinimum(-9999999.99)
self.doubleSpinBoxPMusStep.setObjectName("doubleSpinBoxPMusStep")
self.formLayout_2.setWidget(22, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxPMusStep)
self.label_17 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_17.setObjectName("label_17")
self.formLayout_2.setWidget(24, QtGui.QFormLayout.LabelRole, self.label_17)
self.label_16 = QtGui.QLabel(self.groupBoxFlowParameters)
self.label_16.setObjectName("label_16")
self.formLayout_2.setWidget(26, QtGui.QFormLayout.LabelRole, self.label_16)
self.doubleSpinBoxChestWallCompliance = ScientificDoubleSpinBox(self.groupBoxFlowParameters)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.doubleSpinBoxChestWallCompliance.sizePolicy().hasHeightForWidth())
self.doubleSpinBoxChestWallCompliance.setSizePolicy(sizePolicy)
self.doubleSpinBoxChestWallCompliance.setObjectName("doubleSpinBoxChestWallCompliance")
self.formLayout_2.setWidget(26, QtGui.QFormLayout.FieldRole, self.doubleSpinBoxChestWallCompliance)
self.comboBoxExpirationType = QtGui.QComboBox(self.groupBoxFlowParameters)
self.comboBoxExpirationType.setObjectName("comboBoxExpirationType")
self.comboBoxExpirationType.addItem("")
self.formLayout_2.setWidget(24, QtGui.QFormLayout.FieldRole, self.comboBoxExpirationType)
self.verticalLayout_2.addWidget(self.groupBoxFlowParameters)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem5)
self.tabWidget.addTab(self.tabParameters, "")
self.verticalLayout.addWidget(self.tabWidget)
self.retranslateUi(VentilationWidget)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(VentilationWidget)
VentilationWidget.setTabOrder(self.spinBoxNumberOfBreaths, self.spinBoxNumberOfIterations)
VentilationWidget.setTabOrder(self.spinBoxNumberOfIterations, self.doubleSpinBoxTimeStep)
VentilationWidget.setTabOrder(self.doubleSpinBoxTimeStep, self.doubleSpinBoxErrorTolerance)
VentilationWidget.setTabOrder(self.doubleSpinBoxErrorTolerance, self.doubleSpinBoxFRC)
VentilationWidget.setTabOrder(self.doubleSpinBoxFRC, self.doubleSpinBoxConstrict)
VentilationWidget.setTabOrder(self.doubleSpinBoxConstrict, self.doubleSpinBoxTInterval)
VentilationWidget.setTabOrder(self.doubleSpinBoxTInterval, self.spinBoxGdirn)
VentilationWidget.setTabOrder(self.spinBoxGdirn, self.doubleSpinBoxPressIn)
VentilationWidget.setTabOrder(self.doubleSpinBoxPressIn, self.doubleSpinBoxCOV)
VentilationWidget.setTabOrder(self.doubleSpinBoxCOV, self.doubleSpinBoxRMaxMean)
VentilationWidget.setTabOrder(self.doubleSpinBoxRMaxMean, self.doubleSpinBoxRMinMean)
VentilationWidget.setTabOrder(self.doubleSpinBoxRMinMean, self.doubleSpinBoxIERatio)
VentilationWidget.setTabOrder(self.doubleSpinBoxIERatio, self.doubleSpinBoxRefVolume)
VentilationWidget.setTabOrder(self.doubleSpinBoxRefVolume, self.doubleSpinBoxVolumeTarget)
VentilationWidget.setTabOrder(self.doubleSpinBoxVolumeTarget, self.doubleSpinBoxPMusStep)
VentilationWidget.setTabOrder(self.doubleSpinBoxPMusStep, self.comboBoxExpirationType)
VentilationWidget.setTabOrder(self.comboBoxExpirationType, self.doubleSpinBoxChestWallCompliance)
def retranslateUi(self, VentilationWidget):
VentilationWidget.setWindowTitle(QtGui.QApplication.translate("VentilationWidget", "Ventilation", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("VentilationWidget", "Executable", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxInBuiltExecutable.setText(QtGui.QApplication.translate("VentilationWidget", "Use in-built executable", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("VentilationWidget", "Executable:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseExecutable.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabExecutable), QtGui.QApplication.translate("VentilationWidget", "Executable", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("VentilationWidget", "Tree", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setText(QtGui.QApplication.translate("VentilationWidget", "ipnode:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseIpNode.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setText(QtGui.QApplication.translate("VentilationWidget", "ipfiel:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseIpField.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxInBuiltTree.setText(QtGui.QApplication.translate("VentilationWidget", "Use in-built tree files", None, QtGui.QApplication.UnicodeUTF8))
self.label_23.setText(QtGui.QApplication.translate("VentilationWidget", "ipelem:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseIpElem.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_33.setText(QtGui.QApplication.translate("VentilationWidget", "ipmesh:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseIpMesh.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("VentilationWidget", "Flow", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxInBuiltFlow.setText(QtGui.QApplication.translate("VentilationWidget", "Use in-built flow file", None, QtGui.QApplication.UnicodeUTF8))
self.label_25.setText(QtGui.QApplication.translate("VentilationWidget", "Flow file:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseFlow.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_4.setTitle(QtGui.QApplication.translate("VentilationWidget", "Output", None, QtGui.QApplication.UnicodeUTF8))
self.label_26.setText(QtGui.QApplication.translate("VentilationWidget", "Terminal exnode:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseTerminalExNode.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_24.setText(QtGui.QApplication.translate("VentilationWidget", "Tree exelem:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseTreeExElem.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("VentilationWidget", "Tree exnode:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseTreeExNode.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_27.setText(QtGui.QApplication.translate("VentilationWidget", "Ventilation exelem:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseVentilationExElem.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.label_32.setText(QtGui.QApplication.translate("VentilationWidget", "Radius exelem:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonChooseRadiusExElem.setText(QtGui.QApplication.translate("VentilationWidget", "Choose", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabFileInputs), QtGui.QApplication.translate("VentilationWidget", "File Input/Output(s)", None, QtGui.QApplication.UnicodeUTF8))
self.groupBoxMainParameters.setTitle(QtGui.QApplication.translate("VentilationWidget", "Main Parameters", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("VentilationWidget", "Number of breaths:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("VentilationWidget", "Number of iterations:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("VentilationWidget", "dt:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("VentilationWidget", "Error tolerance:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBoxFlowParameters.setTitle(QtGui.QApplication.translate("VentilationWidget", "Flow Parameters", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("VentilationWidget", "FRC:", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("VentilationWidget", "Constrict:", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("VentilationWidget", "T interval:", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("VentilationWidget", "G dirn:", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("VentilationWidget", "Press in:", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("VentilationWidget", "COV:", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("VentilationWidget", "R max. mean:", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("VentilationWidget", "R min. mean:", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("VentilationWidget", "i-e ratio:", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("VentilationWidget", "Ref. volume:", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("VentilationWidget", "Volume target:", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setText(QtGui.QApplication.translate("VentilationWidget", "Pmus step:", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("VentilationWidget", "Expiration type:", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("VentilationWidget", "Chest wall compliance:", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxExpirationType.setItemText(0, QtGui.QApplication.translate("VentilationWidget", "Passive", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabParameters), QtGui.QApplication.translate("VentilationWidget", "Parameters", None, QtGui.QApplication.UnicodeUTF8))
from opencmiss.neon.ui.misc.scientificdoublespinbox import ScientificDoubleSpinBox
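# --- Added usage sketch (not produced by pyside-uic) -------------------------
# The generated class is normally applied to a plain QWidget via setupUi();
# this assumes PySide and the opencmiss.neon package are importable, exactly
# as the module above already requires.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QWidget()
    ui = Ui_VentilationWidget()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())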
|
|
# -*- coding: utf-8 -*-
'''
canteen datastructures
~~~~~~~~~~~~~~~~~~~~~~
lightweight datastructures for use inside and outside
:py:class:`canteen`.
:author: Sam Gammon <sam@keen.io>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# stdlib
import abc, logging
# canteen util
from . import decorators
class Sentinel(object):
''' Create a named sentinel object. '''
name = None
hash = None
_falsy = False
def __init__(self, name, falsy=False):
''' Construct a new sentinel.
:param name:
:param falsy:
:returns: '''
self.name, self.hash, self._falsy = name, int((''.join(str(ord(c)) for c in name))), falsy
def __hash__(self):
''' Hash value for this sentinel.
:returns: '''
return self.hash
def __eq__(self, other):
''' Equality comparator for this sentinel.
:returns: '''
if isinstance(other, self.__class__):
return other.hash == self.hash
return False
def __repr__(self):
''' Represent this sentinel as a string.
:returns: '''
return '<Sentinel "%s">' % self.name
def __nonzero__(self):
''' Test whether this sentinel is falsy.
:returns: '''
return (not self._falsy)
# Sentinels
_EMPTY, _TOMBSTONE = Sentinel("EMPTY", True), Sentinel("TOMBSTONE", True)
class UtilStruct(object):
''' Abstract class for a utility object. '''
__metaclass__ = abc.ABCMeta
## Init -- Accept structure fill
def __new__(cls, *args, **kwargs):
''' Class constructor that enforces abstractness
at the root of the class tree.
Raises :py:exc:`NotImplementedError` if the
root class :py:class:`UtilStruct` is constructed
directly. Otherwise, returns a new instance
of the requested class. '''
if cls.__name__ == 'UtilStruct':
raise NotImplementedError('Cannot construct `UtilStruct` directly as'
' it is an abstract class.')
return object.__new__(cls)
@abc.abstractmethod
def fillStructure(self, struct, case_sensitive=False, **kwargs):
''' Abstract method that fills a local object with data, usually
from initialization.
:param struct:
:param case_sensitive:
:param kwargs:
:returns: '''
raise NotImplementedError('`UtilStruct.fillStructure` is abstract and must'
' be implemented by a subclass.')
class ObjectProxy(UtilStruct):
''' Same handy object as above, but stores the entries in an
_entries attribute rather than the class dict. '''
_entries = None
_case_sensitive = None
def __init__(self, struct=None, case_sensitive=False, **kwargs):
''' If handed a dictionary (or something) in init, send it to
fillStructure (and do the same for kwargs).
:param struct:
:param case_sensitive:
:param kwargs:
:raises TypeError:
:returns: '''
self._entries, self._case_sensitive = {}, case_sensitive
if struct:
if kwargs: struct.update(kwargs)
self.fillStructure(struct, case_sensitive=case_sensitive)
def i_filter(self, target):
''' Account for case sensitivity.
:param target: String parameter name
to filter.
:returns: Case insensitive version
of ``target`` if case sensitivity
is deactivated. '''
if self._case_sensitive:
return target
return str(target).lower()
def fillStructure(self, fill, case_sensitive=False, **kwargs):
''' If handed a dictionary, will fill self with
those entries. Usually called from ``__init__``.
:param fill: Structure to fill self with.
:param case_sensitive: Whether we should
initialize while ignoring case.
:param kwargs: Keyword arguments to be applied
to ``struct`` as override.
:returns: ``self``. '''
self._case_sensitive = case_sensitive
if fill:
if kwargs: fill.update(kwargs)
for k, v in (fill.iteritems() if isinstance(fill, dict) else iter(fill)):
self._entries[self.i_filter(k)] = v
return self
def __getitem__(self, name):
''' 'x = struct[name]' override.
:param name:
:raises KeyError:
:returns: '''
filtered = self.i_filter(name)
if filtered not in self._entries:
raise KeyError("Cannot locate name '%s' in ObjectProxy '%s'." % (name, self))
return self._entries[filtered]
def __getattr__(self, name):
''' 'x = struct.name' override.
:param name:
:raises AttributeError:
:returns: '''
filtered = self.i_filter(name)
if filtered not in self._entries:
raise AttributeError("Could not find the attribute '%s' on the specified ObjectProxy." % name)
return self._entries[filtered]
def __contains__(self, name):
''' 'x in struct' override.
:param name:
:returns: '''
return self.i_filter(name) in self._entries
def keys(self):
''' return all keys in this struct.
:returns: '''
return self._entries.keys()
def values(self):
''' return all values in this struct.
:returns: '''
return self._entries.values()
def items(self):
''' return all (k, v) pairs in this struct.
:returns: '''
return self._entries.items()
def iterkeys(self):
''' return each key in this struct,
one at a time, generator-style.
:yields: '''
return self._entries.iterkeys()
def itervalues(self):
''' return each value in this struct,
one at a time, generator-style.
:yields: '''
return self._entries.itervalues()
def iteritems(self):
''' return all (k, v) pairs in this struct,
one at a time, generator-style.
:yields: '''
return self._entries.iteritems()
class WritableObjectProxy(ObjectProxy):
''' Same handy object as `ObjectProxy`, but allows appending things at runtime. '''
def __setitem__(self, name, value):
''' 'struct[name] = x' override.
:param name:
:param value:
:returns: '''
self._entries[name] = value
def __setattr__(self, name, value):
''' 'struct.name = x' override.
:param name:
:param value:
:returns: '''
if name in ('_entries', '_case_sensitive', '__slots__'):
return object.__setattr__(self, name, value)
self._entries[name] = value
def __delattr__(self, name):
''' 'del struct.name' override.
:param name:
:raises AttributeError:
:returns: '''
if self.i_filter(name) not in self._entries:
raise AttributeError("Could not find the entry '%s' on the specified ObjectProxy." % name)
del self._entries[self.i_filter(name)]
def __delitem__(self, name):
''' 'del struct[name]' override.
:param name:
:raises KeyError:
:returns: '''
if self.i_filter(name) not in self._entries:
raise KeyError("Could not find the entry '%s' on the specified ObjectProxy." % name)
del self._entries[self.i_filter(name)]
class CallbackProxy(ObjectProxy):
''' Handy little object that takes a dict and makes
it accessible via var[item], but returns the
result of an invoked ``callback(item)``. '''
_entries = None # cached entries
callback = None # callback func
def __init__(self, callback, struct=None, **kwargs):
''' Map the callback and fillStructure if we
get one via `struct`.
:param callback:
:param struct:
:param kwargs:
:returns: '''
self.callback = callback
self._entries = struct if struct is not None else {}
if kwargs: self._entries.update(kwargs)
def __getitem__(self, name):
''' 'x = struct[name]' override.
:param name:
:raises KeyError:
:returns: '''
if self._entries:
if name not in self._entries:
raise KeyError("Could not retrieve item '%s' from CallbackProxy '%s'." % (name, self))
return self.callback(self._entries.get(name))
return self.callback(name)
def __getattr__(self, name):
''' 'x = struct.name' override.
:param name:
:raises AttributeError:
:returns: '''
if self._entries:
if not name or (name not in self._entries):
raise AttributeError("CallbackProxy could not resolve entry '%s'." % name)
return self.callback(self._entries.get(name))
return self.callback(name)
def __call__(self, *args, **kwargs):
''' 'struct()' override.
:returns: '''
return self.callback(*args, **kwargs)
class ObjectDictBridge(UtilStruct):
''' Treat an object like a dict, or an object! Assign an object
with `ObjectDictBridge(<object>)`. Then access properties
with `bridge[item]` or `bridge.item`. '''
target = None # target object
def __init__(self, target_object=None):
''' constructor.
:param target_object:
:returns: '''
super(ObjectDictBridge, self).__setattr__('target', target_object)
def __getitem__(self, name):
''' 'x = struct[name]' override.
:param name:
:raise KeyError:
:returns: '''
if self.target is not None:
try:
return getattr(self.target, name)
except AttributeError, e:
raise KeyError(str(e))
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __setitem__(self, name, value):
''' 'struct[name] = x' override.
:param name:
:raises KeyError:
:returns: '''
if self.target is not None:
try:
return setattr(self.target, name, value)
except Exception, e:
raise e
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __delitem__(self, name):
''' 'del struct[name]' override.
:param name:
:raises KeyError:
:raises AttributeError:
:returns: '''
if self.target is not None:
try:
return delattr(self.target, name)
except Exception, e:
raise e
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __getattr__(self, name):
''' 'x = struct.name' override.
:param name:
:raises KeyError:
:raises AttributeError:
:returns: '''
if self.target is not None:
try:
return getattr(self.target, name)
except Exception, e:
raise e
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __setattr__(self, name, value):
''' 'struct.name = x' override.
:param name:
:raises KeyError:
:raises AttributeError:
:returns: '''
if self.target is not None:
try:
return setattr(self.target, name, value)
except Exception, e:
raise e
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __delattr__(self, name):
''' 'del struct.name' override.
:param name:
:raises KeyError:
:raises AttributeError:
:returns: '''
if self.target is not None:
try:
return delattr(self.target, name)
except Exception, e:
raise e
else:
raise KeyError('No object target set for ObjectDictBridge.')
def __contains__(self, name):
''' Indicates whether this ObjectDictBridge
contains the given key.
:param name:
:returns: '''
try:
getattr(self.target, name)
except AttributeError:
return False
return True
def get(self, name, default_value=None):
''' dict-like safe get (`obj.get(name, default)`).
:param name:
:param default_value:
:returns: '''
try:
return getattr(self.target, name)
except:
return default_value
return default_value
@decorators.singleton
class BidirectionalEnum(object):
''' Small and simple datastructure for mapping
static flags to smaller values. '''
class __metaclass__(abc.ABCMeta):
''' Metaclass for property-gather-enabled classes. '''
def __new__(cls, name, chain, mappings):
''' Read mapped properties, store on the
object, along with a reverse mapping.
:param name:
:param chain:
:param mappings:
:returns: '''
if name == 'ProxiedStructure':
return type(name, chain, mappings)
# Init calculated data attributes
mappings['_pmap'] = {}
mappings['_plookup'] = []
# Define __contains__ proxy
def _contains(proxied_o, flag_or_value):
''' Bidirectionally-compatible __contains__
replacement.
:param proxied_o:
:param flag_or_value:
:returns: '''
return flag_or_value in proxied_o._plookup
# Define __getitem__ proxy
def _getitem(proxied_o, fragment):
''' Attempt to resolve the fragment by a
forward, then reverse resolution chain.
:param proxied_o:
:param fragment:
:returns: '''
if proxied_o.__contains__(fragment):
return proxied_o._pmap.get(fragment)
# Define __setitem__ proxy
def _setitem(proxied_o, n, v):
''' Block setitem calls, because this is a
complicated object that is supposed
to be a modelling tool only.
:param proxied_o:
:param n:
:param v:
:raises NotImplementedError: '''
raise NotImplementedError('Not implemented')
# Map properties into data and lookup attributes
map(lambda x: [mappings['_pmap'].update(dict(x)), mappings['_plookup'].append([x[0][0], x[1][0]])],
(((attr, value), (value, attr)) for attr, value in mappings.items() if not attr.startswith('_')))
if '__getitem__' not in mappings:
mappings['__getitem__'] = _getitem
if '__setitem__' not in mappings:
mappings['__setitem__'] = _setitem
if '__contains__' not in mappings:
mappings['__contains__'] = _contains
return super(cls, cls).__new__(cls, name, chain, mappings)
@classmethod
def reverse_resolve(cls, code):
''' Resolve a mapping by its integer/string code.
:param code:
:returns: '''
if code in cls._pmap:
return cls._pmap[code]
return False
@classmethod
def forward_resolve(cls, flag):
''' Resolve a mapping by its string property name.
:param flag:
:returns: '''
if flag in cls._pmap:
return getattr(cls, flag)
return False
@classmethod
def resolve(cls, flag): return cls.forward_resolve(flag)
@classmethod
def __serialize__(cls):
''' Flatten down into a structure suitable for
storage/transport.
:returns: '''
return dict((k, getattr(cls, k)) for k in dir(cls) if not k.startswith('_') and not callable(getattr(cls, k)))
@classmethod
def __json__(cls):
''' Flatten down and serialize into JSON.
:returns: '''
return cls.__serialize__()
@classmethod
def __repr__(cls):
''' Display a string representation of
a flattened self.
:returns: '''
return '::'.join([
"<%s" % cls.__name__,
','.join([
block for block in ('='.join([str(k), str(v)]) for k, v in cls.__serialize__().items())]),
"BiDirectional>"
])
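# --- Added illustration (commented sketch, not part of the original module) --
# A concrete enum is built by subclassing BidirectionalEnum with flag
# attributes; the metaclass records both directions, so (assuming the
# decorators.singleton wrapper leaves the class usable as shown):
#
#   class Colors(BidirectionalEnum):
#     RED = 0x0
#     BLUE = 0x1
#
#   Colors.forward_resolve('RED')   # -> 0x0
#   Colors.reverse_resolve(0x1)     # -> 'BLUE'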
__all__ = (
'Sentinel',
'_EMPTY',
'_TOMBSTONE',
'UtilStruct',
'ObjectProxy',
'WritableObjectProxy',
'CallbackProxy',
'ObjectDictBridge',
'BidirectionalEnum'
)
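# --- Added usage sketch (illustrative only, not part of the original module) -
# Exercises the dict-style and attribute-style access paths defined above.
if __name__ == '__main__':
    config = ObjectProxy({'Debug': True, 'name': 'demo'})
    assert config['debug'] is True    # keys are folded to lower case by i_filter()
    assert config.name == 'demo'      # attribute access reads the same _entries
    assert 'DEBUG' in config          # __contains__ is case-insensitive as well
    writable = WritableObjectProxy({'a': 1})
    writable.b = 2                    # runtime assignment is allowed on this variant
    assert writable['b'] == 2
    doubled = CallbackProxy(lambda value: value * 2, {'answer': 21})
    assert doubled['answer'] == 42    # stored value is passed through the callback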
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
import logging
import logger
class_dumps_folder_name = "class_dumps"
module_map_file_name = "module_map.json"
class LazyClassDumpManager(object):
"""
Lazily loads class data into memory.
:param dict[str, Module] modules: Maps module name to module.
"""
def __init__(self):
super(LazyClassDumpManager, self).__init__()
log = logging.getLogger(__name__)
log.debug("LazyClassDumpManager: created.")
self.modules = dict()
def register_module(self, module_name, module_path):
"""
Adds module directory with `module_map.json` file.
:param str module_name: Module name.
:param str module_path: Path to module directory.
"""
log = logging.getLogger(__name__)
module_path = os.path.normpath(module_path)
# Check if directory exists.
if not os.path.exists(module_path):
log.error("LazyClassDumpManager: Cannot find module \"{}\" directory \"{}\".".format(module_name, module_path))
return
# Loads module.
module = Module(module_name, module_path)
self.modules[module_name] = module
def get_module(self, name):
"""
Returns Module object with given name.
:param str name: Module name.
:return: Module object with given name.
:rtype: Module | None
"""
if name in self.modules:
return self.modules[name]
return None
def find_module_for_class(self, architecture_name, class_name):
"""
Tries to find module name for given class.
:param str architecture_name: Architecture name.
:param str class_name: Class name.
:return: Module name.
:rtype: str | None
"""
# Tries to find class in all modules.
for module_name in self.modules:
module = self.modules[module_name]
c = module.get_class_or_load(architecture_name, class_name)
if c is not None:
return module_name
return None
def get_class(self, module_name, architecture_name, class_name):
"""
Returns Class object based on module name, architecture name and class name.
Supported architectures: armv7, armv7s, arm64, i386, x86_64.
:param str module_name: Module name.
:param str architecture_name: Architecture name.
:param str class_name: Class name.
:return: Class object based on module name, architecture name and class name.
:rtype: Class | None
"""
# Try to find the Module.
module = self.get_module(module_name)
if not module:
return None
# Get Class.
c = module.get_class_or_load(architecture_name, class_name)
return c
def get_ivar(self, module_name, architecture_name, class_name, ivar_name):
"""
Returns Ivar object based on module name, architecture name, class name and ivar name.
:param str module_name: Module name.
:param str architecture_name: Architecture name.
:param str class_name: Class name.
:param str ivar_name: Ivar name.
:return: Ivar object based on module name, architecture name, class name and ivar name.
:rtype: Ivar | None
"""
# Get Class.
c = self.get_class(module_name, architecture_name, class_name)
if not c:
return None
# Get Ivar.
i = c.get_ivar(ivar_name)
# Ivar not found, but the class has a superclass.
if i is None and c.super_class_name is not None:
i = self.get_ivar(module_name, architecture_name, c.super_class_name, ivar_name)
# Class not found in current module, try to load another module.
if i is None:
mm = self.find_module_for_class(architecture_name, c.super_class_name)
if mm is not None:
i = self.get_ivar(mm, architecture_name, c.super_class_name, ivar_name)
return i
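# --- Added illustration (commented sketch; module name and paths are hypothetical) --
# Typical lazy lookup flow: register a per-module dump directory (it must
# contain module_map.json), then resolve classes and ivars on demand.
#
#   manager = LazyClassDumpManager()
#   manager.register_module("UIKit", "/path/to/class_dumps/UIKit/class_dumps")
#   ivar = manager.get_ivar("UIKit", "arm64", "UIView", "_layer")
#   if ivar is not None:
#       print(ivar)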
class ClassDumpManager(object):
"""
Represents a list of modules. It loads all data at once.
:param dict[str, Module] modules: Map of module name to Module object.
"""
def __init__(self):
super(ClassDumpManager, self).__init__()
self.modules = dict()
def get_module(self, name):
"""
Returns Module object with given name.
:param str name: Module name.
:return: Module object with given name.
:rtype: Module | None
"""
if name in self.modules:
return self.modules[name]
return None
def read_directory_path(self, dir_path):
"""
Reads all module directories from directory.
:param str dir_path: Path to directory.
"""
# Go through all files in input directory and read it.
for module_name in os.listdir(dir_path):
module_path = os.path.join(dir_path, module_name)
if os.path.isdir(module_path):
# Get Module.
module = self.get_module(module_name)
if not module:
module = Module(module_name)
self.modules[module_name] = module
# Read Module JSON files.
module.read_directory_path(module_path)
def save_to_folder(self, folder_path):
"""
Saves all classes from all modules as JSON files to given folder path.
:param str folder_path: Path to output folder.
"""
folder_path = os.path.normpath(folder_path)
# Create output directory if needed.
if len(self.modules) != 0:
if not os.path.exists(folder_path):
os.makedirs(folder_path)
# Save every Module.
for module_name, module in self.modules.iteritems():
module_path = os.path.join(folder_path, module.name, class_dumps_folder_name)
module.save_to_folder(module_path)
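# --- Added illustration (commented sketch; directory paths are hypothetical) --
# Eager batch flow: read every module directory found under an input folder,
# then rewrite the per-class JSON files and module_map.json under an output folder.
#
#   manager = ClassDumpManager()
#   manager.read_directory_path("/tmp/class_dumps_raw")
#   manager.save_to_folder("/tmp/class_dumps_out")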
class Module(object):
"""
Represents one module. Contains list of architectures.
:param str name: Module name.
:param str dir_path: Path to module directory.
:param dict[str, str] module_file_map: Module map. Maps class name to class file path.
:param dict[str, Architecture] architectures: Maps architecture name to Architecture object.
"""
def __init__(self, name, dir_path=None):
"""
:param str name: Module name.
"""
super(Module, self).__init__()
self.name = name
self.dir_path = dir_path
self.module_file_map = None
self.architectures = dict()
if dir_path:
self._read_module_map()
def get_architecture(self, name):
"""
Finds Architecture object with given name.
:param str name: Architecture name.
:return: Architecture object with given name.
:rtype: Architecture | None
"""
if name in self.architectures:
return self.architectures[name]
return None
def all_class_names(self):
"""
Returns a list of all class names from all architectures.
:return: A list of all class names from all architectures.
:rtype: list[str]
"""
s = set()
for name, architecture in self.architectures.iteritems():
s = s.union(set(architecture.all_class_names()))
return list(s)
def read_directory_path(self, dir_path):
"""
Reads all files from directory.
:param str dir_path: Path to directory.
"""
# Go through all files in input directory and read it.
for root, dirs, files in os.walk(dir_path):
for file_name in files:
# Check if it is a JSON file.
if not file_name.endswith(".json"):
continue
# File path.
file_path = os.path.join(root, file_name)
self.read_file_path(file_path)
def read_file_path(self, file_path):
"""
Reads a file at given path.
:param str file_path: File path.
"""
with open(file_path, "r") as f:
self.read_file(f)
def read_file(self, f):
"""
Reads a file object.
:param f: File to read.
"""
json_data = json.load(f)
""":type: dict[str, dict]"""
self.read_json(json_data)
def read_json(self, json_data):
"""
Reads a JSON data.
:param dict[str, dict] json_data: Dictionary representation of JSON data of protocol.
"""
for architecture_name in json_data:
architecture = self.get_architecture(architecture_name)
# Create architecture.
if not architecture:
architecture = Architecture(architecture_name)
self.architectures[architecture_name] = architecture
architecture.read_json(json_data[architecture_name])
def save_to_folder(self, folder_path):
"""
Saves all classes from all architectures as JSON files to given folder path.
:param str folder_path: Path to output folder.
"""
        # Nothing to save when the module has no classes.
        if len(self.all_class_names()) == 0:
            return
        # Create output directory if needed.
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
# Module map.
module_map = dict()
# Get every class.
classes = self.all_class_names()
for class_name in classes:
class_data = dict()
class_file_name = None
# Get class data from all architectures.
            for architecture in self.architectures.values():
c = architecture.get_class(class_name)
class_data[architecture.name] = c.json_data()
class_file_name = c.get_file_name()
# Module map info.
module_map[class_name] = class_file_name
# Save class data to file.
class_file_path = os.path.join(folder_path, class_file_name)
with open(class_file_path, "w") as f:
json.dump(class_data, f, sort_keys=True, indent=2, separators=(",", ":"))
print("Saving {}.{}.".format(self.name, class_name))
# Save module map.
module_map_file_path = os.path.join(folder_path, module_map_file_name)
with open(module_map_file_path, "w") as f:
json.dump(module_map, f, sort_keys=True, indent=2, separators=(",", ":"))
def _read_module_map(self):
"""
Reads module map file.
"""
log = logging.getLogger(__name__)
# Check if module map exists.
module_map_file_path = os.path.join(self.dir_path, module_map_file_name)
if not os.path.exists(module_map_file_path):
log.error("Module: _read_module_map: Cannot find module map \"{}\" at \"{}\".".format(self.name, module_map_file_path))
raise StandardError()
# Reads module map into memory.
with open(module_map_file_path, "r") as f:
self.module_file_map = json.load(f)
def get_class_or_load(self, architecture_name, class_name):
"""
Get Class object for given architecture and class name. Loads data if needed.
:param str | unicode architecture_name: Architecture name.
:param str | unicode class_name: Class name.
:return: Class object for given architecture and class name.
:rtype: Class | None
"""
log = logging.getLogger(__name__)
# Load architecture.
a = self.get_architecture(architecture_name)
if not a:
a = Architecture(architecture_name)
self.architectures[architecture_name] = a
# Load class.
c = a.get_class(class_name)
if c:
return c
        # Class not loaded yet; read it via the module map.
if class_name in self.module_file_map:
# Get path to class json.
class_path = self.module_file_map[class_name]
class_path = os.path.join(self.dir_path, class_path)
            # File doesn't exist.
if not os.path.exists(class_path):
log.error("Module: get_class_or_load: Cannot find file: \"{}\".".format(class_path))
return None
# Open file.
with open(class_path, "r") as f:
json_data = json.load(f)
# File is empty.
if not json_data:
log.error("Module: get_class_or_load: Cannot open file \"{}\".".format(class_path))
return None
            # File doesn't contain architecture information.
if architecture_name not in json_data:
log.error("Module: get_class_or_load: Cannot find architecture in \"{}\".".format(class_path))
return None
# Read JSON data.
class_data = json_data[architecture_name]
# Create class object.
c = a.read_json(class_data)
return c
def __str__(self):
return "<{}: {}>".format(self.__class__.__name__, self.name)
class Architecture(object):
"""
Represent one CPU architecture.
:param str name: Name of architecture.
:param dict[str, Class] classes: Maps class name to Class object.
"""
def __init__(self, name):
"""
:param str name: Name of architecture.
"""
super(Architecture, self).__init__()
self.name = name
self.classes = dict()
def all_class_names(self):
"""
Returns all class names.
:return: All class names.
:rtype: list[str]
"""
return self.classes.keys()
def get_class(self, name):
"""
Returns class with given name.
:param str name: Class name.
:return: Class with given name.
:rtype: Class | None
"""
if name in self.classes:
return self.classes[name]
return None
def read_json(self, json_data):
"""
Reads JSON content of class and adds it to the list of classes
:param dict[str, str | object] json_data: Dictionary representation of JSON data of class.
:return: Return parsed class.
:rtype: Class | None
"""
if json_data is None:
return None
if "type" not in json_data:
return None
t = json_data["type"]
if t == "class":
c = Class(json_data)
self.classes[c.class_name] = c
return c
return None
def __str__(self):
return "<{}: {}, classes:{}>".format(self.__class__.__name__, self.name, len(self.classes))
class Protocol(object):
"""
Represents protocol.
:param str protocol_name: Protocol name.
:param list[str] protocols: List of protocols names.
:param list properties: List of properties.
:param list class_methods: List of class methods.
:param list instance_methods: List of instance methods.
:param list optional_class_methods: List of optional class methods.
:param list optional_instance_methods: List of optional instance methods.
:param str type: Type of object (always "protocol").
"""
def __init__(self, json_data=None):
"""
:param dict[str, str | list[str] | object] json_data: Dictionary representation of JSON data of protocol.
"""
super(Protocol, self).__init__()
self.protocol_name = None
self.protocols = list()
self.properties = list()
self.class_methods = list()
self.instance_methods = list()
self.optional_class_methods = list()
self.optional_instance_methods = list()
self.type = "protocol"
self.read_json(json_data)
def get_file_name(self):
"""
Returns JSON file name for given protocol.
:return: JSON file name for given protocol.
:rtype: str
"""
return "{}-Protocol.json".format(self.protocol_name)
def read_json(self, json_data):
"""
Reads JSON data and stores data in local parameters.
:param dict[str, str | list[str] | object] json_data: Dictionary representation of JSON data of protocol.
"""
if json_data is None:
return
if "protocolName" in json_data:
self.protocol_name = json_data["protocolName"]
if "protocols" in json_data:
self.protocols = json_data["protocols"]
def json_data(self):
"""
Returns JSON representation of Protocol object as dictionary.
:return: JSON representation of Protocol object as dictionary.
:rtype: dict[str, str | list[str] | object]
"""
j = dict()
""":type: dict[str, str | list[str] | object]"""
if self.protocol_name:
j["protocolName"] = self.protocol_name
if len(self.protocols) > 0:
j["protocols"] = self.protocols
j["type"] = self.type
return j
class Class(Protocol):
"""
Represents class.
:param str class_name: Name of class.
:param str super_class_name: Name of super class.
:param list[Ivar] ivars: List of ivars.
:param str type: Type of object (always "class").
"""
def __init__(self, json_data=None):
"""
:param dict[str, str | list] json_data: Dictionary representation of JSON data of protocol.
"""
super(Class, self).__init__()
self.class_name = None
self.super_class_name = None
self.ivars = list()
self.type = "class"
self.read_json(json_data)
def get_file_name(self):
"""
Returns JSON file name for given class.
:return: JSON file name for given class.
:rtype: str
"""
return "{}.json".format(self.class_name)
def get_ivar(self, ivar_name):
"""
Returns ivar with given name.
:return: ivar with given name.
:rtype: Ivar | None
"""
for ivar in self.ivars:
if ivar.name == ivar_name:
return ivar
return None
def read_json(self, json_data):
"""
Reads JSON data and stores data in local parameters.
:param dict[str, str | list] json_data: Dictionary representation of JSON data of protocol.
"""
if json_data is None:
return
super(Class, self).read_json(json_data)
self.protocol_name = None
if "className" in json_data:
self.class_name = json_data["className"]
if "superClassName" in json_data:
self.super_class_name = json_data["superClassName"]
if "ivars" in json_data:
ivars_j = json_data["ivars"]
ivars = list()
""":type: list[Ivar]"""
for ivar_j in ivars_j:
ivar = Ivar(ivar_j)
ivars.append(ivar)
self.ivars = ivars
def json_data(self):
"""
Returns JSON representation of Class object as dictionary.
:return: JSON representation of Class object as dictionary.
:rtype: dict[str, str | list]
"""
j = super(Class, self).json_data()
# Class name.
if self.class_name:
j["className"] = self.class_name
# Super class name.
if self.super_class_name:
j["superClassName"] = self.super_class_name
# ivars.
ivars_j = list()
for ivar in self.ivars:
ivar_j = ivar.json_data()
ivars_j.append(ivar_j)
if len(ivars_j) > 0:
j["ivars"] = ivars_j
# Type
j["type"] = self.type
return j
def __str__(self):
return "<{} {}>".format(self.__class__.__name__, self.class_name)
class Ivar(object):
"""
Represents ivar.
:param int alignment: ivar alignment.
:param str ivarType: Type of ivar.
:param str name: Name of ivar.
:param int offset: Offset of ivar.
:param int size: Size of ivar.
:param str type: Type of object (always "ivar").
"""
def __init__(self, json_data=None):
"""
:param dict[str, str | int] json_data: Dictionary representation of JSON data of ivar.
"""
super(Ivar, self).__init__()
self.alignment = None
self.ivarType = None
self.name = None
self.offset = None
self.size = None
self.type = "ivar"
self.read_json(json_data)
def read_json(self, json_data):
"""
Reads JSON data and stores data in local parameters.
:param dict[str, str | int] json_data: Dictionary representation of JSON data of ivar.
"""
if json_data is None:
return
if "alignment" in json_data:
self.alignment = json_data["alignment"]
if "ivarType" in json_data:
self.ivarType = json_data["ivarType"]
if "name" in json_data:
self.name = json_data["name"]
if "offset" in json_data:
self.offset = json_data["offset"]
if "size" in json_data:
self.size = json_data["size"]
def json_data(self):
"""
Returns JSON representation of Ivar object as dictionary.
:return: JSON representation of Ivar object as dictionary.
:rtype: dict[str, str | int]
"""
j = dict()
""":type: dict[str, str | int]"""
if self.alignment:
j["alignment"] = self.alignment
if self.ivarType:
j["ivarType"] = self.ivarType
if self.name:
j["name"] = self.name
if self.offset:
j["offset"] = self.offset
if self.size:
j["size"] = self.size
j["type"] = self.type
return j
def __str__(self):
return "<{} {}, {}>".format(self.__class__.__name__, self.name, self.offset)
|
|
import requests
import unittest
import json
protocol = "http"
#host = "tactile-petal-92303.appspot.com"
host = "localhost:8080"
def genUrl(URI):
return "%s://%s/%s" %(protocol, host, URI)
class CreateUserIDTest(unittest.TestCase):
"""Create User test case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
self.r = requests.post(genUrl("user/"))
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# test length
def testLength5(self):
"""Test length 5"""
self.assertEqual(len(self.r.text), 5, "User ID should be length 5")
# test lowercase
def testAllLower(self):
"""Test all lower"""
lowercase_letters = ''.join(c for c in self.r.text if c.islower())
self.assertEqual(len(lowercase_letters), 5, "All IDs should be lowercase")
#test alphabets
def testAllCharacters(self):
import string
alphabets = ''.join(c for c in self.r.text if c in string.ascii_letters)
        self.assertEqual(len(alphabets), 5, "User ID should contain only letters")
#test no repeat
def testNoRepeat(self):
IDs = set()
IDs.add(self.r.text)
for n in range(1):
r = requests.post(genUrl("user/"))
self.assertTrue(r.text not in IDs, "There is a repeat user ID")
IDs.add(r.text)
class UserTest(unittest.TestCase):
"""Get User case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# test existing
def testCreateUser(self):
"""Test Existing ID"""
r = requests.post(genUrl("user/"))
userID = r.text
r = requests.get(genUrl("user/%s" % userID))
self.assertEqual(r.status_code, 200, "There should be a user created")
def testDeleteUser(self):
r = requests.post(genUrl("user/"))
userID = r.text
r = requests.delete(genUrl("user/%s" % userID))
self.assertEqual(r.status_code, 200, "Status code of delete user should be 200")
r = requests.get(genUrl("user/%s" % userID))
self.assertEqual(r.status_code, 404)
def testGetUserInfo(self):
""" Test if the information from server is correct"""
r = requests.post(genUrl("user/"))
userID = r.text
r = requests.get(genUrl("user/%s" % userID))
try:
r.json()
except ValueError:
self.assertTrue(False, "No JSON object could be decoded")
def testGetDefaultUserInfo(self):
r = requests.post(genUrl("user/"))
userID = r.text
r = requests.get(genUrl("user/%s" % userID))
try:
obj = r.json()
self.assertIn("Name", obj)
self.assertIn("XP", obj)
self.assertIn("Level", obj)
self.assertIn("Gold", obj)
self.assertEqual(obj["Name"], "")
self.assertEqual(obj["XP"], 0)
self.assertEqual(obj["Level"], 1)
self.assertEqual(obj["Gold"], 0)
except ValueError:
self.assertTrue(False, "No JSON object could be decoded")
def testGetAlteredUserInfo(self):
r = requests.post(genUrl("user/"))
userID = r.text
r = requests.put(genUrl("user/gold/%s"%userID), data="100")
r = requests.put(genUrl("user/XP/%s"%userID), data="100")
r = requests.get(genUrl("user/%s" % userID))
try:
obj = r.json()
self.assertIn("Name", obj)
self.assertIn("XP", obj)
self.assertIn("Level", obj)
self.assertIn("Gold", obj)
self.assertEqual(obj["Name"], "")
self.assertEqual(obj["XP"], 100)
self.assertEqual(obj["Level"], 2)
self.assertEqual(obj["Gold"], 100)
except ValueError:
self.assertTrue(False, "No JSON object could be decoded")
def testDelete1User(self):
""" Create 2 users, delete 1, check if it is properly deleted"""
r = requests.post(genUrl("user/"))
userID1 = r.text
r = requests.post(genUrl("user/"))
userID2 = r.text
r = requests.delete(genUrl("user/%s" % userID1))
r = requests.get(genUrl("user/%s" % userID1))
self.assertEqual(r.status_code, 404)
r = requests.get(genUrl("user/%s" % userID2))
try:
obj = r.json()
self.assertIn("Name", obj)
self.assertIn("XP", obj)
self.assertIn("Level", obj)
self.assertIn("Gold", obj)
self.assertEqual(obj["Name"], "")
self.assertEqual(obj["XP"], 0)
self.assertEqual(obj["Level"], 1)
self.assertEqual(obj["Gold"], 0)
except ValueError:
self.assertTrue(False, "No JSON object could be decoded")
def testDeleteAllUsers(self):
r = requests.post(genUrl("user/"))
userID1 = r.text
r = requests.post(genUrl("user/"))
userID2 = r.text
r = requests.delete(genUrl("user/"))
self.assertEqual(r.status_code, 200, "Status code of delete all users should be 200")
r = requests.get(genUrl("user/%s" % userID1))
self.assertEqual(r.status_code, 404)
r = requests.get(genUrl("user/%s" % userID2))
self.assertEqual(r.status_code, 404)
class UserNameTest(unittest.TestCase):
"""Get User Name case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
r = requests.post(genUrl("user/"))
self.userID = r.text
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
def getUserName(self, userID=None):
if not userID:
userID = self.userID
r = requests.get(genUrl("user/%s" % userID))
try:
obj = r.json()
self.assertIn("Name", obj, "There should be a key \'Name\'")
return obj["Name"]
except ValueError:
self.assertTrue(False, "No JSON object could be decoded")
# test existing
def testExistingID(self):
"""Test Existing ID with no user name set"""
userName = self.getUserName()
self.assertEqual(userName, "", "Username should be blank by default")
def testNonExistingID(self):
userID = "aaaaa"
r = requests.get(genUrl("user/%s" % userID))
self.assertEqual(r.status_code, 404, "Non existing user")
def testSetName(self):
userName = "wangchuck"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
self.assertEqual(r.status_code, 200, "Set user name should be successful")
self.assertEqual(self.getUserName(), userName, "User name retrieved should be the same as the one set")
def testSetEmptyName(self):
userName = ""
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
        self.assertEqual(r.status_code, 400, "Setting an empty user name should be rejected with 400")
        self.assertEqual(self.getUserName(), userName, "User name should remain empty")
def testSetEmptyThenValidName(self):
userName = ""
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
        self.assertEqual(r.status_code, 400, "Setting an empty user name should be rejected with 400")
        self.assertEqual(self.getUserName(), userName, "User name should remain empty")
userName = "wangchuck"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
self.assertEqual(r.status_code, 200, "Set user name should be successful")
self.assertEqual(self.getUserName(), userName, "User name retrieved should be the same as the one set")
def testNameWithSpace(self):
userName = "Mother of Dragons"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
self.assertEqual(r.status_code, 200, "Set user name should be successful")
self.assertEqual(self.getUserName(), userName, "User name retrieved should be the same as the one set")
def testResetName(self):
""" Test if the name can be set a 2nd time """
userName1 = "wangchuck"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName1)
self.assertEqual(r.status_code, 200, "Set user name should be successful")
self.assertEqual(self.getUserName(), userName1, "User name retrieved should be the same as the one set")
userName2 = "Waterloo"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName2)
self.assertEqual(r.status_code, 405, "Set user name should be unsuccessful 405")
self.assertEqual(self.getUserName(), userName1, "User name retrieved should be the same as the first one set")
def testInvalidName(self):
userName = "@$%^"
r = requests.post(genUrl("user/name/%s" % self.userID), data=userName)
self.assertEqual(r.status_code, 400, "Should be a bad request, bad user name")
self.assertEqual(self.getUserName(), "", "User name retrieved should be empty")
class XPTest(unittest.TestCase):
"""Get User XP case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
r = requests.post(genUrl("user/"))
self.userID = r.text
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# test existing
def testExistingID(self):
"""Test Existing ID to see if the call passes"""
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testNonExistingID(self):
""" to see if the call fails """
r = requests.get(genUrl("user/XP/%s" % "aaaaa"))
self.assertEqual(r.status_code, 404, "Status code should be 404 not found")
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % "aaaaa"), data=XPVal)
self.assertEqual(r.status_code, 404, "Status code should be 404 not found")
def testSetValidXP(self):
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, XPVal, "XP should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetHigherXP(self):
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, XPVal, "XP should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
highXPVal = "400"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=highXPVal)
self.assertEqual(r.status_code, 200, "Status code should be 400, XP won't go lower")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, highXPVal, "XP should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetLowerXP(self):
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, XPVal, "XP should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
lowXPVal = "200"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=lowXPVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, XP won't go lower")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, XPVal, "XP should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testGetDefaultXP(self):
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, "0", "XP should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetNegativeXP(self):
XPVal = "-300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, no negative values")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, "0", "XP should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetAlphabetXP(self):
XPVal = "abc"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, no alphabet values")
r = requests.get(genUrl("user/XP/%s" % self.userID))
self.assertEqual(r.text, "0", "XP should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
class GoldTest(unittest.TestCase):
"""Gold test case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
r = requests.post(genUrl("user/"))
self.userID = r.text
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# test existing
def testExistingID(self):
"""Test Existing ID to see if call passes"""
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testNonExistingID(self):
""" Test non existing ID to see if call fails """
r = requests.get(genUrl("user/gold/%s" % "aaaaa"))
self.assertEqual(r.status_code, 404, "Status code should be 404 not found")
goldVal = "300"
r = requests.put(genUrl("user/gold/%s" % "aaaaa"), data=goldVal)
self.assertEqual(r.status_code, 404, "Status code should be 404 not found")
def testSetValidGold(self):
goldVal = "300"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, goldVal, "Gold value should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetMoreOrLessGold(self):
goldVal1 = "300"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal1)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, goldVal1, "Gold value should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
goldVal2 = "400"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal2)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, goldVal2, "Gold value should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
goldVal3 = "100"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal3)
self.assertEqual(r.status_code, 200, "Status code should be 200")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, goldVal3, "Gold value should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testGetDefaultGold(self):
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, "0", "Gold should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetNegativeGold(self):
goldVal = "-300"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, no negative gold")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, "0", "Gold value should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetAlphabetGold(self):
goldVal = "jiu"
r = requests.put(genUrl("user/gold/%s" % self.userID), data=goldVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, no alphabet gold")
r = requests.get(genUrl("user/gold/%s" % self.userID))
self.assertEqual(r.text, "0", "Gold value should be 0 by default")
self.assertEqual(r.status_code, 200, "Status code should be 200")
class LevelTest(unittest.TestCase):
"""Get User level case"""
# preparing to test
def setUp(self):
""" Setting up for the test """
requests.delete(genUrl("user/"))
r = requests.post(genUrl("user/"))
self.userID = r.text
# ending the test
def tearDown(self):
"""Cleaning up after the test"""
# test existing
def testExistingID(self):
"""Test Existing ID"""
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testNonExistingID(self):
""" to see if the call fails """
r = requests.get(genUrl("user/level/%s" % "aaaaa"))
self.assertEqual(r.status_code, 404, "Status code should be 404 not found")
XPVal = "300"
r = requests.put(genUrl("user/level/%s" % "aaaaa"), data=XPVal)
self.assertEqual(r.status_code, 405, "Status code should be 405 not allowed")
def testSetValidLevel(self):
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
levelVal = "4"
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, levelVal, "Level should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testGetDefaultLevel(self):
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, "1", "Level should be default 1")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetLowerLevels(self):
"""Set first time"""
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
levelVal = "4"
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, levelVal, "Level should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
"""Set lower Level"""
XPVal = "200"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 400, "Status code should be 400, level set should not be lower")
levelVal = "4"
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, levelVal, "Level should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def testSetHigherLevels(self):
"""Set first time"""
XPVal = "300"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
levelVal = "4"
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, levelVal, "Level should be set")
self.assertEqual(r.status_code, 200, "Status code should be 200")
"""Set higher Level"""
XPVal = "400"
r = requests.put(genUrl("user/XP/%s" % self.userID), data=XPVal)
self.assertEqual(r.status_code, 200, "Status code should be 200")
levelVal = "5"
r = requests.get(genUrl("user/level/%s" % self.userID))
self.assertEqual(r.text, levelVal, "Level should be set correctly")
self.assertEqual(r.status_code, 200, "Status code should be 200")
def runTestsFromFile(filename):
    # genUrl() reads the module-level `host`, so rebind the global for each student.
    global host
    submission = []
    with open(filename, 'r') as f:
        submission = f.readlines()
    submission = [line.strip().split(",") for line in submission]
    for student in submission:
        if student[0][0] == '#':
            print("Skipping %s" % (student[0]))
            continue
        host = student[1]
        print("Testing %s %s" % (student[0], student[1]))
with open("Result_%s.txt" % student[0], "w") as f:
assignment1Runner = unittest.TextTestRunner(stream=f, descriptions=False, verbosity=2)
testResult = assignment1Runner.run(assignment1Suite)
f.write(str(testResult)+"\n\n")
f.write("Tests Run = %d\n"%(testResult.testsRun))
if testResult.errors:
f.write("Errors\n")
for error in testResult.errors:
f.write("%s\n%s\n"%(error[0].id, error[1]))
if testResult.failures:
f.write("Failures\n")
for failure in testResult.failures:
f.write("%s\n%s\n"%(failure[0].id, failure[1]))
passedTests = testResult.testsRun - len(testResult.failures) - len(testResult.errors)
f.write("Total Tests = %d, Passed Test = %d, Failed Test = %d, Errors = %d"%(testResult.testsRun, passedTests, len(testResult.failures), len(testResult.errors)))
if __name__ == "__main__":
# creating a new test suite
assignment1Suite = unittest.TestSuite()
# adding a test case
assignment1Suite.addTest(unittest.makeSuite(CreateUserIDTest))
assignment1Suite.addTest(unittest.makeSuite(UserTest))
assignment1Suite.addTest(unittest.makeSuite(UserNameTest))
assignment1Suite.addTest(unittest.makeSuite(XPTest))
assignment1Suite.addTest(unittest.makeSuite(GoldTest))
assignment1Suite.addTest(unittest.makeSuite(LevelTest))
#assignment1Runner = unittest.TextTestRunner()
#unittest.main()
nameList = "submission.csv"
import os.path
if os.path.isfile(nameList):
runTestsFromFile(nameList)
else:
unittest.main()
|
|
from .models import ReceiptFile, RealThingSize, ExchangeInvolvement, MoneyBag, MoneyBagPiece, Exchange, PaymentMethod, Pledge
from what_apps.people.models import GenericParty
def exchange_with_other_party(party, member, receipt_image=None, date=None):
'''
Dehydration Function.
Takes the name of some other party and some member, and returns the following in a tuple:
*an exchange object
*the other party's involvement object
*our involvement object
'''
if receipt_image:
receipt = ReceiptFile.objects.create(file=receipt_image)
exchange = Exchange.objects.create(manager=member, invoice_data=receipt)
else:
exchange = Exchange.objects.create(manager=member)
if date:
exchange.start_date = date
exchange.save()
their_involvement = ExchangeInvolvement.objects.create(exchange = exchange, party = party )
our_involvement = ExchangeInvolvement(exchange=exchange)
our_involvement.slashroot_as_party()
our_involvement.save()
return exchange, their_involvement, our_involvement
def pledges_from_involvements(buyer_involvement=None, seller_involvement=None):
'''
Dehydration Function.
Creates symmetrical pledges and adds them to their respective involvements.
'''
seller_pledge = Pledge.objects.create(pursuant_to=seller_involvement, recipient=buyer_involvement)
buyer_pledge = Pledge.objects.create(pursuant_to=buyer_involvement, recipient=seller_involvement)
#This probably ought to be a function somewhere.
seller_pledge.incoming_pledges.add(buyer_pledge)
buyer_pledge.incoming_pledges.add(seller_pledge)
return buyer_pledge, seller_pledge
def donation_from_POST(post, member):
'''
Takes a POST from the donation form, figures that shit out.
'''
donor = party_from_encrypted_string(post['lookup_other_party'])
exchange, donor_involvement, our_involvement = exchange_with_other_party(donor, member)
donor_pledge = Pledge(pursuant_to = donor_involvement, recipient=our_involvement)
donor_pledge.save()
#Do I love these next 8 lines? No. No I do not. -Justin
    try:
        num_money_bags = int(post['Money-TOTAL_FORMS'])
    except (KeyError, ValueError):
        num_money_bags = 0
    try:
        #TODO: This reads the same 'Money-TOTAL_FORMS' key as the money-bag count; it looks copy-pasted and probably wants the real-thing formset's key.
        num_real_things = int(post['Money-TOTAL_FORMS'])
    except (KeyError, ValueError):
        num_real_things = 0
for n in range(1, num_money_bags + 1):
method = PaymentMethod.objects.get(id=post['Money-' + str(n) + '-method'])
amount = post['Money-' + str(n) + '-amount']
money_bag = MoneyBag.objects.create(method = method)
piece = MoneyBagPiece.objects.create(money_bag = money_bag, amount = amount)
#Add this money bag as an item in the donor's involvement.
donor_pledge.items.add(piece)
donor_pledge.save()
#TODO: Hook up real-thing donations
donor_pledge.deliver()
return exchange
def buy_item(group=False, deliver=False, seller_involvement=None, buyer_involvement=None, trade_element=None, trade_element_kwargs=None, money_bag=None, price=None, quantity=None):
'''
A dehydration method specially for trading a real thing for a money bag.
Makes pledges and returns them.
Pledges will be delivered if deliver=True.
If quantity is more than one, items will each be assigned their own pledge unless group=True.
'''
count = quantity
if group:
buyer_pledge, seller_pledge = pledges_from_involvements(buyer_involvement=buyer_involvement, seller_involvement=seller_involvement)
for counter in range(count): #We'll iterate through once for each item in the quantity.
if not group:
buyer_pledge, seller_pledge = pledges_from_involvements(buyer_involvement=buyer_involvement, seller_involvement=seller_involvement)
        #Get the size details, defaulting to None so later checks don't hit unbound names.
        quantification_method = None
        amount = None
        property = None
        try:
            quantification_method = trade_element_kwargs.pop('unit of quantification')
            amount = trade_element_kwargs.pop('amount')
        except KeyError:
            #Guess they don't have an amount for their quantification method.
            #TODO: Neaten this.
            pass
        try:
            property = trade_element_kwargs.pop('property')
        except KeyError:
            #TODO: This is pretty lame. Property is supposed to be ManyToMany.
            pass
#Make the thing
real_thing = trade_element.objects.create(**trade_element_kwargs) #The kwargs will have come most recently from get_purchase_details, which in turns gets them from a form.
        #Make and associate the size of the thing, if size details were supplied.
        if quantification_method is not None and amount is not None:
            RealThingSize.objects.create(unit=quantification_method, number=amount, thing=real_thing)
        if property:
            real_thing.properties.add(property)
seller_pledge.items.add(real_thing)
#..and we give them some money.
piece = MoneyBagPiece.objects.create(money_bag = money_bag, amount = price)
buyer_pledge.items.add(piece)
#Deliver both.
#TODO: Add shipping tracking here.
if deliver:
seller_pledge.deliver()
buyer_pledge.deliver(change_owner=True)
return seller_pledge, buyer_pledge
def get_purchase_details(main_form, item_forms, member, receipt_image=None, date=None):
'''
Takes the following:
    *A main_form, which has information about the overall purchase.
    *item_forms, each of which should be a tuple of a formset and a model
*member - the member who is entering the transaction
*deliver - a boolean about whether delivery has yet been made.
TODO: Fix and finish.
'''
    houston_we_have_a_problem = not main_form.is_valid()
#We have more than one formset (call it a formset-set) - let's check each one.
for bound_formset, model in item_forms:
if not bound_formset.is_valid():
houston_we_have_a_problem = True
if houston_we_have_a_problem:
#We have a problem. Tell them so and spit back the forms.
return False, (main_form, item_forms)
#Nothing seems to have caused a problem. Proceed.
vendor = main_form.cleaned_data['other_party']
exchange, vendor_involvement, our_involvement = exchange_with_other_party(vendor, member, receipt_image=receipt_image, date=date)
SlashRoot = our_involvement.party
#For the moment, we're assuming that the first formset is device, the second is item, the third is ingredient.
#Assume this is one money bag for the entire purchase.
method = main_form.cleaned_data['payment_method']
money_bag = MoneyBag.objects.create(method = method)
for formset, model in item_forms:
#Each formset is for a type of purchase (device, ingredient, etc)
#So, within this loop, we're dealing with a formset which contains one form for each item purchased.
for form in formset:
#First we need to actually instantiate the device (assume, for a moment, that this device is not yet in our ecosystem)
#TODO: Review this code to ensure that devices (or, for that matter, other objects) can't get created only to be thwarted by some error or odd circumstance further down in the code.
try:
#Copy the cleaned data into a dict
purchase_form_dict = form.cleaned_data.copy()
#Pop four important pieces of information out of the dict
quantity = purchase_form_dict.pop('quantity')
price = purchase_form_dict.pop('price_per')
deliver = purchase_form_dict.pop('deliver')
group = purchase_form_dict.pop('group')
#This is standard (we are the new owner)
standard_trade_element_dict = {
#I guess there's nothing left here.
#TODO: Examine if this dict is still needed.
}
#Merge what's left of the popped dict into the standard dict and we have our kwargs.
trade_element_dict = dict(standard_trade_element_dict, **purchase_form_dict)
except KeyError:
#TODO: Figure out why some forms come in blank (and thus produce KeyError)
continue
vendor_pledge, our_pledge = buy_item(seller_involvement = vendor_involvement,
buyer_involvement = our_involvement,
trade_element = model,
trade_element_kwargs = trade_element_dict,
price = price,
money_bag = money_bag,
quantity = quantity,
deliver = deliver,
group = group,
)
return True, [vendor_involvement, our_involvement]
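#---------------------------------------------------------------------------
#Illustrative sketch (not called anywhere in this app): how the dehydration
#helpers above compose for a simple cash purchase. The PaymentMethod lookup,
#the model passed as trade_element, its kwargs and the price/quantity values
#are hypothetical; real callers get them from get_purchase_details() and its
#forms.
#---------------------------------------------------------------------------
def _example_cash_purchase(vendor_party, member, thing_model):
    exchange, vendor_involvement, our_involvement = exchange_with_other_party(vendor_party, member)
    cash = PaymentMethod.objects.get(name="cash") #Assumes such a method exists.
    money_bag = MoneyBag.objects.create(method=cash)
    #One pledge pair per item (group=False), delivered immediately.
    return buy_item(
        deliver=True,
        seller_involvement=vendor_involvement,
        buyer_involvement=our_involvement,
        trade_element=thing_model,
        trade_element_kwargs={'unit of quantification': 'pound', 'amount': 2},
        money_bag=money_bag,
        price=10,
        quantity=1,
    )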
|
|
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import Int32StringReceiver
from carbon.conf import settings
from carbon.util import pickle
from carbon import log, state, instrumentation
from collections import deque
from time import time
SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * settings.QUEUE_LOW_WATERMARK_PCT
class CarbonClientProtocol(Int32StringReceiver):
def connectionMade(self):
log.clients("%s::connectionMade" % self)
self.paused = False
self.connected = True
self.transport.registerProducer(self, streaming=True)
# Define internal metric names
self.lastResetTime = time()
self.destinationName = self.factory.destinationName
self.queuedUntilReady = 'destinations.%s.queuedUntilReady' % self.destinationName
self.sent = 'destinations.%s.sent' % self.destinationName
self.relayMaxQueueLength = 'destinations.%s.relayMaxQueueLength' % self.destinationName
self.batchesSent = 'destinations.%s.batchesSent' % self.destinationName
self.slowConnectionReset = 'destinations.%s.slowConnectionReset' % self.destinationName
self.factory.connectionMade.callback(self)
self.factory.connectionMade = Deferred()
self.sendQueued()
def connectionLost(self, reason):
log.clients("%s::connectionLost %s" % (self, reason.getErrorMessage()))
self.connected = False
def pauseProducing(self):
self.paused = True
def resumeProducing(self):
self.paused = False
self.sendQueued()
def stopProducing(self):
self.disconnect()
def disconnect(self):
if self.connected:
self.transport.unregisterProducer()
self.transport.loseConnection()
self.connected = False
def sendDatapoint(self, metric, datapoint):
self.factory.enqueue(metric, datapoint)
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.sendQueued)
def _sendDatapoints(self, datapoints):
self.sendString(pickle.dumps(datapoints, protocol=-1))
instrumentation.increment(self.sent, len(datapoints))
instrumentation.increment(self.batchesSent)
self.factory.checkQueue()
def sendQueued(self):
"""This should be the only method that will be used to send stats.
In order to not hold the event loop and prevent stats from flowing
in while we send them out, this will process
settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
are still items in the queue, this will invoke reactor.callLater
to schedule another run of sendQueued after a reasonable enough time
for the destination to process what it has just received.
        Given a queue size of one million stats, and using a
        chained_invocation_delay of 0.0001 seconds, you'd get at most
        10,000 sendQueued() invocations/second. With a
        settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
        sent could theoretically be as high as 1,000,000 stats/sec, or
        60,000,000 stats/minute. This is probably too high for a typical
        receiver to handle.
In practice this theoretical max shouldn't be reached because
network delays should add an extra delay - probably on the order
of 10ms per send, so the queue should drain with an order of
minutes, which seems more realistic.
"""
chained_invocation_delay = 0.0001
queueSize = self.factory.queueSize
instrumentation.max(self.relayMaxQueueLength, queueSize)
if self.paused:
instrumentation.max(self.queuedUntilReady, queueSize)
return
if not self.factory.hasQueuedDatapoints():
return
if settings.USE_RATIO_RESET is True:
if not self.connectionQualityMonitor():
self.resetConnectionForQualityReasons("Sent: {0}, Received: {1}".format(
instrumentation.prior_stats.get(self.sent, 0),
instrumentation.prior_stats.get('metricsReceived', 0)))
self._sendDatapoints(self.factory.takeSomeFromQueue())
if (self.factory.queueFull.called and
queueSize < SEND_QUEUE_LOW_WATERMARK):
self.factory.queueHasSpace.callback(queueSize)
if self.factory.hasQueuedDatapoints():
reactor.callLater(chained_invocation_delay, self.sendQueued)
def connectionQualityMonitor(self):
"""Checks to see if the connection for this factory appears to
be delivering stats at a speed close to what we're receiving
them at.
This is open to other measures of connection quality.
        Returns a bool:
          True  - quality is good, or the total received is less than
                  settings.MIN_RESET_STAT_FLOW
          False - quality is bad
        """
destination_sent = float(instrumentation.prior_stats.get(self.sent, 0))
total_received = float(instrumentation.prior_stats.get('metricsReceived', 0))
instrumentation.increment(self.slowConnectionReset, 0)
if total_received < settings.MIN_RESET_STAT_FLOW:
return True
if (destination_sent / total_received) < settings.MIN_RESET_RATIO:
return False
else:
return True
def resetConnectionForQualityReasons(self, reason):
"""Only re-sets the connection if it's been
settings.MIN_RESET_INTERVAL seconds since the last re-set.
Reason should be a string containing the quality info that led to
a re-set.
"""
if (time() - self.lastResetTime) < float(settings.MIN_RESET_INTERVAL):
return
else:
self.factory.connectedProtocol.disconnect()
self.lastResetTime = time()
instrumentation.increment(self.slowConnectionReset)
log.clients("%s:: resetConnectionForQualityReasons: %s" % (self, reason))
def __str__(self):
return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination)
__repr__ = __str__
class CarbonClientFactory(ReconnectingClientFactory):
maxDelay = 5
def __init__(self, destination):
self.destination = destination
self.destinationName = ('%s:%d:%s' % destination).replace('.', '_')
self.host, self.port, self.carbon_instance = destination
self.addr = (self.host, self.port)
self.started = False
# This factory maintains protocol state across reconnects
self.queue = deque() # Change to make this the sole source of metrics to be sent.
self.connectedProtocol = None
self.queueEmpty = Deferred()
self.queueFull = Deferred()
self.queueFull.addCallback(self.queueFullCallback)
self.queueHasSpace = Deferred()
self.queueHasSpace.addCallback(self.queueSpaceCallback)
self.connectFailed = Deferred()
self.connectionMade = Deferred()
self.connectionLost = Deferred()
# Define internal metric names
self.attemptedRelays = 'destinations.%s.attemptedRelays' % self.destinationName
self.fullQueueDrops = 'destinations.%s.fullQueueDrops' % self.destinationName
self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName
def queueFullCallback(self, result):
state.events.cacheFull()
log.clients('%s send queue is full (%d datapoints)' % (self, result))
def queueSpaceCallback(self, result):
if self.queueFull.called:
log.clients('%s send queue has space available' % self.connectedProtocol)
self.queueFull = Deferred()
self.queueFull.addCallback(self.queueFullCallback)
state.events.cacheSpaceAvailable()
self.queueHasSpace = Deferred()
self.queueHasSpace.addCallback(self.queueSpaceCallback)
def buildProtocol(self, addr):
self.connectedProtocol = CarbonClientProtocol()
self.connectedProtocol.factory = self
return self.connectedProtocol
def startConnecting(self): # calling this startFactory yields recursion problems
self.started = True
self.connector = reactor.connectTCP(self.host, self.port, self)
def stopConnecting(self):
self.started = False
self.stopTrying()
if self.connectedProtocol and self.connectedProtocol.connected:
return self.connectedProtocol.disconnect()
@property
def queueSize(self):
return len(self.queue)
def hasQueuedDatapoints(self):
return bool(self.queue)
def takeSomeFromQueue(self):
"""Use self.queue, which is a collections.deque, to pop up to
settings.MAX_DATAPOINTS_PER_MESSAGE items from the left of the
queue.
"""
def yield_max_datapoints():
for count in range(settings.MAX_DATAPOINTS_PER_MESSAGE):
try:
yield self.queue.popleft()
except IndexError:
                    return  # PEP 479: a bare return ends the generator cleanly
return list(yield_max_datapoints())
def checkQueue(self):
"""Check if the queue is empty. If the queue isn't empty or
doesn't exist yet, then this will invoke the callback chain on the
self.queryEmpty Deferred chain with the argument 0, and will
re-set the queueEmpty callback chain with a new Deferred
object.
"""
if not self.queue:
self.queueEmpty.callback(0)
self.queueEmpty = Deferred()
def enqueue(self, metric, datapoint):
self.queue.append((metric, datapoint))
def enqueue_from_left(self, metric, datapoint):
self.queue.appendleft((metric, datapoint))
def sendDatapoint(self, metric, datapoint):
instrumentation.increment(self.attemptedRelays)
if self.queueSize >= settings.MAX_QUEUE_SIZE:
if not self.queueFull.called:
self.queueFull.callback(self.queueSize)
instrumentation.increment(self.fullQueueDrops)
else:
self.enqueue(metric, datapoint)
if self.connectedProtocol:
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
else:
instrumentation.increment(self.queuedUntilConnected)
def sendHighPriorityDatapoint(self, metric, datapoint):
"""The high priority datapoint is one relating to the carbon
daemon itself. It puts the datapoint on the left of the deque,
ahead of other stats, so that when the carbon-relay, specifically,
is overwhelmed its stats are more likely to make it through and
expose the issue at hand.
In addition, these stats go on the deque even when the max stats
capacity has been reached. This relies on not creating the deque
with a fixed max size.
"""
instrumentation.increment(self.attemptedRelays)
self.enqueue_from_left(metric, datapoint)
if self.connectedProtocol:
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
else:
instrumentation.increment(self.queuedUntilConnected)
def startedConnecting(self, connector):
log.clients("%s::startedConnecting (%s:%d)" % (self, connector.host, connector.port))
def clientConnectionLost(self, connector, reason):
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
log.clients("%s::clientConnectionLost (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
self.connectedProtocol = None
self.connectionLost.callback(0)
self.connectionLost = Deferred()
def clientConnectionFailed(self, connector, reason):
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
log.clients("%s::clientConnectionFailed (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
self.connectFailed.callback(dict(connector=connector, reason=reason))
self.connectFailed = Deferred()
def disconnect(self):
self.queueEmpty.addCallback(lambda result: self.stopConnecting())
readyToStop = DeferredList(
[self.connectionLost, self.connectFailed],
fireOnOneCallback=True,
fireOnOneErrback=True)
self.checkQueue()
# This can happen if the client is stopped before a connection is ever made
if (not readyToStop.called) and (not self.started):
readyToStop.callback(None)
return readyToStop
def __str__(self):
return 'CarbonClientFactory(%s:%d:%s)' % self.destination
__repr__ = __str__
class CarbonClientManager(Service):
def __init__(self, router):
self.router = router
self.client_factories = {} # { destination : CarbonClientFactory() }
def startService(self):
Service.startService(self)
for factory in self.client_factories.values():
if not factory.started:
factory.startConnecting()
def stopService(self):
Service.stopService(self)
self.stopAllClients()
def startClient(self, destination):
if destination in self.client_factories:
return
log.clients("connecting to carbon daemon at %s:%d:%s" % destination)
self.router.addDestination(destination)
factory = self.client_factories[destination] = CarbonClientFactory(destination)
connectAttempted = DeferredList(
[factory.connectionMade, factory.connectFailed],
fireOnOneCallback=True,
fireOnOneErrback=True)
if self.running:
factory.startConnecting() # this can trigger & replace connectFailed
return connectAttempted
def stopClient(self, destination):
factory = self.client_factories.get(destination)
if factory is None:
return
self.router.removeDestination(destination)
stopCompleted = factory.disconnect()
stopCompleted.addCallback(lambda result: self.disconnectClient(destination))
return stopCompleted
def disconnectClient(self, destination):
factory = self.client_factories.pop(destination)
c = factory.connector
if c and c.state == 'connecting' and not factory.hasQueuedDatapoints():
c.stopConnecting()
def stopAllClients(self):
deferreds = []
for destination in list(self.client_factories):
deferreds.append( self.stopClient(destination) )
return DeferredList(deferreds)
def sendDatapoint(self, metric, datapoint):
for destination in self.router.getDestinations(metric):
self.client_factories[destination].sendDatapoint(metric, datapoint)
def sendHighPriorityDatapoint(self, metric, datapoint):
for destination in self.router.getDestinations(metric):
self.client_factories[destination].sendHighPriorityDatapoint(metric, datapoint)
def __str__(self):
return "<%s[%x]>" % (self.__class__.__name__, id(self))
|
|
#
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import logging
from copy import deepcopy
from collections.abc import Iterable
import array
from collections import namedtuple
from six import integer_types
from basil.utils.utils import tobytes
from basil.utils.BitLogic import BitLogic
from basil.HL.HardwareLayer import HardwareLayer
logger = logging.getLogger(__name__)
# description attributes
read_only = ['read_only', 'read-only', 'readonly', 'ro']
write_only = ['write_only', 'write-only', 'writeonly', 'wo']
is_byte_array = ['byte_array', 'byte-array', 'bytearray']
class RegisterHardwareLayer(HardwareLayer):
'''Register Hardware Layer.
Implementation of advanced register operations.
Example:
_registers = {'RESET': {'descr': {'addr': 0, 'size': 8, 'properties': ['writeonly']}}, <-- 8-bit reset register, writeonly
'LOST_DATA_COUNTER': {'descr': {'addr': 0, 'size': 8, 'properties': ['ro']}}, <-- 8-bit data register, 'ro' equivalent to 'readonly'
'ENABLE': {'descr': {'addr': 1, 'size': 1, 'offset': 0}}, <-- 1-bit register
'ENABLE_EXTERN': {'descr': {'addr': 1, 'size': 1, 'offset': 1}}, <-- 1-bit register
'EN_ARMING': {'descr': {'addr': 1, 'size': 1, 'offset': 2}}, <-- 1-bit register
'EN_WRITE_TIMESTAMP': {'descr': {'addr': 1, 'size': 1, 'offset': 3}}, <-- 1-bit register
                  'EVENT_COUNTER': {'descr': {'addr': 2, 'size': 32, 'properties': ['ro']}}}  <-- 32-bit register, 'ro' equivalent to 'readonly'
_require_version = '==3' <-- or use '<=', '>=', ... accordingly
'''
_registers = {}
_require_version = None
def __init__(self, intf, conf):
super(RegisterHardwareLayer, self).__init__(intf, conf)
# require interface and base address
self._intf = intf
self._base_addr = conf['base_addr']
rv = namedtuple('_register_values', field_names=iter(self._registers.keys()))
self._register_values = rv(*([None] * len(self._registers)))
for reg in self._registers.keys():
if not reg.isupper():
raise ValueError("Register %s must be uppercase." % reg)
self.add_property(reg)
def init(self):
super(RegisterHardwareLayer, self).init()
# reset during initialization to get default state and to remove any prior settings
if "RESET" in self._registers:
self.RESET # assign no value, to read back value and write same value or default value
if 'VERSION' in self._registers:
version = str(self.VERSION)
else:
version = None
logger.info("Initializing %s (firmware version: %s), module %s, base_addr %s" % (self.name, version if 'VERSION' in self._registers else 'n/a', self.__class__.__module__, hex(self._base_addr)))
if self._require_version and not eval(version + self._require_version):
raise Exception("FPGA module %s does not satisfy version requirements (read: %s, require: %s)" % (self.__class__.__module__, version, self._require_version.strip()))
for reg, value in self._registers.items():
if reg in self._init:
self[reg] = self._init[reg]
elif 'default' in value and not ('properties' in value['descr'] and [i for i in read_only if i in value['descr']['properties']]):
self[reg] = value['default']
else: # do nothing here, keep existing value
pass
unknown_regs = set(self._init.keys()).difference(set(self._registers.keys()))
if unknown_regs:
raise KeyError("Attempt to write to unknown register(s) in %s, module %s during initialization: %s" % (self.name, self.__class__.__module__, ", ".join(unknown_regs)))
def set_value(self, value, addr, size, offset, **kwargs):
'''Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit size/length of the value to be written to the register.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
nothing
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
if mod_offset == 0 and mod_size == 0:
reg = BitLogic.from_value(0, size=div_size * 8)
else:
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
reg[size + mod_offset - 1:mod_offset] = value
self._intf.write(self._base_addr + addr + div_offset, data=array.array('B', reg.tobytes()))
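    # Worked example for set_value() above (illustrative): set_value(value=1,
    # addr=1, size=1, offset=3) yields div_offset=0, mod_offset=3 and a rounded-up
    # div_size of 1, so one byte at base_addr + 1 is read back, bit 3 of that byte
    # is replaced with the new value, and the byte is written again -- a
    # read-modify-write of a single bit.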
def get_value(self, addr, size, offset, **kwargs):
'''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
            Offset of the value to be read from the register (in number of bits).
Returns
-------
reg : int
Register value.
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
return reg[size + mod_offset - 1:mod_offset].tovalue()
def set_bytes(self, data, addr, **kwargs):
'''Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
'''
self._intf.write(self._conf['base_addr'] + addr, data)
def get_bytes(self, addr, size, **kwargs):
'''Reading bytes of any arbitrary size
Parameters
        ----------
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array.
'''
return self._intf.read(self._conf['base_addr'] + addr, size)
def set_configuration(self, conf):
if conf:
for reg, value in conf.items():
self[reg] = value
def get_configuration(self):
conf = {}
for reg in self._registers.keys():
descr = self._registers[reg]['descr']
if not ('properties' in descr and [i for i in write_only if i in descr['properties']]) and not ('properties' in descr and [i for i in read_only if i in descr['properties']]):
conf[reg] = self[reg]
return conf
def add_property(self, attribute):
# create local setter and getter with a particular attribute name
# getter = lambda self: self._get(attribute)
# setter = lambda self, value: self._set(attribute, value)
# Workaround: obviously dynamic properties catch exceptions
# Print error message and return None
def getter(self):
try:
return self._get(attribute)
except Exception as e:
logger.error(e)
return None
def setter(self, value):
try:
return self._set(attribute, value)
except Exception as e:
logger.error(e)
return None
# construct property attribute and add it to the class
setattr(self.__class__, attribute, property(fget=getter, fset=setter, doc=attribute + ' register'))
def set_default(self):
for reg, value in self._registers.items():
if 'default' in value and not ('properties' in value['descr'] and [i for i in read_only if i in self._registers[reg]['descr']['properties']]):
self._set(reg, value['default'])
def _get(self, reg):
descr = deepcopy(self._registers[reg]['descr'])
if 'properties' in descr and [i for i in write_only if i in descr['properties']]:
# allows a lazy-style of programming
if 'default' in self._registers[reg]:
return self._set(reg, self._registers[reg]['default'])
else:
descr.setdefault('offset', 0)
return self._set(reg, self.get_value(**descr))
# raise error when doing read on write-only register
# raise IOError('Register is write-only')
# return None to prevent misuse
# return None
else:
if 'properties' in descr and [i for i in is_byte_array if i in descr['properties']]:
ret_val = self.get_bytes(**descr)
ret_val = array.array('B', ret_val).tolist()
else:
descr.setdefault('offset', 0)
curr_val = self._register_values._asdict()[reg]
if not self.is_initialized: # this test allows attributes to be set in the __init__ method
ret_val = curr_val
else:
ret_val = self.get_value(**descr)
if curr_val is not None and 'properties' in descr and not [i for i in read_only if i in descr['properties']] and curr_val != ret_val:
raise ValueError('Read value was not expected: read: %s, expected: %s' % (str(ret_val), str(curr_val)))
return ret_val
def _set(self, reg, value):
descr = deepcopy(self._registers[reg]['descr'])
if 'properties' in descr and [i for i in read_only if i in descr['properties']]:
raise IOError('Register is read-only')
if 'properties' in descr and [i for i in is_byte_array if i in descr['properties']]:
if not isinstance(value, Iterable):
raise ValueError('For array byte_register iterable object is needed')
value = array.array('B', value).tolist()
self.set_bytes(value, **descr)
self._register_values = self._register_values._replace(**{reg: value})
else:
descr.setdefault('offset', 0)
value = value if isinstance(value, integer_types) else int(value, base=2)
try:
self.set_value(value, **descr)
except ValueError:
raise
else:
self._register_values = self._register_values._replace(**{reg: value})
def __getitem__(self, name):
return self._get(name)
def __setitem__(self, name, value):
return self._set(name, value)
def __getattr__(self, name):
'''called only as a last resort if there are no attributes in the instance that match the name
'''
if name.isupper():
_ = self._register_values._asdict()[name]
def method(*args, **kwargs):
nsplit = name.split('_', 1)
if len(nsplit) == 2 and nsplit[0] == 'set' and nsplit[1].isupper() and len(args) == 1 and not kwargs:
self[nsplit[1]] = args[0] # returns None
elif len(nsplit) == 2 and nsplit[0] == 'get' and nsplit[1].isupper() and not args and not kwargs:
return self[nsplit[1]]
else:
raise AttributeError("%r object has no attribute %r" % (self.__class__, name))
return method
def __setattr__(self, name, value):
if name.isupper():
_ = self._register_values._asdict()[name]
super(RegisterHardwareLayer, self).__setattr__(name, value)
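# Illustrative sketch (not part of the original module): the byte-window
# arithmetic used by set_value()/get_value() above. divmod() picks the first
# byte touched by a bit offset and how many whole bytes must be transferred to
# cover the value; the helper name below is local to this example.
def _example_bit_window(offset, size):
    div_offset, mod_offset = divmod(offset, 8)          # first byte, bit shift inside it
    div_size, mod_size = divmod(size + mod_offset, 8)   # whole bytes spanned
    if mod_size:
        div_size += 1                                   # round up to a full byte
    return div_offset, div_size

# A 10-bit value at bit offset 12 starts in byte 1 and spans 2 bytes.
assert _example_bit_window(offset=12, size=10) == (1, 2)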
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import runpy
import sys
import unittest
from datetime import timedelta
from unittest import mock
import pytest
from werkzeug.routing import Rule
from werkzeug.test import create_environ
from werkzeug.wrappers import Response
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.decorators import dont_initialize_flask_app_submodules
class TestApp(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
from airflow import settings
settings.configure_orm()
@conf_vars(
{
('webserver', 'enable_proxy_fix'): 'True',
('webserver', 'proxy_fix_x_for'): '1',
('webserver', 'proxy_fix_x_proto'): '1',
('webserver', 'proxy_fix_x_host'): '1',
('webserver', 'proxy_fix_x_port'): '1',
('webserver', 'proxy_fix_x_prefix'): '1',
}
)
@dont_initialize_flask_app_submodules
def test_should_respect_proxy_fix(self):
app = application.cached_app(testing=True)
app.url_map.add(Rule("/debug", endpoint="debug"))
def debug_view():
from flask import request
# Should respect HTTP_X_FORWARDED_FOR
assert request.remote_addr == '192.168.0.1'
# Should respect HTTP_X_FORWARDED_PROTO, HTTP_X_FORWARDED_HOST, HTTP_X_FORWARDED_PORT,
# HTTP_X_FORWARDED_PREFIX
assert request.url == 'https://valid:445/proxy-prefix/debug'
return Response("success")
app.view_functions['debug'] = debug_view
new_environ = {
"PATH_INFO": "/debug",
"REMOTE_ADDR": "192.168.0.2",
"HTTP_HOST": "invalid:9000",
"HTTP_X_FORWARDED_FOR": "192.168.0.1",
"HTTP_X_FORWARDED_PROTO": "https",
"HTTP_X_FORWARDED_HOST": "valid",
"HTTP_X_FORWARDED_PORT": "445",
"HTTP_X_FORWARDED_PREFIX": "/proxy-prefix",
}
environ = create_environ(environ_overrides=new_environ)
response = Response.from_app(app, environ)
assert b"success" == response.get_data()
assert response.status_code == 200
@conf_vars(
{
('webserver', 'base_url'): 'http://localhost:8080/internal-client',
}
)
@dont_initialize_flask_app_submodules
def test_should_respect_base_url_ignore_proxy_headers(self):
app = application.cached_app(testing=True)
app.url_map.add(Rule("/debug", endpoint="debug"))
def debug_view():
from flask import request
# Should ignore HTTP_X_FORWARDED_FOR
assert request.remote_addr == '192.168.0.2'
# Should ignore HTTP_X_FORWARDED_PROTO, HTTP_X_FORWARDED_HOST, HTTP_X_FORWARDED_PORT,
# HTTP_X_FORWARDED_PREFIX
assert request.url == 'http://invalid:9000/internal-client/debug'
return Response("success")
app.view_functions['debug'] = debug_view
new_environ = {
"PATH_INFO": "/internal-client/debug",
"REMOTE_ADDR": "192.168.0.2",
"HTTP_HOST": "invalid:9000",
"HTTP_X_FORWARDED_FOR": "192.168.0.1",
"HTTP_X_FORWARDED_PROTO": "https",
"HTTP_X_FORWARDED_HOST": "valid",
"HTTP_X_FORWARDED_PORT": "445",
"HTTP_X_FORWARDED_PREFIX": "/proxy-prefix",
}
environ = create_environ(environ_overrides=new_environ)
response = Response.from_app(app, environ)
assert b"success" == response.get_data()
assert response.status_code == 200
@conf_vars(
{
('webserver', 'base_url'): 'http://localhost:8080/internal-client',
('webserver', 'enable_proxy_fix'): 'True',
('webserver', 'proxy_fix_x_for'): '1',
('webserver', 'proxy_fix_x_proto'): '1',
('webserver', 'proxy_fix_x_host'): '1',
('webserver', 'proxy_fix_x_port'): '1',
('webserver', 'proxy_fix_x_prefix'): '1',
}
)
@dont_initialize_flask_app_submodules
def test_should_respect_base_url_when_proxy_fix_and_base_url_is_set_up_but_headers_missing(self):
app = application.cached_app(testing=True)
app.url_map.add(Rule("/debug", endpoint="debug"))
def debug_view():
from flask import request
# Should use original REMOTE_ADDR
assert request.remote_addr == '192.168.0.1'
# Should respect base_url
assert request.url == "http://invalid:9000/internal-client/debug"
return Response("success")
app.view_functions['debug'] = debug_view
new_environ = {
"PATH_INFO": "/internal-client/debug",
"REMOTE_ADDR": "192.168.0.1",
"HTTP_HOST": "invalid:9000",
}
environ = create_environ(environ_overrides=new_environ)
response = Response.from_app(app, environ)
assert b"success" == response.get_data()
assert response.status_code == 200
@conf_vars(
{
('webserver', 'base_url'): 'http://localhost:8080/internal-client',
('webserver', 'enable_proxy_fix'): 'True',
('webserver', 'proxy_fix_x_for'): '1',
('webserver', 'proxy_fix_x_proto'): '1',
('webserver', 'proxy_fix_x_host'): '1',
('webserver', 'proxy_fix_x_port'): '1',
('webserver', 'proxy_fix_x_prefix'): '1',
}
)
@dont_initialize_flask_app_submodules
def test_should_respect_base_url_and_proxy_when_proxy_fix_and_base_url_is_set_up(self):
app = application.cached_app(testing=True)
app.url_map.add(Rule("/debug", endpoint="debug"))
def debug_view():
from flask import request
# Should respect HTTP_X_FORWARDED_FOR
assert request.remote_addr == '192.168.0.1'
# Should respect HTTP_X_FORWARDED_PROTO, HTTP_X_FORWARDED_HOST, HTTP_X_FORWARDED_PORT,
# HTTP_X_FORWARDED_PREFIX and use base_url
assert request.url == "https://valid:445/proxy-prefix/internal-client/debug"
return Response("success")
app.view_functions['debug'] = debug_view
new_environ = {
"PATH_INFO": "/internal-client/debug",
"REMOTE_ADDR": "192.168.0.2",
"HTTP_HOST": "invalid:9000",
"HTTP_X_FORWARDED_FOR": "192.168.0.1",
"HTTP_X_FORWARDED_PROTO": "https",
"HTTP_X_FORWARDED_HOST": "valid",
"HTTP_X_FORWARDED_PORT": "445",
"HTTP_X_FORWARDED_PREFIX": "/proxy-prefix",
}
environ = create_environ(environ_overrides=new_environ)
response = Response.from_app(app, environ)
assert b"success" == response.get_data()
assert response.status_code == 200
@conf_vars(
{
('core', 'sql_alchemy_pool_enabled'): 'True',
('core', 'sql_alchemy_pool_size'): '3',
('core', 'sql_alchemy_max_overflow'): '5',
('core', 'sql_alchemy_pool_recycle'): '120',
('core', 'sql_alchemy_pool_pre_ping'): 'True',
}
)
@dont_initialize_flask_app_submodules
@pytest.mark.backend("mysql", "postgres")
def test_should_set_sqlalchemy_engine_options(self):
app = application.cached_app(testing=True)
engine_params = {'pool_size': 3, 'pool_recycle': 120, 'pool_pre_ping': True, 'max_overflow': 5}
if app.config['SQLALCHEMY_DATABASE_URI'].startswith('mysql'):
engine_params['isolation_level'] = 'READ COMMITTED'
assert app.config['SQLALCHEMY_ENGINE_OPTIONS'] == engine_params
@conf_vars(
{
('webserver', 'session_lifetime_minutes'): '3600',
}
)
@dont_initialize_flask_app_submodules
def test_should_set_permanent_session_timeout(self):
app = application.cached_app(testing=True)
assert app.config['PERMANENT_SESSION_LIFETIME'] == timedelta(minutes=3600)
@conf_vars({('webserver', 'cookie_samesite'): ''})
@dont_initialize_flask_app_submodules
def test_correct_default_is_set_for_cookie_samesite(self):
app = application.cached_app(testing=True)
assert app.config['SESSION_COOKIE_SAMESITE'] == 'Lax'
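# Illustrative sketch (not part of the test suite): the kind of middleware the
# [webserver] enable_proxy_fix/proxy_fix_x_* options above turn on. This
# assumes werkzeug's ProxyFix; the actual wiring lives in airflow.www.app, and
# the helper name below is local to this example.
def _example_wrap_with_proxy_fix(flask_app):
    from werkzeug.middleware.proxy_fix import ProxyFix

    flask_app.wsgi_app = ProxyFix(
        flask_app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_port=1, x_prefix=1
    )
    return flask_app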
class TestFlaskCli:
@dont_initialize_flask_app_submodules(skip_all_except=['init_appbuilder'])
def test_flask_cli_should_display_routes(self, capsys):
with mock.patch.dict("os.environ", FLASK_APP="airflow.www.app:cached_app"), mock.patch.object(
sys, 'argv', ['flask', 'routes']
), pytest.raises(SystemExit):
runpy.run_module('flask', run_name='__main__')
output = capsys.readouterr()
assert "/login/" in output.out
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import logging
import os
import os.path
import fnmatch
import sys
import traceback
import subprocess
LOG = logging.getLogger(__name__)
def _deprecation_check(arg0):
"""HUE-71. Deprecate build/env/bin/desktop"""
if os.path.basename(arg0) == 'desktop':
to_use = os.path.join(os.path.dirname(arg0), 'hue')
msg = "Warning: '%s' has been deprecated. Please use '%s' instead." % (arg0, to_use)
print(msg, file=sys.stderr)
LOG.warning(msg)
def reload_with_cm_env(cm_managed):
try:
from django.db.backends.oracle.base import Oracle_datetime
except:
if 'LD_LIBRARY_PATH' in os.environ:
print("We need to reload the process to include LD_LIBRARY_PATH for Oracle backend")
try:
if cm_managed:
sys.argv.append("--cm-managed")
sys.argv.append("--skip-reload")
os.execv(sys.argv[0], sys.argv)
except Exception as exc:
print('Failed re-exec: %s' % exc)
sys.exit(1)
def entry():
_deprecation_check(sys.argv[0])
from django.core.exceptions import ImproperlyConfigured
from django.core.management import execute_from_command_line, find_commands
from django.core.management import CommandParser
from django.core.management.base import BaseCommand
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'desktop.settings')
cm_config_file = '/etc/cloudera-scm-agent/config.ini'
ld_path_orig = None
if "LD_LIBRARY_PATH" in list(os.environ.keys()):
ld_path_orig = os.environ["LD_LIBRARY_PATH"]
# What's the subcommand being run?
# This code uses the same logic from django.core.management to handle command args
subcommand = None
if "--skip-reload" in sys.argv:
skip_reload = True
sys.argv.remove("--skip-reload")
else:
skip_reload = False
# Check if --cm-managed flag is set and strip it out
# to prevent from sending to subcommands
if "--cm-managed" in sys.argv:
sys.argv.remove("--cm-managed")
cm_managed = True
else:
cm_managed = False
if len(sys.argv) > 1:
subcommand = sys.argv[1]
if sys.version_info[0] < 3:
args = [None]
else:
args = []
parser = CommandParser(*args, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.parse_known_args(sys.argv[2:])
if len(sys.argv) > 1:
prof_id = subcommand = sys.argv[1]
#Check if this is a CM managed cluster
if os.path.isfile(cm_config_file) and not cm_managed and not skip_reload:
print("ALERT: This appears to be a CM Managed environment")
print("ALERT: HUE_CONF_DIR must be set when running hue commands in CM Managed environment")
print("ALERT: Please run 'hue <command> --cm-managed'")
else:
prof_id = str(os.getpid())
# CM managed configure env vars
if cm_managed:
if sys.version_info[0] > 2:
from configparser import NoOptionError, RawConfigParser
else:
from ConfigParser import NoOptionError, RawConfigParser
config = RawConfigParser()
config.read(cm_config_file)
try:
cm_agent_run_dir = config.get('General', 'agent_wide_credential_cache_location')
except NoOptionError:
cm_agent_run_dir = '/var/run/cloudera-scm-agent'
pass
#Parse CM supervisor include file for Hue and set env vars
cm_supervisor_dir = cm_agent_run_dir + '/supervisor/include'
cm_process_dir = cm_agent_run_dir + '/process'
hue_env_conf = None
envline = None
cm_hue_string = "HUE_SERVER"
for file in os.listdir(cm_supervisor_dir):
if cm_hue_string in file:
hue_env_conf = file
hue_env_conf = cm_supervisor_dir + "/" + hue_env_conf
if hue_env_conf is None:
process_dirs = fnmatch.filter(os.listdir(cm_process_dir), '*%s*' % cm_hue_string)
process_dirs.sort()
hue_process_dir = cm_process_dir + "/" + process_dirs[-1]
hue_env_conf = fnmatch.filter(os.listdir(hue_process_dir), 'supervisor.conf')[0]
hue_env_conf = hue_process_dir + "/" + hue_env_conf
if hue_env_conf is not None:
if os.path.isfile(hue_env_conf):
hue_env_conf_file = open(hue_env_conf, "r")
for line in hue_env_conf_file:
if "environment" in line:
envline = line
if "directory" in line:
empty, hue_conf_dir = line.split("directory=")
os.environ["HUE_CONF_DIR"] = hue_conf_dir.rstrip()
else:
print("This appears to be a CM managed cluster, but the")
print("supervisor/include file for Hue could not be found")
print("in order to successfully run commands that access")
print("the database you need to set the following env vars:")
print("")
print(" export JAVA_HOME=<java_home>")
print(" export HUE_CONF_DIR=\"%s/`ls -1 %s | grep %s | sort -n | tail -1 `\"" % (cm_processs_dir, cm_process_dir, cm_hue_string))
print(" export HUE_IGNORE_PASSWORD_SCRIPT_ERRORS=1")
print(" export HUE_DATABASE_PASSWORD=<hueDBpassword>")
print("If using Oracle as your database:")
print(" export LD_LIBRARY_PATH=/path/to/instantclient")
print("")
print("If the above does not work, make sure Hue has been started on this server.")
if envline is not None:
empty, environment = envline.split("environment=")
for envvar in environment.split(","):
include_env_vars = ("HADOOP_C", "PARCEL", "SCM_DEFINES", "LD_LIBRARY")
if any(include_env_var in envvar for include_env_var in include_env_vars):
envkey, envval = envvar.split("=")
envval = envval.replace("'", "").rstrip()
os.environ[envkey] = envval
#Set JAVA_HOME
if "JAVA_HOME" not in list(os.environ.keys()):
if os.path.isfile('/usr/lib64/cmf/service/common/cloudera-config.sh'):
locate_java = subprocess.Popen(
['bash', '-c', '. /usr/lib64/cmf/service/common/cloudera-config.sh; locate_java_home'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
elif os.path.isfile('/opt/cloudera/cm-agent/service/common/cloudera-config.sh'):
locate_java = subprocess.Popen(
['bash', '-c', '. /opt/cloudera/cm-agent/service/common/cloudera-config.sh; locate_java_home'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
locate_java = None
JAVA_HOME = "UNKNOWN"
if locate_java is not None:
for line in iter(locate_java.stdout.readline, ''):
if 'JAVA_HOME' in line:
JAVA_HOME = line.rstrip().split('=')[1]
if JAVA_HOME != "UNKNOWN":
os.environ["JAVA_HOME"] = JAVA_HOME
if "JAVA_HOME" not in list(os.environ.keys()):
print("JAVA_HOME must be set and can't be found, please set JAVA_HOME environment variable")
print(" export JAVA_HOME=<java_home>")
sys.exit(1)
#Make sure we set Oracle Client if configured
if "LD_LIBRARY_PATH" not in list(os.environ.keys()):
if "SCM_DEFINES_SCRIPTS" in list(os.environ.keys()):
for scm_script in os.environ["SCM_DEFINES_SCRIPTS"].split(":"):
if "ORACLE" in scm_script:
if os.path.isfile(scm_script):
oracle_source = subprocess.Popen(". %s; env" % scm_script, stdout=subprocess.PIPE, shell=True, executable="/bin/bash")
for line in oracle_source.communicate()[0].splitlines():
if "LD_LIBRARY_PATH" in line:
var, oracle_ld_path = line.split("=")
os.environ["LD_LIBRARY_PATH"] = oracle_ld_path
if "LD_LIBRARY_PATH" not in list(os.environ.keys()):
print("LD_LIBRARY_PATH can't be found, if you are using ORACLE for your Hue database")
print("then it must be set, if not, you can ignore")
print(" export LD_LIBRARY_PATH=/path/to/instantclient")
if "LD_LIBRARY_PATH" in list(os.environ.keys()):
if ld_path_orig is not None and ld_path_orig == os.environ["LD_LIBRARY_PATH"]:
skip_reload = True
if not skip_reload:
reload_with_cm_env(cm_managed)
try:
# Let django handle the normal execution
if os.getenv("DESKTOP_PROFILE"):
_profile(prof_id, lambda: execute_from_command_line(sys.argv))
else:
execute_from_command_line(sys.argv)
except ImproperlyConfigured as e:
if len(sys.argv) > 1 and sys.argv[1] == 'is_db_alive' and 'oracle' in str(e).lower():
print(e, file=sys.stderr) # Oracle connector is improperly configured
sys.exit(10)
else:
raise e
except subprocess.CalledProcessError as e:
if "altscript.sh" in str(e).lower():
print("%s" % e)
print("HUE_CONF_DIR seems to be set to CM location and '--cm-managed' flag not used")
def _profile(prof_id, func):
"""
Wrap a call with a profiler
"""
# Note that some distro don't come with pstats
import pstats
try:
import cProfile as profile
except ImportError:
import profile
PROF_DAT = '/tmp/desktop-profile-%s.dat' % (prof_id,)
prof = profile.Profile()
try:
prof.runcall(func)
finally:
if os.path.exists(PROF_DAT):
os.remove(PROF_DAT)
prof.dump_stats(PROF_DAT)
# Sort the calls by time spent and show top 50
pstats.Stats(PROF_DAT).sort_stats('time').print_stats(50)
print("Complete profile data in %s" % (PROF_DAT,), file=sys.stderr)
|
|
#!/usr/bin/python3
# -*- coding: utf8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
import datetime
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask_babel import gettext
from flask_login import current_user
from flask_login import login_required
from flask_principal import identity_loaded
from flask_principal import Permission
from flask_principal import Principal
from flask_principal import RoleNeed
from flask_principal import UserNeed
from application import app, db
from application.flicket.models.flicket_user import FlicketUser
from application.flicket.models.flicket_user import FlicketGroup
from application.flicket.scripts.hash_password import hash_password
from application.flicket_admin.forms.forms_admin import AddGroupForm
from application.flicket_admin.forms.forms_admin import AddUserForm
from application.flicket_admin.forms.forms_admin import EnterPasswordForm
from application.flicket_admin.forms.forms_admin import EditUserForm
from . import admin_bp
principals = Principal(app)
# define flicket_admin role need
admin_only = RoleNeed('flicket_admin')
admin_permission = Permission(admin_only)
def create_user(username, password, email=None, name=None, job_title=None, locale=None, disabled=None):
password = hash_password(password)
register = FlicketUser(username=username,
email=email,
name=name,
password=password,
job_title=job_title,
date_added=datetime.datetime.now(),
locale=locale,
disabled=disabled)
db.session.add(register)
db.session.commit()
# add permissions
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
# set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of groups, update the
# identity with the groups that the user provides
if hasattr(current_user, 'flicket_groups'):
the_user = FlicketUser.query.filter_by(id=current_user.id).first()
for g in the_user.flicket_groups:
identity.provides.add(RoleNeed('{}'.format(g.group_name)))
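# Illustrative sketch (not part of the app): how the RoleNeed/Permission pair
# above gates the admin views. An identity that provides the 'flicket_admin'
# role satisfies admin_permission; one without it does not. The Identity import
# and the helper name below are local to this example.
def example_admin_permission_check():
    from flask_principal import Identity
    admin_identity = Identity('admin-user-id')
    admin_identity.provides.add(admin_only)
    plain_identity = Identity('plain-user-id')
    # admin_permission.allows(admin_identity) -> True
    # admin_permission.allows(plain_identity) -> False
    return admin_permission.allows(admin_identity) and not admin_permission.allows(plain_identity)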
@admin_bp.route(app.config['ADMINHOME'])
@login_required
@admin_permission.require(http_exception=403)
def index():
# noinspection PyUnresolvedReferences
return render_template('admin.html', title='Admin')
# shows all users
@admin_bp.route(app.config['ADMINHOME'] + 'users/', methods=['GET', 'POST'])
@admin_bp.route(app.config['ADMINHOME'] + 'users/<int:page>', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def users(page=1):
users = FlicketUser.query.order_by(FlicketUser.username)
users = users.paginate(page, app.config['posts_per_page'])
# noinspection PyUnresolvedReferences
return render_template('admin_users.html', title='Users', users=users)
# add user
@admin_bp.route(app.config['ADMINHOME'] + 'add_user/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def add_user():
form = AddUserForm()
if form.validate_on_submit():
create_user(form.username.data,
form.password.data,
email=form.email.data,
name=form.name.data,
job_title=form.job_title.data,
locale=form.locale.data,
disabled=form.disabled.data)
flash(gettext('You have successfully registered new user "{}".'.format(form.username.data)), category='success')
return redirect(url_for('admin_bp.users'))
# noinspection PyUnresolvedReferences
return render_template('admin_user.html', title='Add User', form=form)
# edit user
@admin_bp.route(app.config['ADMINHOME'] + 'edit_user/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def edit_user():
_id = request.args.get('id')
user = FlicketUser.query.filter_by(id=_id).first()
if user:
form = EditUserForm()
if form.validate_on_submit():
# check the username is unique
if user.username != form.username.data:
query = FlicketUser.query.filter_by(username=form.username.data)
if query.count() > 0:
flash(gettext('Username already exists'), category='warning')
else:
# change the username.
user.username = form.username.data
# Don't change the password if nothing was entered.
if form.password.data != '':
user.password = hash_password(form.password.data)
user.email = form.email.data
user.name = form.name.data
user.job_title = form.job_title.data
user.disabled = form.disabled.data
groups = form.groups.data
# bit hacky but until i get better at this.
# at least it keeps the groups table clean. :/
# delete all groups associated with current user.
user.flicket_groups = [] # this is beautifully simple though
# add the user to selected groups
for g in groups:
group_id = FlicketGroup.query.filter_by(id=g).first()
group_id.users.append(user)
db.session.commit()
flash(gettext("User {} edited.".format(user.username)), category='success')
return redirect(url_for('admin_bp.edit_user', id=_id))
# populate form with form data retrieved from database.
form.user_id.data = user.id
form.username.data = user.username
form.email.data = user.email
form.name.data = user.name
form.job_title.data = user.job_title
form.disabled.data = user.disabled
# define list of preselect groups.
groups = []
for g in user.flicket_groups:
groups.append(g.id)
form.groups.data = groups
else:
flash(gettext("Could not find user."), category='warning')
return redirect(url_for('admin_bp.index'))
# noinspection PyUnresolvedReferences
return render_template('admin_user.html',
title='Edit User',
admin_edit=True,
form=form, user=user)
# Delete user
@admin_bp.route(app.config['ADMINHOME'] + 'delete_user/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def delete_user():
form = EnterPasswordForm()
id = request.args.get('id')
user_details = FlicketUser.query.filter_by(id=id).first()
# we won't ever delete the flicket_admin user (id = 1)
if id == '1':
flash(gettext('Can\'t delete default flicket_admin user.'), category='warning')
return redirect(url_for('admin_bp.index'))
if form.validate_on_submit():
# delete the user.
flash(gettext('Deleted user {}.'.format(user_details.username)), category='success')
db.session.delete(user_details)
db.session.commit()
return redirect(url_for('admin_bp.users'))
# populate form with logged in user details
form.id.data = g.user.id
# noinspection PyUnresolvedReferences
return render_template('admin_delete_user.html', title='Delete user',
user_details=user_details, form=form)
# Add new groups
@admin_bp.route(app.config['ADMINHOME'] + 'groups/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def groups():
form = AddGroupForm()
groups = FlicketGroup.query.all()
if form.validate_on_submit():
add_group = FlicketGroup(
group_name=form.group_name.data
)
db.session.add(add_group)
db.session.commit()
flash(gettext('New group "{}" added.'.format(form.group_name.data)), category='success')
return redirect(url_for('admin_bp.groups'))
# noinspection PyUnresolvedReferences
return render_template('admin_groups.html', title='Groups', form=form, groups=groups)
# Edit groups
@admin_bp.route(app.config['ADMINHOME'] + 'edit_group/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def admin_edit_group():
form = AddGroupForm()
id = request.args.get('id')
group = FlicketGroup.query.filter_by(id=id).first()
# if group can't be found in database.
if not group:
flash(gettext('Could not find group with id {}.'.format(id)), category='warning')
return redirect(url_for('admin_bp.index'))
# prevent editing of flicket_admin group name as this is hard coded into flicket_admin view permissions.
if group.group_name == app.config['ADMIN_GROUP_NAME']:
flash(gettext('Can\'t edit group {}.'.format(app.config["ADMIN_GROUP_NAME"])), category='warning')
return redirect(url_for('admin_bp.index'))
if form.validate_on_submit():
group.group_name = form.group_name.data
db.session.commit()
flash(gettext('Group name changed to {}.'.format(group.group_name)), category='success')
return redirect(url_for('admin_bp.groups'))
form.group_name.data = group.group_name
# noinspection PyUnresolvedReferences
return render_template('admin_edit_group.html', title='Edit Group', form=form)
# Delete group
@admin_bp.route(app.config['ADMINHOME'] + 'delete_group/', methods=['GET', 'POST'])
@login_required
@admin_permission.require(http_exception=403)
def admin_delete_group():
form = EnterPasswordForm()
id = request.args.get('id')
group_details = FlicketGroup.query.filter_by(id=id).first()
# we won't ever delete the flicket_admin group (id = 1)
if id == '1':
flash(gettext('Can\'t delete default flicket_admin group.'), category='warning')
return redirect(url_for('admin_bp.index'))
if form.validate_on_submit():
# delete the group.
flash(gettext('Deleted group {}.'.format(group_details.group_name)), category="info")
db.session.delete(group_details)
db.session.commit()
return redirect(url_for('admin_bp.groups'))
# populate form with logged in user details
form.id.data = g.user.id
title = gettext('Delete Group')
# noinspection PyUnresolvedReferences
return render_template('admin_delete_group.html', title=title,
group_details=group_details, form=form)
|
|
"""
Set up the demo environment that mimics interaction with devices.
For more details about this component, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import asyncio
import time
from homeassistant import bootstrap
import homeassistant.core as ha
from homeassistant.const import ATTR_ENTITY_ID, CONF_PLATFORM
DEPENDENCIES = ['conversation', 'introduction', 'zone']
DOMAIN = 'demo'
COMPONENTS_WITH_DEMO_PLATFORM = [
'alarm_control_panel',
'binary_sensor',
'calendar',
'camera',
'climate',
'cover',
'device_tracker',
'fan',
'image_processing',
'light',
'lock',
'media_player',
'notify',
'sensor',
'switch',
'tts',
'mailbox',
]
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the demo environment."""
group = hass.components.group
configurator = hass.components.configurator
persistent_notification = hass.components.persistent_notification
config.setdefault(ha.DOMAIN, {})
config.setdefault(DOMAIN, {})
if config[DOMAIN].get('hide_demo_state') != 1:
hass.states.async_set('a.Demo_Mode', 'Enabled')
# Setup sun
if not hass.config.latitude:
hass.config.latitude = 32.87336
if not hass.config.longitude:
hass.config.longitude = 117.22743
tasks = [
bootstrap.async_setup_component(hass, 'sun')
]
# Set up demo platforms
demo_config = config.copy()
for component in COMPONENTS_WITH_DEMO_PLATFORM:
demo_config[component] = {CONF_PLATFORM: 'demo'}
tasks.append(
bootstrap.async_setup_component(hass, component, demo_config))
# Set up input select
tasks.append(bootstrap.async_setup_component(
hass, 'input_select',
{'input_select':
{'living_room_preset': {'options': ['Visitors',
'Visitors with kids',
'Home Alone']},
'who_cooks': {'icon': 'mdi:panda',
'initial': 'Anne Therese',
'name': 'Cook today',
'options': ['Paulus', 'Anne Therese']}}}))
# Set up input boolean
tasks.append(bootstrap.async_setup_component(
hass, 'input_boolean',
{'input_boolean': {'notify': {
'icon': 'mdi:car',
'initial': False,
'name': 'Notify Anne Therese is home'}}}))
# Set up input number
tasks.append(bootstrap.async_setup_component(
hass, 'input_number',
{'input_number': {
'noise_allowance': {'icon': 'mdi:bell-ring',
'min': 0,
'max': 10,
'name': 'Allowed Noise',
'unit_of_measurement': 'dB'}}}))
# Set up weblink
tasks.append(bootstrap.async_setup_component(
hass, 'weblink',
{'weblink': {'entities': [{'name': 'Router',
'url': 'http://192.168.1.1'}]}}))
results = yield from asyncio.gather(*tasks, loop=hass.loop)
if any(not result for result in results):
return False
# Set up example persistent notification
persistent_notification.async_create(
'This is an example of a persistent notification.',
title='Example Notification')
# Set up room groups
lights = sorted(hass.states.async_entity_ids('light'))
switches = sorted(hass.states.async_entity_ids('switch'))
media_players = sorted(hass.states.async_entity_ids('media_player'))
tasks2 = []
# Set up history graph
tasks2.append(bootstrap.async_setup_component(
hass, 'history_graph',
{'history_graph': {'switches': {
'name': 'Recent Switches',
'entities': switches,
'hours_to_show': 1,
'refresh': 60
}}}
))
# Set up scripts
tasks2.append(bootstrap.async_setup_component(
hass, 'script',
{'script': {
'demo': {
'alias': 'Toggle {}'.format(lights[0].split('.')[1]),
'sequence': [{
'service': 'light.turn_off',
'data': {ATTR_ENTITY_ID: lights[0]}
}, {
'delay': {'seconds': 5}
}, {
'service': 'light.turn_on',
'data': {ATTR_ENTITY_ID: lights[0]}
}, {
'delay': {'seconds': 5}
}, {
'service': 'light.turn_off',
'data': {ATTR_ENTITY_ID: lights[0]}
}]
}}}))
# Set up scenes
tasks2.append(bootstrap.async_setup_component(
hass, 'scene',
{'scene': [
{'name': 'Romantic lights',
'entities': {
lights[0]: True,
lights[1]: {'state': 'on', 'xy_color': [0.33, 0.66],
'brightness': 200},
}},
{'name': 'Switch on and off',
'entities': {
switches[0]: True,
switches[1]: False,
}},
]}))
tasks2.append(group.Group.async_create_group(hass, 'Living Room', [
lights[1], switches[0], 'input_select.living_room_preset',
'cover.living_room_window', media_players[1],
'scene.romantic_lights']))
tasks2.append(group.Group.async_create_group(hass, 'Bedroom', [
lights[0], switches[1], media_players[0],
'input_number.noise_allowance']))
tasks2.append(group.Group.async_create_group(hass, 'Kitchen', [
lights[2], 'cover.kitchen_window', 'lock.kitchen_door']))
tasks2.append(group.Group.async_create_group(hass, 'Doors', [
'lock.front_door', 'lock.kitchen_door',
'garage_door.right_garage_door', 'garage_door.left_garage_door']))
tasks2.append(group.Group.async_create_group(hass, 'Automations', [
'input_select.who_cooks', 'input_boolean.notify', ]))
tasks2.append(group.Group.async_create_group(hass, 'People', [
'device_tracker.demo_anne_therese', 'device_tracker.demo_home_boy',
'device_tracker.demo_paulus']))
tasks2.append(group.Group.async_create_group(hass, 'Downstairs', [
'group.living_room', 'group.kitchen',
'scene.romantic_lights', 'cover.kitchen_window',
'cover.living_room_window', 'group.doors',
'climate.ecobee',
], view=True))
results = yield from asyncio.gather(*tasks2, loop=hass.loop)
if any(not result for result in results):
return False
# Set up configurator
configurator_ids = []
def hue_configuration_callback(data):
"""Fake callback, mark config as done."""
time.sleep(2)
# First time it is called, pretend it failed.
if len(configurator_ids) == 1:
configurator.notify_errors(
configurator_ids[0],
"Failed to register, please try again.")
configurator_ids.append(0)
else:
configurator.request_done(configurator_ids[0])
def setup_configurator():
"""Set up a configurator."""
request_id = configurator.request_config(
"Philips Hue", hue_configuration_callback,
description=("Press the button on the bridge to register Philips "
"Hue with Home Assistant."),
description_image="/static/images/config_philips_hue.jpg",
fields=[{'id': 'username', 'name': 'Username'}],
submit_caption="I have pressed the button"
)
configurator_ids.append(request_id)
hass.async_add_job(setup_configurator)
return True
|
|
#!/usr/bin/env python
import subprocess
import datetime
import praw
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit me!
challengePageSubmissionId = 'egfht9'
flaskport = 8997
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
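# Illustrative sketch (not part of the script): the de-duplication scheme used
# by moderatechallenge() below. A comment is identified by sha1(fullname + body),
# so an already-handled comment is skipped via retiredcommenthashes.txt, while an
# edited comment hashes to a new value and shows up again for moderation.
# The helper name is local to this example.
def exampleCommentHash(fullname, body):
    h = sha1()
    h.update(fullname)
    h.update(body.encode('utf-8'))
    return h.hexdigest()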
@app.route('/moderatechallenge.html')
def moderatechallenge():
global commentHashesAndComments
global submission
currentDayOfMonthIndex = datetime.date.today().day
currentMonthIndex = datetime.date.today().month
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 14 and currentMonthIndex == 1
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
# stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
bodyEncodedForMLCorpus = str(request.form["bodyencodedformlcorpus"])
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusCheckin(bodyEncodedForMLCorpus)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSignupAndCheckin(bodyEncodedForMLCorpus)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusRelapse(bodyEncodedForMLCorpus)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusReinstate(bodyEncodedForMLCorpus)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusTooLate(bodyEncodedForMLCorpus)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
def recordMLCorpusCheckin(aString):
with open("../new-ml-corpus-year-long-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSignupAndCheckin(aString):
with open("../new-ml-corpus-year-long-signup-and-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusRelapse(aString):
with open("../new-ml-corpus-year-long-relapse.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusReinstate(aString):
with open("../new-ml-corpus-year-long-reinstate.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusTooLate(aString):
with open("../new-ml-corpus-year-long-too-late.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSkip(aString):
with open("../new-ml-corpus-year-long-skip.txt", "a") as f:
f.write(aString)
f.write("\n")
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
|
import tensorflow as tf
import sys
import time
import os
# Set train_nn to True for training the neural network or False for performing classification.
train_nn = True
if train_nn == False :
# If the following is set to True, training will start if no checkpoint is found in the
# current directory.
continue_training_if_ckpt_not_found = True
# During the classification process, only the testing data will be loaded. Don't change the following variable.
perform_classification = True
# Change it to False if you are doing classification without labels.
perform_classification_with_label = True
if perform_classification_with_label == True :
print 'Process: classification with label.'
else :
print 'Process: classification without label.'
else :
print 'Process: training.'
continue_training_using_previous_model = False
# Don't change the following variable.
perform_classification = False
# Don't change the following variable.
perform_classification_with_label = True
sess = tf.InteractiveSession()
L=200
lx=4 #=int(raw_input('lx'))
V4d=lx*lx*lx*L # 4d volume
Tc = 0.36
# how does the data look like
Ntemp=41 #int(raw_input('Ntemp')) #20 # number of different temperatures used in the simulation
samples_per_T=500 #int(raw_input('samples_per_T')) #250 # number of samples per temperature value
samples_per_T_test=500 # int(raw_input('samples_per_T')) #250 # number of samples per temperature value
numberlabels=2
# Set the following to True for using Juan's input_data.py or False for using Kelvin's data_reader.py.
use_input_data_py = False
if use_input_data_py :
import input_data
mnist = input_data.read_data_sets(numberlabels,lx,L,'txt', one_hot=True)
else :
import data_reader
import numpy as np
U = 9
if perform_classification_with_label == True :
filename = './N%dx%dx%d_L200_U%d_Mu0_T_shuffled' % (lx,lx,lx,U) + '_%.2d.dat'
os.system("ls -l N%dx%dx%d_L200_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L200_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(lx,lx,lx,U,lx,lx,lx,U))
dtau = np.genfromtxt("dtau.dat")
#dtau = dtau[dtau!=Tc]
filenumber = np.arange(1,len(dtau)+1,1)
HSF = data_reader.insert_file_info(filename,filenumber,performing_classification=perform_classification)
mnist = HSF.categorize_data()
#mnist = HSF.categorize_dose_of_data()
#dtau = np.array([0.060, 0.075, 0.090, 0.105, 0.120, 0.135, 0.150, 0.165, \
# 0.180, 0.195, 0.210, 0.225, 0.240, 0.255, 0.270, 0.285, \
# 0.300, 0.315, 0.330, 0.345, 0.510, 0.660, 0.810, \
# 0.960, 1.110, 1.260, 1.410, 1.560, 1.710, 1.860, 2.010, \
# 2.160, 2.310, 2.460, 2.610, 2.760, 2.910, 3.060, 3.210, \
# 3.360])
else :
filename = './N%dx%dx%d_L200_U%d_Mu0_T' % (lx,lx,lx,U) + '%s.HSF.stream'
# Get temperature
os.system("ls -l N%dx%dx%d_L200_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L200_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(lx,lx,lx,U,lx,lx,lx,U))
# Load temperature into a list of string
dtau = np.genfromtxt("dtau.dat",dtype='str')
ndata_per_temp = 1000
classification_data_per_temp = 250
sh = ndata_per_temp - classification_data_per_temp
while ( ndata_per_temp - sh - classification_data_per_temp ) < 0 :
print 'Sum of classification data per temperature and the number of lines skipped at the beginning of the file must be equal to the number of data per temperature.'
print 'Number of data per temperature : %d' % ndata_per_temp
print 'Classification data used per temperature : %d' % classification_data_per_temp
print 'Number of lines skipped at the beginning of the file: %d' % sh
classification_data_per_temp = input('Input new classification data used per temperature: ')
sh = input('Input number of lines skipped at the beginning of the file: ')
HSF = data_reader.insert_file_info(filename,dtau)
mnist = HSF.load_classification_data(nrows=ndata_per_temp, ncols=lx*lx*lx*L, SkipHeader=sh, load_ndata_per_file=classification_data_per_temp)
if train_nn == True or not(perform_classification) :
n_train_data = len(mnist.train.labels)
epochs=5
bsize=50 #=int(raw_input('bsize'))
training=n_train_data/bsize #=int(raw_input('training'))
while np.modf(float(n_train_data)/bsize)[0] > 0.0 :
print 'Warning! Number of data/ batch size must be an integer.'
print 'number of data: %d' % n_train_data
print 'batch size: %d' % bsize
bsize = int(input('Input new batch size: '))
print "reading sets ok"
#sys.exit("pare aqui")
# defining weighs and initlizatinon
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# defining the convolutional and max pool layers
def conv3d(x, W):
return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID')
# defining the model
x = tf.placeholder("float", shape=[None, (lx)*(lx)*(lx)*L]) # placeholder for the spin configurations
#x = tf.placeholder("float", shape=[None, lx*lx*2]) #with padding and no PBC conv net
y_ = tf.placeholder("float", shape=[None, numberlabels])
#first layer
# convolutional layer # 2x2x2 patch size, 2 channel (2 color), 64 feature maps computed
nmaps1=64
spatial_filter_size=2
W_conv1 = weight_variable([spatial_filter_size, spatial_filter_size, spatial_filter_size,L,nmaps1])
# bias for each of the feature maps
b_conv1 = bias_variable([nmaps1])
# applying a reshape of the data to get the two dimensional structure back
#x_image = tf.reshape(x, [-1,lx,lx,2]) # #with padding and no PBC conv net
x_image = tf.reshape(x, [-1,lx,lx,lx,L]) # with PBC
#We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.
h_conv1 = tf.nn.relu(conv3d(x_image, W_conv1) + b_conv1)
h_pool1=h_conv1
#In order to build a deep network, we stack several layers of this type. The second layer will have 8 features for each 5x5 patch.
# weights and bias of the fully connected (fc) layer. In this case everything looks one dimensional because it is fully connected
nmaps2=64
#W_fc1 = weight_variable([(lx/2) * (lx/2) * nmaps1,nmaps2 ]) # with maxpool
W_fc1 = weight_variable([(lx-1) * (lx-1)*(lx-1)*nmaps1,nmaps2 ]) # no maxpool images remain the same size after conv
b_fc1 = bias_variable([nmaps2])
# first we reshape the outcome h_pool2 to a vector
#h_pool1_flat = tf.reshape(h_pool1, [-1, (lx/2)*(lx/2)*nmaps1]) # with maxpool
h_pool1_flat = tf.reshape(h_pool1, [-1, (lx-1)*(lx-1)*(lx-1)*nmaps1]) # no maxpool
# then apply the ReLU with the fully connected weights and biases.
h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, W_fc1) + b_fc1)
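# Illustrative shape check (not part of the original script): with VALID padding,
# stride 1 and a 2x2x2 spatial filter, each spatial dimension shrinks from lx to
# lx-1, which is why the fully connected layer expects (lx-1)**3 * nmaps1 inputs
# per sample.
assert h_conv1.get_shape().as_list() == [None, lx - 1, lx - 1, lx - 1, nmaps1]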
# Dropout: To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling.
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer. Finally, we add a softmax layer, just like for the one layer softmax regression above.
# weights and bias
W_fc2 = weight_variable([nmaps2, numberlabels])
b_fc2 = bias_variable([numberlabels])
# apply a softmax layer
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
#Train and Evaluate the Model
# cost function to minimize
if use_input_data_py :
#cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0)))
else :
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#sess = tf.Session()
sess.run(tf.initialize_all_variables())
filename_weight_bias = "./model.ckpt"
# Check to see if the checkpoint is located in the current file directory before restoring.
if train_nn == False :
if os.path.isfile(filename_weight_bias) == False and continue_training_if_ckpt_not_found :
print '%s is not found in the current directory, starting training...' % filename_weight_bias
train_nn = True
continue_training_using_previous_model = False
else :
while not(os.path.isfile(filename_weight_bias)) :
print '%s is not found in the current directory.' %filename_weight_bias
filename_weight_bias = raw_input('Input checkpoint model filename: ')
filename_weight_bias = './' + filename_weight_bias
train_nn = False
start_time = time.time()
if train_nn :
if continue_training_using_previous_model :
skip = 'n'
file_exist = os.path.isfile(filename_weight_bias)
while (not(file_exist) and skip == 'n') :
print '%s is not found in the current directory, starting training...' % filename_weight_bias
skip = raw_input('Select y to continue training from scratch or n to continue training using existing model: ')
while skip not in ['y','n']:
skip = raw_input('Select y to continue training from scratch or n to continue training using existing model: ')
if skip == 'y' :
file_exist = False
else :
filename_weight_bias = raw_input('Input checkpoint model filename: ')
while not(os.path.isfile(filename_weight_bias)) :
print '%s is not found in the current directory.' %filename_weight_bias
filename_weight_bias = raw_input('Input checkpoint model filename: ')
filename_weight_bias = './' + filename_weight_bias
if os.path.isfile(filename_weight_bias) :
skip = 'y'
saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2])
save_path = saver.restore(sess, filename_weight_bias)
print 'Total number of training epochs: %g' % epochs
start_time = time.time()
test_accuracy_tmp = 0
filename_measure = "./HSF_measure.dat"
ndata_collect_per_epoch = round(float(n_train_data)/bsize/100)
if ndata_collect_per_epoch > 1 :
ndata_collect = ndata_collect_per_epoch*epochs
else :
ndata_collect = epochs
Table_measure = np.zeros(( ndata_collect, 4))
print np.shape(Table_measure)
n = 0
fractional_epoch = bsize*100/float(n_train_data)
for j in range(epochs):
for i in range(training):
batch = mnist.train.next_batch(bsize)
if i%100 == 0:
train_accuracy = sess.run(accuracy,feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
test_accuracy = sess.run(accuracy, feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
Cost = sess.run(cross_entropy, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print "%.2fs, epoch %.2f, training accuracy %g, test accuracy %g, cost %g"%(time.time()-start_time,n*fractional_epoch, train_accuracy, test_accuracy, Cost)
Table_measure[n,0] = n*fractional_epoch
Table_measure[n,1] = train_accuracy
Table_measure[n,2] = test_accuracy
Table_measure[n,3] = Cost
# To avoid multiple training, the model is saved when the difference between testing
# accuracy and training accuracy doesn't exceed a set value (it is set to 0.05 here)
# and if the current testing accuracy is higher than the previous.
delta_accuracy = abs(train_accuracy - test_accuracy)
if test_accuracy > test_accuracy_tmp :
test_accuracy_tmp = test_accuracy
if delta_accuracy <= 0.05 :
saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2])
save_path = saver.save(sess, filename_weight_bias)
check_model = tf.reduce_mean(W_conv1).eval()
best_epoch = n*fractional_epoch
#print "test accuracy %g"%sess.run(accuracy, feed_dict={
#x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
#print "test Trick accuracy %g"%sess.run(accuracy, feed_dict={
#x: mnist.test_Trick.images, y_: mnist.test_Trick.labels, keep_prob: 1.0})
n += 1
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Final check to save the best model.
train_accuracy = sess.run(accuracy,feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
test_accuracy = sess.run(accuracy, feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
Cost = sess.run(cross_entropy, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
delta_accuracy = abs(train_accuracy - test_accuracy)
if test_accuracy > test_accuracy_tmp :
if delta_accuracy <= 0.05 :
saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2])
save_path = saver.save(sess, filename_weight_bias)
check_model = tf.reduce_mean(W_conv1).eval()
best_epoch = epochs
print "%.2fs, epoch %.2f, training accuracy %g, test accuracy %g, cost %g"%(time.time()-start_time,epochs, train_accuracy, test_accuracy, Cost)
print 'Best training epoch: %g'%best_epoch
print "Model saved in file: ", save_path
# To proceed, load the best (saved) model instead of the last training model.
saver.restore(sess, filename_weight_bias)
# Check if the saved model and the restored model are the same.
if check_model != tf.reduce_mean(W_conv1).eval() :
print 'Warning! The best training model and the restored model are incompatible. Exiting...'
sys.exit()
# Save the measurements:
# first column : Training epochs
# second column: Training accuracy
# third column : Testing accuracy
# fourth column: Cost
np.savetxt(filename_measure, Table_measure)
else :
saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2])
# To proceed, load the best (saved) model instead of the last training model.
saver.restore(sess, filename_weight_bias)
print 'Performing classification...'
if use_input_data_py :
#producing data to get the plots we like
f = open('nnout.dat', 'w')
#output of neural net
ii=0
for i in range(Ntemp):
av=0.0
for j in range(samples_per_T_test):
batch=(mnist.test.images[ii,:].reshape(1,lx*lx*lx*L),mnist.test.labels[ii,:].reshape((1,numberlabels)))
res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0})
av=av+res
#print ii, res
ii=ii+1
av=av/samples_per_T_test
f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n")
f.close()
f = open('acc.dat', 'w')
# accuracy vs temperature
for ii in range(Ntemp):
batch=(mnist.test.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,L*lx*lx*lx), mnist.test.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) )
train_accuracy = sess.run(accuracy,feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
f.write(str(ii)+' '+str(train_accuracy)+"\n")
f.close()
else :
if perform_classification_with_label == True :
# Both the neural-network output and the accuracy are saved in a single file. The first
# column holds the temperature, the second the fraction of samples classified as the
# second output class, the third the complementary fraction (first class), the fourth
# the accuracy, and the last the number of test samples used for each temperature.
Table = np.zeros(( len(dtau), 5))
Table[:,0] = dtau
for i in range(len(mnist.test.temps)) :
# Output of neural net vs temperature
Table[mnist.test.temps[i],1] += np.argmax(sess.run(y_conv, feed_dict={x: mnist.test.images[i,:].reshape(1,V4d), keep_prob: 1.0}))
# Accuracy vs temperature
Table[mnist.test.temps[i],3] += sess.run(accuracy, feed_dict={x: mnist.test.images[i,:].reshape(1,V4d), y_: mnist.test.labels[i,:].reshape(1,numberlabels), keep_prob: 1.0})
Table[mnist.test.temps[i],-1] += 1
Table[:,1] = Table[:,1]/Table[:,-1].astype('float')
Table[:,2] = 1.0-Table[:,1]
Table[:,3] = Table[:,3]/Table[:,-1].astype('float')
filename_result = "./result.dat"
np.savetxt(filename_result, Table)
print "Result saved in file: ", filename_result
else :
Table = np.zeros(( len(dtau), 4))
Table[:,0] = dtau
Table[:,-1] = classification_data_per_temp
for j in range(len(dtau)) :
for i in range(classification_data_per_temp) :
# Output of neural net vs temperature
Table[j,1] += np.argmax(sess.run(y_conv, feed_dict={x: mnist.classification.images[i,:].reshape(1,V4d), keep_prob: 1.0}))
Table[:,1] = Table[:,1]/Table[:,-1].astype('float')
Table[:,2] = 1.0-Table[:,1]
filename_result = "./classified.dat"
np.savetxt(filename_result, Table)
print "Classified result saved in file: ", filename_result
#producing data to get the plots we like
#f = open('nnoutTrick.dat', 'w')
#output of neural net
#ii=0
#for i in range(Ntemp):
# av=0.0
# for j in range(samples_per_T_test):
# batch=(mnist.test_Trick.images[ii,:].reshape((1,2*lx*lx)),mnist.test_Trick.labels[ii,:].reshape((1,numberlabels)))
# res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0})
# av=av+res
# #print ii, res
# ii=ii+1
# av=av/samples_per_T_test
# f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n")
#f.close()
#f = open('accTrick.dat', 'w')
# accuracy vs temperature
#for ii in range(Ntemp):
# batch=(mnist.test_Trick.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,2*lx*lx), mnist.test_Trick.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) )
# train_accuracy = sess.run(accuracy,feed_dict={
# x:batch[0], y_: batch[1], keep_prob: 1.0})
# f.write(str(ii)+' '+str(train_accuracy)+"\n")
#f.close()
|
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2015,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper classes for network testing """
from functools import total_ordering
import os
from csv import DictReader
import six.moves.cPickle as pickle # pylint: disable=F0401
from six import itervalues, text_type
from ipaddress import (IPv4Network, IPv4Address, IPv6Network, IPv6Address,
ip_network, ip_address)
# Ranges for dynamic network allocation. The idea is to allocate a /N network
# inside 10.N.0.0/16.
SUBNET_RANGE = {
24: IPv4Network(u'10.24.0.0/16'),
25: IPv4Network(u'10.25.0.0/16'),
26: IPv4Network(u'10.26.0.0/16'),
27: IPv4Network(u'10.27.0.0/16'),
28: IPv4Network(u'10.28.0.0/16')}
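# A minimal sketch (illustrative only) of how a dynamic subnet is carved out
# of one of the ranges above, mirroring what allocate_network() does further
# down:
#     >>> str(next(SUBNET_RANGE[24].subnets(new_prefix=24)))
#     '10.24.0.0/24'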
@total_ordering
class DummyIP(object):
"""
Wrapper around an IP address
This class should work like IPv[46]Address, but it allows attaching some
convenience methods like MAC address generation.
"""
def __init__(self, *args, **kwargs):
self._ip = ip_address(*args, **kwargs)
if isinstance(self._ip, IPv4Address):
octets = [int(i) for i in str(self._ip).split('.')]
self.mac = "02:02:%02x:%02x:%02x:%02x" % tuple(octets)
else:
# FIXME
self.mac = None
def __str__(self):
return str(self._ip)
def __repr__(self):
return repr(self._ip)
def __eq__(self, other):
if isinstance(other, type(self)):
return self._ip == other._ip
else:
return self._ip == other
def __lt__(self, other):
if isinstance(other, type(self)):
return self._ip < other._ip
else:
return self._ip < other
def __int__(self):
return int(self._ip)
def __hash__(self):
return hash(self._ip)
def __add__(self, other):
return DummyIP(self._ip + other)
def __sub__(self, other):
return DummyIP(self._ip - other)
def __getattr__(self, name):
# Proxy lookups to the wrapped network object
if "_ip" not in self.__dict__:
# Happens while unpickling
raise AttributeError
return getattr(self._ip, name)
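# A small usage sketch (illustrative only): the generated MAC simply encodes
# the four IPv4 octets, so it is deterministic for a given address.
#     >>> DummyIP(u'10.1.2.3').mac
#     '02:02:0a:01:02:03'
#     >>> str(DummyIP(u'10.1.2.3') + 1)
#     '10.1.2.4'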
class IPGenerator(object):
"""
Helper for indexing into the usable IP range of a network
"""
def __init__(self, network, offset):
self.network = network
self.offset = offset
def __getitem__(self, index):
if index < 0:
# Skip the broadcast address
ip = DummyIP(self.network[index - 1])
if ip < self.network[self.offset]:
raise IndexError("Index too small")
else:
ip = DummyIP(self.network[index + self.offset])
if ip >= self.network.broadcast_address:
raise IndexError("Index too big")
return ip
@total_ordering
class NetworkInfo(object):
"""
Wrapper around a network
This class should work like IPv[46]Network, but it allows attaching
Aquilon-related metadata, and a few convenience methods.
"""
def __init__(self, name, cidr, nettype, loc_type, loc_name, side="a",
autocreate=False, comments=None):
if isinstance(cidr, (IPv4Network, IPv6Network)):
self._network = cidr
else:
self._network = ip_network(text_type(cidr))
self.name = name
self.nettype = nettype
self.reserved = list()
self.loc_type = loc_type
self.loc_name = loc_name
self.side = side
self.comments = comments
if isinstance(autocreate, bool):
self.autocreate = autocreate
elif autocreate == "True":
self.autocreate = True
elif autocreate == "False":
self.autocreate = False
else:
raise ValueError("Invalid value for autocreate: %r" % autocreate)
if nettype == 'tor_net':
offsets = [6, 7]
elif nettype == 'tor_net2':
offsets = [7, 8]
elif nettype == 'vm_storage_net':
offsets = [8]
else:
offsets = []
for offset in offsets:
self.reserved.append(DummyIP(self[offset]))
first_usable = max(offsets or [4]) + 1
self.usable = IPGenerator(self, first_usable)
def __getattr__(self, name):
# Proxy lookups to the wrapped network object
if "_network" not in self.__dict__:
# Happens while unpickling
raise AttributeError
return getattr(self._network, name)
def __getitem__(self, idx):
# Cast the result to DummyIP, so the .mac property can be used
return DummyIP(self._network[idx])
def __str__(self):
return str(self._network)
def __repr__(self):
return repr(self._network)
def __contains__(self, other):
# Using a network on the left hand side of "in" works with ipaddr, but
# will return the wrong answer with ipaddress.
assert isinstance(other, (IPv4Address, IPv6Address, DummyIP))
return other in self._network
def __eq__(self, other):
if isinstance(other, type(self)):
return self._network == other._network
else:
return self._network == other
def __lt__(self, other):
if isinstance(other, type(self)):
return self._network < other._network
else:
return self._network < other
def __hash__(self):
return hash(self._network)
@property
def gateway(self):
return self[1]
@property
def ip(self):
return DummyIP(self._network.network_address)
def subnet(self, new_prefix=None):
return [NetworkInfo(str(net.network_address), net, self.nettype,
self.loc_type, self.loc_name, self.side)
for net in self._network.subnets(new_prefix=new_prefix)]
def subnets(self, new_prefix=None):
for net in self._network.subnets(new_prefix=new_prefix):
yield NetworkInfo(str(net.network_address), net, self.nettype,
self.loc_type, self.loc_name, self.side)
@property
def is_ipv4(self):
return isinstance(self._network, IPv4Network)
@property
def is_ipv6(self):
return isinstance(self._network, IPv6Network)
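# A small usage sketch (illustrative values only): a 'tor_net' reserves
# offsets 6 and 7, so usable addresses are handed out starting at offset 8.
#     >>> net = NetworkInfo("example", u'192.168.5.0/24', 'tor_net',
#     ...                   'building', 'np')
#     >>> str(net.gateway)
#     '192.168.5.1'
#     >>> str(net.usable[0])
#     '192.168.5.8'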
class DummyNetworks(object):
# Borg
__shared_state = {}
def __init__(self, config, *args, **kwargs):
self.__dict__ = self.__shared_state
if getattr(self, "unknown", None):
return
object.__init__(self, *args, **kwargs)
self.statedir = os.path.join(config.get("unittest", "scratchdir"),
"networks")
self.networks = {}
datadir = config.get("unittest", "datadir")
filename = os.path.join(datadir, "networks.csv")
with open(filename, "r") as datafile:
# Filter out comments
lines = [line for line in datafile if not line.startswith('#')]
reader = DictReader(lines)
for row in reader:
n = NetworkInfo(row["name"], row["cidr"], row["type"],
row["loc_type"], row["loc_name"],
side=row["side"], autocreate=row["autocreate"],
comments=row["comments"])
# Sanity checks
if row["name"] in self.networks:
raise KeyError("Duplicate name '%s' in %s" % (row["name"],
filename))
for existing in itervalues(self.networks):
if n.overlaps(existing):
raise ValueError("Overlapping networks %s and %s in %s"
% (existing, n, filename))
for dynrange in itervalues(SUBNET_RANGE):
if n.overlaps(dynrange):
raise ValueError("Range %s is reserved for dynamic "
"allocation" % dynrange)
self.networks[row["name"]] = n
# Load dynamic networks
if os.path.exists(self.statedir):
for name in os.listdir(self.statedir):
with open(os.path.join(self.statedir, name), "rb") as f:
net = pickle.load(f)
self.networks[net.name] = net
else:
os.makedirs(self.statedir)
def __getitem__(self, name):
return self.networks[name]
def __iter__(self):
for net in itervalues(self.networks):
yield net
def allocate_network(self, testsuite, name, prefixlength, network_type,
loc_type, loc_name, side='a', comments=None):
if prefixlength not in SUBNET_RANGE:
raise ValueError("There's no address range defined for /%d networks"
% prefixlength)
if name in self.networks:
raise ValueError("There's already a network named %s" % name)
net_range = SUBNET_RANGE[prefixlength]
result = None
for net in net_range.subnets(new_prefix=prefixlength):
statefile = os.path.join(self.statedir, "%s" % net.network_address)
if os.path.exists(statefile):
continue
result = NetworkInfo(name, str(net), network_type, loc_type,
loc_name, side)
break
if not result:
raise ValueError("Could not allocate network of size /%d" %
prefixlength)
command = ["add_network", "--network", name,
"--ip", result.network_address,
"--netmask", result.netmask,
"--" + loc_type, loc_name, "--type", network_type]
if comments:
command.extend(["--comments", comments])
testsuite.noouttest(command)
with open(statefile, "wb") as f:
pickle.dump(result, f)
self.networks[name] = result
return result
def dispose_network(self, testsuite, name):
if name not in self.networks:
raise ValueError("Trying to dispose unknown network %s" % name)
net = self.networks[name]
command = ["del_network", "--ip", net.network_address]
testsuite.noouttest(command)
statefile = os.path.join(self.statedir, "%s" % net.network_address)
os.unlink(statefile)
del self.networks[name]
|
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Seetharama Ayyadevara, Freescale Semiconductor, Inc.
# @author: Kyle Mestery, Cisco Systems, Inc.
import distutils.version as dist_version
import sys
import time
import eventlet
from oslo.config import cfg
from neutron.agent import l2population_rpc
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as logging_config
from neutron.common import constants as q_const
from neutron.common import legacy
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import common as rpc_common
from neutron.openstack.common.rpc import dispatcher
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
# A placeholder for dead vlans.
DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1)
# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac'
# attributes set).
class LocalVLANMapping:
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
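# A small usage sketch (illustrative only): the agent keeps one mapping per
# network in self.local_vlan_map, keyed by the neutron network UUID.
#     >>> lvm = LocalVLANMapping(1, 'gre', None, 5001)
#     >>> str(lvm)
#     'lv-id = 1 type = gre phys-net = None phys-id = 5001'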
class Port(object):
"""Represents a neutron port.
Class stores port data in an ORM-free way, so attributes are
still available even if a row has been deleted.
"""
def __init__(self, p):
self.id = p.id
self.network_id = p.network_id
self.device_id = p.device_id
self.admin_state_up = p.admin_state_up
self.status = p.status
def __eq__(self, other):
'''Compare only fields that will cause us to re-wire.'''
try:
return (self and other
and self.id == other.id
and self.admin_state_up == other.admin_state_up)
except Exception:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
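# A small usage sketch (illustrative only; _FakeRow is a stand-in for an ORM
# row): equality deliberately ignores status and device_id, so two snapshots
# only compare as different when a re-wiring attribute changes.
#     >>> class _FakeRow(object):
#     ...     id = 'port-1'; network_id = 'net-1'; device_id = 'vm-1'
#     ...     admin_state_up = True; status = 'ACTIVE'
#     >>> Port(_FakeRow()) == Port(_FakeRow())
#     True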
class OVSPluginApi(agent_rpc.PluginApi,
sg_rpc.SecurityGroupServerRpcApiMixin):
pass
class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
def __init__(self, context, plugin_rpc, root_helper):
self.context = context
self.plugin_rpc = plugin_rpc
self.root_helper = root_helper
self.init_firewall()
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth is used to connect the local VLAN on the integration bridge
with the physical network bridge, with flow rules adding,
modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
def __init__(self, integ_br, tun_br, local_ip,
bridge_mappings, root_helper,
polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False):
'''Constructor.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
'''
self.veth_mtu = veth_mtu
self.root_helper = root_helper
self.available_local_vlans = set(xrange(q_const.MIN_VLAN_TAG,
q_const.MAX_VLAN_TAG))
self.tunnel_types = tunnel_types or []
self.l2_pop = l2_population
self.agent_state = {
'binary': 'neutron-openvswitch-agent',
'host': cfg.CONF.host,
'topic': q_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': self.l2_pop},
'agent_type': q_const.AGENT_TYPE_OVS,
'start_flag': True}
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper)
self.setup_rpc()
self.setup_integration_br()
self.setup_physical_bridges(bridge_mappings)
self.local_vlan_map = {}
self.tun_br_ofports = {constants.TYPE_GRE: {},
constants.TYPE_VXLAN: {}}
self.polling_interval = polling_interval
if tunnel_types:
self.enable_tunneling = True
else:
self.enable_tunneling = False
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port
self._check_ovs_version()
if self.enable_tunneling:
self.setup_tunnel_br(tun_br)
# Collect additional bridges to monitor
self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
# Security group agent support
self.sg_agent = OVSSecurityGroupAgent(self.context,
self.plugin_rpc,
root_helper)
def _check_ovs_version(self):
if constants.TYPE_VXLAN in self.tunnel_types:
check_ovs_version(constants.MINIMUM_OVS_VXLAN_VERSION,
self.root_helper)
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
try:
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
def setup_rpc(self):
mac = self.int_br.get_local_port_mac()
self.agent_id = '%s%s' % ('ovs', (mac.replace(":", "")))
self.topic = topics.AGENT
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.dispatcher = self.create_rpc_dispatcher()
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[constants.TUNNEL, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION,
topics.UPDATE, cfg.CONF.host])
self.connection = agent_rpc.create_consumers(self.dispatcher,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in self.local_vlan_map.iteritems():
if vif_id in vlan_mapping.vif_ports:
return network_id
def network_delete(self, context, **kwargs):
LOG.debug(_("network_delete received"))
network_id = kwargs.get('network_id')
LOG.debug(_("Delete %s"), network_id)
# The network may not be defined on this agent
lvm = self.local_vlan_map.get(network_id)
if lvm:
self.reclaim_local_vlan(network_id)
else:
LOG.debug(_("Network %s not used on agent."), network_id)
def port_update(self, context, **kwargs):
LOG.debug(_("port_update received"))
port = kwargs.get('port')
# Validate that port is on OVS
vif_port = self.int_br.get_vif_port_by_id(port['id'])
if not vif_port:
return
if ext_sg.SECURITYGROUPS in port:
self.sg_agent.refresh_firewall()
network_type = kwargs.get('network_type')
segmentation_id = kwargs.get('segmentation_id')
physical_network = kwargs.get('physical_network')
self.treat_vif_port(vif_port, port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
try:
if port['admin_state_up']:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context, port['id'],
self.agent_id)
else:
# update plugin about port status
self.plugin_rpc.update_device_down(self.context, port['id'],
self.agent_id)
except rpc_common.Timeout:
LOG.error(_("RPC timeout while updating port %s"), port['id'])
def tunnel_update(self, context, **kwargs):
LOG.debug(_("tunnel_update received"))
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_id = kwargs.get('tunnel_id', tunnel_ip)
if not tunnel_id:
tunnel_id = tunnel_ip
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_("No tunnel_type specified, cannot create tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = '%s-%s' % (tunnel_type, tunnel_id)
if not self.l2_pop:
self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type)
def fdb_add(self, context, fdb_entries):
LOG.debug(_("fdb_add received"))
for network_id, values in fdb_entries.items():
lvm = self.local_vlan_map.get(network_id)
if not lvm:
# Agent doesn't manage any port in this network
continue
agent_ports = values.get('ports')
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
self.tun_br.defer_apply_on()
for agent_ip, ports in agent_ports.items():
# Ensure we have a tunnel port with this remote agent
ofport = self.tun_br_ofports[
lvm.network_type].get(agent_ip)
if not ofport:
port_name = '%s-%s' % (lvm.network_type, agent_ip)
ofport = self.setup_tunnel_port(port_name, agent_ip,
lvm.network_type)
if ofport == 0:
continue
for port in ports:
self._add_fdb_flow(port, agent_ip, lvm, ofport)
self.tun_br.defer_apply_off()
def fdb_remove(self, context, fdb_entries):
LOG.debug(_("fdb_remove received"))
for network_id, values in fdb_entries.items():
lvm = self.local_vlan_map.get(network_id)
if not lvm:
# Agent doesn't manage any more ports in this network
continue
agent_ports = values.get('ports')
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
self.tun_br.defer_apply_on()
for agent_ip, ports in agent_ports.items():
ofport = self.tun_br_ofports[
lvm.network_type].get(agent_ip)
if not ofport:
continue
for port in ports:
self._del_fdb_flow(port, agent_ip, lvm, ofport)
self.tun_br.defer_apply_off()
def _add_fdb_flow(self, port_info, agent_ip, lvm, ofport):
if port_info == q_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
ofports = ','.join(lvm.tun_ofports)
self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
priority=1,
dl_vlan=lvm.vlan,
actions="strip_vlan,set_tunnel:%s,"
"output:%s" % (lvm.segmentation_id, ofports))
else:
# TODO(feleouet): add ARP responder entry
self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
priority=2,
dl_vlan=lvm.vlan,
dl_dst=port_info[0],
actions="strip_vlan,set_tunnel:%s,output:%s" %
(lvm.segmentation_id, ofport))
def _del_fdb_flow(self, port_info, agent_ip, lvm, ofport):
if port_info == q_const.FLOODING_ENTRY:
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
ofports = ','.join(lvm.tun_ofports)
self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
priority=1,
dl_vlan=lvm.vlan,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(lvm.segmentation_id, ofports))
else:
# This local VLAN doesn't require any more tunneling
self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN,
dl_vlan=lvm.vlan)
# Check if this tunnel port is still used
self.cleanup_tunnel_port(ofport, lvm.network_type)
else:
#TODO(feleouet): remove ARP responder entry
self.tun_br.delete_flows(table=constants.UCAST_TO_TUN,
dl_vlan=lvm.vlan,
dl_dst=port_info[0])
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return dispatcher.RpcDispatcher([self])
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id):
'''Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
if not self.available_local_vlans:
LOG.error(_("No local VLAN available for net-id=%s"), net_uuid)
return
lvid = self.available_local_vlans.pop()
LOG.info(_("Assigning %(vlan_id)s as local vlan for "
"net-id=%(net_uuid)s"),
{'vlan_id': lvid, 'net_uuid': net_uuid})
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type,
physical_network,
segmentation_id)
if network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
# outbound broadcast/multicast
ofports = ','.join(self.tun_br_ofports[network_type].values())
if ofports:
self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
priority=1,
dl_vlan=lvid,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(segmentation_id, ofports))
# inbound from tunnels: set lvid in the right table
# and resubmit to Table LEARN_FROM_TUN for mac learning
self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
priority=1,
tun_id=segmentation_id,
actions="mod_vlan_vid:%s,resubmit(,%s)" %
(lvid, constants.LEARN_FROM_TUN))
else:
LOG.error(_("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - tunneling disabled"),
{'network_type': network_type,
'net_uuid': net_uuid})
elif network_type == constants.TYPE_FLAT:
if physical_network in self.phys_brs:
# outbound
br = self.phys_brs[physical_network]
br.add_flow(priority=4,
in_port=self.phys_ofports[physical_network],
dl_vlan=lvid,
actions="strip_vlan,normal")
# inbound
self.int_br.add_flow(
priority=3,
in_port=self.int_ofports[physical_network],
dl_vlan=0xffff,
actions="mod_vlan_vid:%s,normal" % lvid)
else:
LOG.error(_("Cannot provision flat network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == constants.TYPE_VLAN:
if physical_network in self.phys_brs:
# outbound
br = self.phys_brs[physical_network]
br.add_flow(priority=4,
in_port=self.phys_ofports[physical_network],
dl_vlan=lvid,
actions="mod_vlan_vid:%s,normal" % segmentation_id)
# inbound
self.int_br.add_flow(priority=3,
in_port=self.
int_ofports[physical_network],
dl_vlan=segmentation_id,
actions="mod_vlan_vid:%s,normal" % lvid)
else:
LOG.error(_("Cannot provision VLAN network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == constants.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_("Cannot provision unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': network_type,
'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
:param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id,
vif_ids) mapping.
'''
lvm = self.local_vlan_map.pop(net_uuid, None)
if lvm is None:
LOG.debug(_("Network %s not used on agent."), net_uuid)
return
LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"),
{'vlan_id': lvm.vlan,
'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.tun_br.delete_flows(
table=constants.TUN_TABLE[lvm.network_type],
tun_id=lvm.segmentation_id)
self.tun_br.delete_flows(dl_vlan=lvm.vlan)
if self.l2_pop:
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(ofport, lvm.network_type)
elif lvm.network_type == constants.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.delete_flows(in_port=self.phys_ofports[lvm.
physical_network],
dl_vlan=lvm.vlan)
# inbound
br = self.int_br
br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
dl_vlan=0xffff)
elif lvm.network_type == constants.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.delete_flows(in_port=self.phys_ofports[lvm.
physical_network],
dl_vlan=lvm.vlan)
# inbound
br = self.int_br
br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
dl_vlan=lvm.segmentation_id)
elif lvm.network_type == constants.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network, segmentation_id):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: an ovs_lib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
if net_uuid not in self.local_vlan_map:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
self.int_br.set_db_attribute("Port", port.port_name, "tag",
str(lvm.vlan))
if int(port.ofport) != -1:
self.int_br.delete_flows(in_port=port.ofport)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port):
'''Once a port has no binding, put it on the "dead vlan".
:param port: an ovs_lib.VifPort object.
'''
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG)
self.int_br.add_flow(priority=2, in_port=port.ofport, actions="drop")
def setup_integration_br(self):
'''Setup the integration bridge.
Create patch ports and remove all existing flows.
'''
self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port)
self.int_br.remove_all_flows()
# switch all traffic using L2 learning
self.int_br.add_flow(priority=1, actions="normal")
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs_bridges = set(ovs_lib.get_bridges(self.root_helper))
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
id = ovs_lib.get_bridge_external_bridge_id(self.root_helper,
bridge)
if id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge, self.root_helper)
LOG.info(_('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def setup_tunnel_br(self, tun_br):
'''Setup the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br: the name of the tunnel bridge.
'''
self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper)
self.tun_br.reset_bridge()
self.patch_tun_ofport = self.int_br.add_patch_port(
cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port)
self.patch_int_ofport = self.tun_br.add_patch_port(
cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port)
if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0:
LOG.error(_("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this version "
"of OVS does not support tunnels or patch ports. "
"Agent terminated!"))
exit(1)
self.tun_br.remove_all_flows()
# Table 0 (default) will sort incoming traffic depending on in_port
self.tun_br.add_flow(priority=1,
in_port=self.patch_int_ofport,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN)
self.tun_br.add_flow(priority=0, actions="drop")
# PATCH_LV_TO_TUN table will handle packets coming from patch_int
# unicasts go to table UCAST_TO_TUN where remote addresses are learnt
self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
actions="resubmit(,%s)" % constants.UCAST_TO_TUN)
# Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions="resubmit(,%s)" % constants.FLOOD_TO_TUN)
# Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id
# for each tunnel type, and resubmit to table LEARN_FROM_TUN where
# remote MAC addresses will be learnt
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop")
# LEARN_FROM_TUN table will have a single flow using a learn action to
# dynamically set up flows in UCAST_TO_TUN corresponding to remote MAC
# addresses (assumes that lvid has already been set by a previous flow)
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
# Once remote MAC addresses are learnt, packets are output to patch_int
self.tun_br.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.patch_int_ofport))
# Egress unicast will be handled in table UCAST_TO_TUN, where remote
# MAC addresses will be learned. For now, just add a default flow that
# will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
# as broadcasts/multicasts
self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN)
# FLOOD_TO_TUN will handle flooding in tunnels based on lvid,
# for now, add a default drop action
self.tun_br.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
def setup_physical_bridges(self, bridge_mappings):
'''Setup the physical network bridges.
Creates physical network bridges and links them to the
integration bridge using veths.
:param bridge_mappings: map physical network names to bridge names.
'''
self.phys_brs = {}
self.int_ofports = {}
self.phys_ofports = {}
ip_wrapper = ip_lib.IPWrapper(self.root_helper)
for physical_network, bridge in bridge_mappings.iteritems():
LOG.info(_("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s"),
{'physical_network': physical_network,
'bridge': bridge})
# setup physical bridge
if not ip_lib.device_exists(bridge, self.root_helper):
LOG.error(_("Bridge %(bridge)s for physical network "
"%(physical_network)s does not exist. Agent "
"terminated!"),
{'physical_network': physical_network,
'bridge': bridge})
sys.exit(1)
br = ovs_lib.OVSBridge(bridge, self.root_helper)
br.remove_all_flows()
br.add_flow(priority=1, actions="normal")
self.phys_brs[physical_network] = br
# create veth to patch physical bridge with integration bridge
int_veth_name = constants.VETH_INTEGRATION_PREFIX + bridge
self.int_br.delete_port(int_veth_name)
phys_veth_name = constants.VETH_PHYSICAL_PREFIX + bridge
br.delete_port(phys_veth_name)
if ip_lib.device_exists(int_veth_name, self.root_helper):
ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete()
# Give udev a chance to process its rules here, to avoid
# race conditions between commands launched by udev rules
# and the subsequent call to ip_wrapper.add_veth
utils.execute(['/sbin/udevadm', 'settle', '--timeout=10'])
int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name,
phys_veth_name)
self.int_ofports[physical_network] = self.int_br.add_port(int_veth)
self.phys_ofports[physical_network] = br.add_port(phys_veth)
# block all untranslated traffic over veth between bridges
self.int_br.add_flow(priority=2,
in_port=self.int_ofports[physical_network],
actions="drop")
br.add_flow(priority=2,
in_port=self.phys_ofports[physical_network],
actions="drop")
# enable veth to pass traffic
int_veth.link.set_up()
phys_veth.link.set_up()
if self.veth_mtu:
# set up mtu size for veth interfaces
int_veth.link.set_mtu(self.veth_mtu)
phys_veth.link.set_mtu(self.veth_mtu)
def update_ports(self, registered_ports):
ports = self.int_br.get_vif_port_set()
if ports == registered_ports:
return
self.int_br_device_count = len(ports)
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def update_ancillary_ports(self, registered_ports):
ports = set()
for bridge in self.ancillary_brs:
ports |= bridge.get_vif_port_set()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up):
if vif_port:
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id)
else:
self.port_dead(vif_port)
else:
LOG.debug(_("No VIF port for port %s defined on agent."), port_id)
def setup_tunnel_port(self, port_name, remote_ip, tunnel_type):
ofport = self.tun_br.add_tunnel_port(port_name,
remote_ip,
self.local_ip,
tunnel_type,
self.vxlan_udp_port)
if ofport < 0:
LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': tunnel_type, 'ip': remote_ip})
return 0
self.tun_br_ofports[tunnel_type][remote_ip] = ofport
# Add flow in default table to resubmit to the right
# tunneling table (lvid will be set in the latter)
self.tun_br.add_flow(priority=1,
in_port=ofport,
actions="resubmit(,%s)" %
constants.TUN_TABLE[tunnel_type])
ofports = ','.join(self.tun_br_ofports[tunnel_type].values())
if ofports and not self.l2_pop:
# Update flooding flows to include the new tunnel
for network_id, vlan_mapping in self.local_vlan_map.iteritems():
if vlan_mapping.network_type == tunnel_type:
self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
priority=1,
dl_vlan=vlan_mapping.vlan,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(vlan_mapping.segmentation_id,
ofports))
return ofport
def cleanup_tunnel_port(self, tun_ofport, tunnel_type):
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
for remote_ip, ofport in self.tun_br_ofports[tunnel_type].items():
if ofport == tun_ofport:
port_name = '%s-%s' % (tunnel_type, remote_ip)
self.tun_br.delete_port(port_name)
self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
def treat_devices_added(self, devices):
resync = False
self.sg_agent.prepare_devices_filter(devices)
for device in devices:
LOG.info(_("Port %s added"), device)
try:
details = self.plugin_rpc.get_device_details(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("Unable to get port details for "
"%(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
port = self.int_br.get_vif_port_by_id(details['device'])
if 'port_id' in details:
LOG.info(_("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'])
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id)
else:
LOG.debug(_("Device %s not defined on plugin"), device)
if (port and int(port.ofport) != -1):
self.port_dead(port)
return resync
def treat_ancillary_devices_added(self, devices):
resync = False
for device in devices:
LOG.info(_("Ancillary Port %s added"), device)
try:
self.plugin_rpc.get_device_details(self.context, device,
self.agent_id)
except Exception as e:
LOG.debug(_("Unable to get port details for "
"%(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id)
return resync
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_("Attachment %s removed"), device)
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
if details['exists']:
LOG.info(_("Port %s updated."), device)
# Nothing to do regarding local networking
else:
LOG.debug(_("Device %s not defined on plugin"), device)
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_("Attachment %s removed"), device)
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
if details['exists']:
LOG.info(_("Port %s updated."), device)
# Nothing to do regarding local networking
else:
LOG.debug(_("Device %s not defined on plugin"), device)
return resync
def process_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self.treat_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self.treat_devices_removed(port_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def process_ancillary_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info:
resync_a = self.treat_ancillary_devices_added(port_info['added'])
if 'removed' in port_info:
resync_b = self.treat_ancillary_devices_removed(
port_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def tunnel_sync(self):
resync = False
try:
for tunnel_type in self.tunnel_types:
details = self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type)
if not self.l2_pop:
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
tunnel_id = tunnel.get('id', tunnel['ip_address'])
tun_name = '%s-%s' % (tunnel_type, tunnel_id)
self.setup_tunnel_port(tun_name,
tunnel['ip_address'],
tunnel_type)
except Exception as e:
LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"),
{'local_ip': self.local_ip, 'e': e})
resync = True
return resync
def rpc_loop(self):
sync = True
ports = set()
ancillary_ports = set()
tunnel_sync = True
while True:
try:
start = time.time()
if sync:
LOG.info(_("Agent out of sync with plugin!"))
ports.clear()
ancillary_ports.clear()
sync = False
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
LOG.info(_("Agent tunnel out of sync with plugin!"))
tunnel_sync = self.tunnel_sync()
port_info = self.update_ports(ports)
# notify plugin about port deltas
if port_info:
LOG.debug(_("Agent loop has new devices!"))
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info)
ports = port_info['current']
# Treat ancillary devices if they exist
if self.ancillary_brs:
port_info = self.update_ancillary_ports(ancillary_ports)
if port_info:
rc = self.process_ancillary_network_ports(port_info)
ancillary_ports = port_info['current']
sync = sync | rc
except Exception:
LOG.exception(_("Error in agent event loop"))
sync = True
tunnel_sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug(_("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!"),
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def daemon_loop(self):
self.rpc_loop()
def check_ovs_version(min_required_version, root_helper):
LOG.debug(_("Checking OVS version for VXLAN support"))
installed_klm_version = ovs_lib.get_installed_ovs_klm_version()
installed_usr_version = ovs_lib.get_installed_ovs_usr_version(root_helper)
# First check the userspace version
if installed_usr_version:
if dist_version.StrictVersion(
installed_usr_version) < dist_version.StrictVersion(
min_required_version):
LOG.error(_('Failed userspace version check for Open '
'vSwitch with VXLAN support. To use '
'VXLAN tunnels with OVS, please ensure '
'the OVS version is %s '
'or newer!'), min_required_version)
sys.exit(1)
# Now check the kernel version
if installed_klm_version:
if dist_version.StrictVersion(
installed_klm_version) < dist_version.StrictVersion(
min_required_version):
LOG.error(_('Failed kernel version check for Open '
'vSwitch with VXLAN support. To use '
'VXLAN tunnels with OVS, please ensure '
'the OVS version is %s or newer!'),
min_required_version)
raise SystemExit(1)
else:
LOG.warning(_('Cannot determine kernel Open vSwitch version, '
'please ensure your Open vSwitch kernel module '
'is at least version %s to support VXLAN '
'tunnels.'), min_required_version)
else:
LOG.warning(_('Unable to determine Open vSwitch version. Please '
'ensure that its version is %s or newer to use VXLAN '
'tunnels with OVS.'), min_required_version)
raise SystemExit(1)
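# A minimal sketch (illustrative version strings) of the comparison used
# above:
#     >>> dist_version.StrictVersion('1.9.3') < dist_version.StrictVersion('1.10.0')
#     True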
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
kwargs = dict(
integ_br=config.OVS.integration_bridge,
tun_br=config.OVS.tunnel_bridge,
local_ip=config.OVS.local_ip,
bridge_mappings=bridge_mappings,
root_helper=config.AGENT.root_helper,
polling_interval=config.AGENT.polling_interval,
tunnel_types=config.AGENT.tunnel_types,
veth_mtu=config.AGENT.veth_mtu,
l2_population=config.AGENT.l2_population,
)
# If enable_tunneling is TRUE, set tunnel_type to default to GRE
if config.OVS.enable_tunneling and not kwargs['tunnel_types']:
kwargs['tunnel_types'] = [constants.TYPE_GRE]
# Verify the tunnel_types specified are valid
for tun in kwargs['tunnel_types']:
if tun not in constants.TUNNEL_NETWORK_TYPES:
msg = _('Invalid tunnel type specified: %s') % tun
raise ValueError(msg)
if not kwargs['local_ip']:
msg = _('Tunneling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
def main():
eventlet.monkey_patch()
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF(project='neutron')
logging_config.setup_logging(cfg.CONF)
legacy.modernize_quantum_config(cfg.CONF)
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError as e:
LOG.error(_('%s Agent terminated!'), e)
sys.exit(1)
is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper']
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.set_default('ip_lib_force_root', True)
plugin = OVSNeutronAgent(**agent_config)
# Start everything.
LOG.info(_("Agent initialized successfully, now running... "))
plugin.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from botocore.docs.params import RequestParamsDocumenter
from botocore.docs.params import ResponseParamsDocumenter
from botocore.docs.example import ResponseExampleDocumenter
from botocore.docs.example import RequestExampleDocumenter
def get_instance_public_methods(instance):
"""Retrieves an objects public methods
:param instance: The instance of the class to inspect
:rtype: dict
:returns: A dictionary that represents an instance's methods where
the keys are the name of the methods and the
values are the handler to the method.
"""
instance_members = inspect.getmembers(instance)
instance_methods = {}
for name, member in instance_members:
if not name.startswith('_'):
if inspect.ismethod(member):
instance_methods[name] = member
return instance_methods
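# A small usage sketch (illustrative only): underscore-prefixed names and
# non-method members are filtered out.
#     >>> class Greeter(object):
#     ...     def hello(self):
#     ...         return 'hi'
#     ...     def _internal(self):
#     ...         return 'hidden'
#     >>> sorted(get_instance_public_methods(Greeter()))
#     ['hello']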
def document_model_driven_signature(section, name, operation_model,
include=None, exclude=None):
"""Documents the signature of a model-driven method
:param section: The section to write the documentation to.
:param name: The name of the method
:param operation_model: The operation model for the method
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
params = {}
if operation_model.input_shape:
params = operation_model.input_shape.members
parameter_names = list(params.keys())
if include is not None:
for member in include:
parameter_names.append(member.name)
if exclude is not None:
for member in exclude:
if member in parameter_names:
parameter_names.remove(member)
signature_params = ''
if parameter_names:
signature_params = '**kwargs'
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_signature(section, name, method,
include=None, exclude=None):
"""Documents the signature of a custom method
:param section: The section to write the documentation to.
:param name: The name of the method
:param method: The handle to the method being documented
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
args, varargs, keywords, defaults = inspect.getargspec(method)
args = args[1:]
signature_params = inspect.formatargspec(
args, varargs, keywords, defaults)
signature_params = signature_params.lstrip('(')
signature_params = signature_params.rstrip(')')
section.style.start_sphinx_py_method(name, signature_params)
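# Worked example (hypothetical method) of the rendering above: for
# def create_tags(self, resource, tags, dry_run=False), getargspec() returns
# args ['self', 'resource', 'tags', 'dry_run'] with defaults (False,);
# dropping 'self' and the surrounding parentheses yields
# "resource, tags, dry_run=False".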
def document_custom_method(section, method_name, method):
"""Documents a non-data driven method
:param section: The section to write the documentation to.
:param method_name: The name of the method
:param method: The handle to the method being documented
"""
document_custom_signature(
section, method_name, method)
method_intro_section = section.add_new_section('method-intro')
method_intro_section.writeln('')
doc_string = inspect.getdoc(method)
if doc_string is not None:
method_intro_section.style.write_py_doc_string(doc_string)
def document_model_driven_method(section, method_name, operation_model,
event_emitter, method_description=None,
example_prefix=None, include_input=None,
include_output=None, exclude_input=None,
exclude_output=None, document_output=True,
include_signature=True):
"""Documents an individual method
:param section: The section to write to
:param method_name: The name of the method
:param operation_model: The model of the operation
:param event_emitter: The event emitter to use to emit events
:param example_prefix: The prefix to use in the method example.
:type include_input: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_input: The parameter shapes to include in the
input documentation.
:type include_output: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_output: The parameter shapes to include in the
output documentation.
:type exclude_input: List of the names of the parameters to exclude.
:param exclude_input: The names of the parameters to exclude from
input documentation.
:type exclude_output: List of the names of the parameters to exclude.
:param exclude_output: The names of the parameters to exclude from
output documentation.
:param document_output: A boolean flag to indicate whether to
document the output.
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
# Add the signature if specified.
if include_signature:
document_model_driven_signature(
section, method_name, operation_model, include=include_input,
exclude=exclude_input)
# Add the description for the method.
method_intro_section = section.add_new_section('method-intro')
method_intro_section.include_doc_string(method_description)
# Add the example section.
example_section = section.add_new_section('example')
example_section.style.new_paragraph()
example_section.style.bold('Request Syntax')
context = {
'special_shape_types': {
'streaming_input_shape': operation_model.get_streaming_input(),
'streaming_output_shape': operation_model.get_streaming_output()
}
}
if operation_model.input_shape:
RequestExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_example(
example_section, operation_model.input_shape,
prefix=example_prefix, include=include_input,
exclude=exclude_input)
else:
example_section.style.new_paragraph()
example_section.style.start_codeblock()
example_section.write(example_prefix + '()')
# Add the request parameter documentation.
request_params_section = section.add_new_section('request-params')
if operation_model.input_shape:
RequestParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_params(
request_params_section, operation_model.input_shape,
include=include_input, exclude=exclude_input)
# Add the return value documentation
return_section = section.add_new_section('return')
return_section.style.new_line()
if operation_model.output_shape is not None and document_output:
return_section.write(':rtype: dict')
return_section.style.new_line()
return_section.write(':returns: ')
return_section.style.indent()
return_section.style.new_line()
# Add an example return value
return_example_section = return_section.add_new_section('example')
return_example_section.style.new_line()
return_example_section.style.bold('Response Syntax')
return_example_section.style.new_paragraph()
ResponseExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_example(
return_example_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
# Add a description for the return value
return_description_section = return_section.add_new_section(
'description')
return_description_section.style.new_line()
return_description_section.style.bold('Response Structure')
return_description_section.style.new_paragraph()
ResponseParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_params(
return_description_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
else:
return_section.write(':returns: None')
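# Hedged usage sketch (illustrative, not part of the original module): how a
# docstring generator might drive document_model_driven_method for a client
# operation. The DocumentStructure import path and the attributes accessed on
# ``client.meta`` are assumptions based on botocore's layout.
def _example_document_operation(client, py_method_name, operation_name):
    from botocore.docs.bcdoc.restdoc import DocumentStructure  # assumed path
    operation_model = client.meta.service_model.operation_model(operation_name)
    section = DocumentStructure(py_method_name)
    document_model_driven_method(
        section, py_method_name, operation_model,
        event_emitter=client.meta.events,
        method_description=operation_model.documentation,
        example_prefix='response = client.%s' % py_method_name)
    # Render the accumulated sections to a single documentation string.
    return section.flush_structure()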
|
|
"""Generic Z-Wave Entity Classes."""
import copy
import logging
from openzwavemqtt.const import (
EVENT_INSTANCE_STATUS_CHANGED,
EVENT_VALUE_CHANGED,
OZW_READY_STATES,
CommandClass,
ValueIndex,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from . import const
from .const import DOMAIN, PLATFORMS
from .discovery import check_node_schema, check_value_schema
_LOGGER = logging.getLogger(__name__)
OZW_READY_STATES_VALUES = {st.value for st in OZW_READY_STATES}
class ZWaveDeviceEntityValues:
"""Manages entity access to the underlying Z-Wave value objects."""
def __init__(self, hass, options, schema, primary_value):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
# Go through values listed in the discovery schema, initialize them,
# and add a check to the schema to make sure the Instance matches.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
def async_setup(self):
"""Set up values instance."""
# Check values that have already been discovered for node
        # and see if they match the schema and need to be added to the entity.
for value in self._node.values():
self.async_check_value(value)
# Check if all the _required_ values in the schema are present and
# create the entity.
self._async_check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values.get(name, None)
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def __contains__(self, name):
"""Check if the specified name/key exists in the values."""
return name in self._values
@callback
def async_check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
# Make sure the node matches the schema for this entity.
if not check_node_schema(value.node, self._schema):
return
# Go through the possible values for this entity defined by the schema.
for name, name_value in self._values.items():
# Skip if it's already been added.
if name_value is not None:
continue
# Skip if the value doesn't match the schema.
if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
continue
# Add value to mapping.
self._values[name] = value
# If the entity has already been created, notify it of the new value.
if self._entity_created:
async_dispatcher_send(
self._hass, f"{DOMAIN}_{self.values_id}_value_added"
)
# Check if entity has all required values and create the entity if needed.
self._async_check_entity_ready()
@callback
def _async_check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
# Abort if the entity has already been created
if self._entity_created:
return
# Go through values defined in the schema and abort if a required value is missing.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
if self._values[name] is None and not disc_settings.get(
const.DISC_OPTIONAL
):
return
# We have all the required values, so create the entity.
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug(
"Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Index=%s, Value type=%s, "
"Genre=%s as %s",
self._node.node_id,
self._node.node_generic,
self._node.node_specific,
self.primary.command_class,
self.primary.index,
self.primary.type,
self.primary.genre,
component,
)
self._entity_created = True
if component in PLATFORMS:
async_dispatcher_send(self._hass, f"{DOMAIN}_new_{component}", self)
@property
def values_id(self):
"""Identification for this values collection."""
return create_value_id(self.primary)
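# Hedged illustration (not original code): thanks to __getattr__/__iter__ above,
# a platform entity can read discovered values by their schema names, e.g.
#     level = self.values.primary.value        # the primary discovered value
#     duration = self.values.dimming_duration  # None until that value is found
#     for value in self.values: ...            # iterate everything discovered
# "dimming_duration" is a purely illustrative schema key.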
class ZWaveDeviceEntity(Entity):
"""Generic Entity Class for a Z-Wave Device."""
def __init__(self, values):
"""Initialize a generic Z-Wave device entity."""
self.values = values
self.options = values.options
@callback
def on_value_update(self):
"""Call when a value is added/updated in the entity EntityValues Collection.
To be overridden by platforms needing this event.
"""
async def async_added_to_hass(self):
"""Call when entity is added."""
# Add dispatcher and OZW listeners callbacks.
# Add to on_remove so they will be cleaned up on entity removal.
self.async_on_remove(
self.options.listen(EVENT_VALUE_CHANGED, self._value_changed)
)
self.async_on_remove(
self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.values.values_id}_value_added",
self._value_added,
)
)
@property
def device_info(self):
"""Return device information for the device registry."""
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(
CommandClass.VERSION, ValueIndex.VERSION_APPLICATION
)
device_info = {
"identifiers": {(DOMAIN, dev_id)},
"name": create_device_name(node),
"manufacturer": node.node_manufacturer_name,
"model": node.node_product_name,
}
if node_firmware is not None:
device_info["sw_version"] = node_firmware.value
# device with multiple instances is split up into virtual devices for each instance
if node_instance > 1:
parent_dev_id = create_device_id(node)
device_info["name"] += f" - Instance {node_instance}"
device_info["via_device"] = (DOMAIN, parent_dev_id)
return device_info
@property
def extra_state_attributes(self):
"""Return the device specific state attributes."""
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
@property
def name(self):
"""Return the name of the entity."""
node = self.values.primary.node
return f"{create_device_name(node)}: {self.values.primary.label}"
@property
def unique_id(self):
"""Return the unique_id of the entity."""
return self.values.values_id
@property
def available(self) -> bool:
"""Return entity availability."""
# Use OZW Daemon status for availability.
instance_status = self.values.primary.ozw_instance.get_status()
return instance_status and instance_status.status in OZW_READY_STATES_VALUES
@callback
def _value_changed(self, value):
"""Call when a value from ZWaveDeviceEntityValues is changed.
Should not be overridden by subclasses.
"""
if value.value_id_key in (v.value_id_key for v in self.values if v):
self.on_value_update()
self.async_write_ha_state()
@callback
def _value_added(self):
"""Call when a value from ZWaveDeviceEntityValues is added.
Should not be overridden by subclasses.
"""
self.on_value_update()
@callback
def _instance_updated(self, new_status):
"""Call when the instance status changes.
Should not be overridden by subclasses.
"""
self.on_value_update()
self.async_write_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
async def _delete_callback(self, values_id):
"""Remove this entity."""
if not self.values:
return # race condition: delete already requested
if values_id == self.values.values_id:
await self.async_remove(force_remove=True)
def create_device_name(node: OZWNode):
"""Generate sensible (short) default device name from a OZWNode."""
# Prefer custom name set by OZWAdmin if present
if node.node_name:
return node.node_name
# Prefer short devicename from metadata if present
if node.meta_data and node.meta_data.get("Name"):
return node.meta_data["Name"]
# Fallback to productname or devicetype strings
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
# Last resort: use Node id (should never happen, but just in case)
return f"Node {node.id}"
def create_device_id(node: OZWNode, node_instance: int = 1):
"""Generate unique device_id from a OZWNode."""
ozw_instance = node.parent.id
dev_id = f"{ozw_instance}.{node.node_id}.{node_instance}"
return dev_id
def create_value_id(value: OZWValue):
"""Generate unique value_id from an OZWValue."""
# [OZW_INSTANCE_ID]-[NODE_ID]-[VALUE_ID_KEY]
return f"{value.node.parent.id}-{value.node.id}-{value.value_id_key}"
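# Hedged illustration (not original code): for a value on ozw instance 1,
# node 5, endpoint instance 2, with value_id_key 281475149807635, the helpers
# above produce the device id "1.5.2" and the value id "1-5-281475149807635".
# The numbers are examples only.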
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PipelineRunner, an abstract base runner object."""
from __future__ import absolute_import
import importlib
import logging
import os
import shelve
import shutil
import tempfile
from builtins import object
__all__ = ['PipelineRunner', 'PipelineState', 'PipelineResult']
_ALL_KNOWN_RUNNERS = (
'apache_beam.runners.dataflow.dataflow_runner.DataflowRunner',
'apache_beam.runners.direct.direct_runner.BundleBasedDirectRunner',
'apache_beam.runners.direct.direct_runner.DirectRunner',
'apache_beam.runners.direct.direct_runner.SwitchingDirectRunner',
'apache_beam.runners.portability.flink_runner.FlinkRunner',
'apache_beam.runners.portability.portable_runner.PortableRunner',
'apache_beam.runners.test.TestDirectRunner',
'apache_beam.runners.test.TestDataflowRunner',
)
_KNOWN_RUNNER_NAMES = [path.split('.')[-1] for path in _ALL_KNOWN_RUNNERS]
_RUNNER_MAP = {path.split('.')[-1].lower(): path
for path in _ALL_KNOWN_RUNNERS}
# Allow this alias, but don't make public.
_RUNNER_MAP['pythonrpcdirectrunner'] = (
'apache_beam.runners.experimental'
'.python_rpc_direct.python_rpc_direct_runner.PythonRPCDirectRunner')
def create_runner(runner_name):
"""For internal use only; no backwards-compatibility guarantees.
Creates a runner instance from a runner class name.
Args:
runner_name: Name of the pipeline runner. Possible values are listed in
_RUNNER_MAP above.
Returns:
A runner object.
Raises:
RuntimeError: if an invalid runner name is used.
"""
# Get the qualified runner name by using the lower case runner name. If that
# fails try appending the name with 'runner' and check if it matches.
# If that also fails, use the given runner name as is.
runner_name = _RUNNER_MAP.get(
runner_name.lower(),
_RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))
if '.' in runner_name:
module, runner = runner_name.rsplit('.', 1)
try:
return getattr(importlib.import_module(module), runner)()
except ImportError:
if 'dataflow' in runner_name.lower():
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
else:
raise
else:
raise ValueError(
'Unexpected pipeline runner: %s. Valid values are %s '
'or the fully qualified name of a PipelineRunner subclass.' % (
runner_name, ', '.join(_KNOWN_RUNNER_NAMES)))
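# Hedged usage sketch (not part of the original module): create_runner accepts
# either a short, case-insensitive name from _RUNNER_MAP or a fully qualified
# class path. Wrapped in a helper so nothing executes at import time.
def _create_runner_examples():
  direct = create_runner('DirectRunner')
  portable = create_runner(
      'apache_beam.runners.portability.portable_runner.PortableRunner')
  return direct, portable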
class PipelineRunner(object):
"""A runner of a pipeline object.
The base runner provides a run() method for visiting every node in the
pipeline's DAG and executing the transforms computing the PValue in the node.
A custom runner will typically provide implementations for some of the
transform methods (ParDo, GroupByKey, Create, etc.). It may also
provide a new implementation for clear_pvalue(), which is used to wipe out
materialized values in order to reduce footprint.
"""
def run(self, transform, options=None):
"""Run the given transform or callable with this runner.
Blocks until the pipeline is complete. See also `PipelineRunner.run_async`.
"""
result = self.run_async(transform, options)
result.wait_until_finish()
return result
def run_async(self, transform, options=None):
"""Run the given transform or callable with this runner.
May return immediately, executing the pipeline in the background.
The returned result object can be queried for progress, and
`wait_until_finish` may be called to block until completion.
"""
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import PTransform
from apache_beam.pvalue import PBegin
from apache_beam.pipeline import Pipeline
p = Pipeline(runner=self, options=options)
if isinstance(transform, PTransform):
p | transform
else:
transform(PBegin(p))
return p.run()
def run_pipeline(self, pipeline, options):
"""Execute the entire pipeline or the sub-DAG reachable from a node.
Runners should override this method.
"""
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
class RunVisitor(PipelineVisitor):
def __init__(self, runner):
self.runner = runner
def visit_transform(self, transform_node):
try:
self.runner.run_transform(transform_node, options)
except:
logging.error('Error while visiting %s', transform_node.full_label)
raise
pipeline.visit(RunVisitor(self))
def apply(self, transform, input, options):
"""Runner callback for a pipeline.apply call.
Args:
transform: the transform to apply.
input: transform's input (typically a PCollection).
A concrete implementation of the Runner class may want to do custom
pipeline construction for a given transform. To override the behavior
for a transform class Xyz, implement an apply_Xyz method with this same
signature.
"""
for cls in transform.__class__.mro():
m = getattr(self, 'apply_%s' % cls.__name__, None)
if m:
return m(transform, input, options)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (transform, self))
def apply_PTransform(self, transform, input, options):
# The base case of apply is to call the transform's expand.
return transform.expand(input)
def run_transform(self, transform_node, options):
"""Runner callback for a pipeline.run call.
Args:
transform_node: transform node for the transform to run.
A concrete implementation of the Runner class must implement run_Abc for
some class Abc in the method resolution order for every non-composite
transform Xyz in the pipeline.
"""
for cls in transform_node.transform.__class__.mro():
m = getattr(self, 'run_%s' % cls.__name__, None)
if m:
return m(transform_node, options)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (
transform_node.transform, self))
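# Hedged sketch (illustrative only, not part of the original module): a minimal
# runner relying on the run_%s / apply_%s dispatch implemented above. PTransform
# sits in every transform's MRO, so run_PTransform acts as a catch-all hook.
class _LoggingRunner(PipelineRunner):

  def run_pipeline(self, pipeline, options):
    super(_LoggingRunner, self).run_pipeline(pipeline, options)
    return PipelineResult(PipelineState.DONE)

  def run_PTransform(self, transform_node, options):
    # Called by run_transform for every primitive transform that is visited.
    logging.info('Visited %s', transform_node.full_label)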
class PValueCache(object):
"""For internal use only; no backwards-compatibility guarantees.
Local cache for arbitrary information computed for PValue objects."""
def __init__(self, use_disk_backed_cache=False):
# Cache of values computed while a runner executes a pipeline. This is a
# dictionary of PValues and their computed values. Note that in principle
# the runner could contain PValues from several pipelines without clashes
# since a PValue is associated with one and only one pipeline. The keys of
# the dictionary are tuple of PValue instance addresses obtained using id()
# and tag names converted to strings.
self._use_disk_backed_cache = use_disk_backed_cache
if use_disk_backed_cache:
self._tempdir = tempfile.mkdtemp()
self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))
else:
self._cache = {}
def __del__(self):
if self._use_disk_backed_cache:
self._cache.close()
shutil.rmtree(self._tempdir)
def __len__(self):
return len(self._cache)
def to_cache_key(self, transform, tag):
return transform.full_label, tag
def _ensure_pvalue_has_real_producer(self, pvalue):
"""Ensure the passed-in PValue has the real_producer attribute.
Args:
pvalue: A PValue instance whose cached value is requested.
During the runner's execution only the results of the primitive transforms
are cached. Whenever we are looking for a PValue that is the output of a
composite transform we need to find the output of its rightmost transform
part.
"""
if not hasattr(pvalue, 'real_producer'):
real_producer = pvalue.producer
while real_producer.parts:
real_producer = real_producer.parts[-1]
pvalue.real_producer = real_producer
def is_cached(self, pobj):
from apache_beam.pipeline import AppliedPTransform
if isinstance(pobj, AppliedPTransform):
transform = pobj
tag = None
else:
self._ensure_pvalue_has_real_producer(pobj)
transform = pobj.real_producer
tag = pobj.tag
return self.to_cache_key(transform, tag) in self._cache
def cache_output(self, transform, tag_or_value, value=None):
if value is None:
value = tag_or_value
tag = None
else:
tag = tag_or_value
self._cache[
self.to_cache_key(transform, tag)] = value
def get_pvalue(self, pvalue):
"""Gets the value associated with a PValue from the cache."""
self._ensure_pvalue_has_real_producer(pvalue)
try:
return self._cache[self.key(pvalue)]
except KeyError:
if (pvalue.tag is not None
and self.to_cache_key(pvalue.real_producer, None) in self._cache):
# This is an undeclared, empty output of a DoFn executed
# in the local runner before this output was referenced.
return []
else:
raise
def get_unwindowed_pvalue(self, pvalue):
return [v.value for v in self.get_pvalue(pvalue)]
def clear_pvalue(self, pvalue):
"""Removes a PValue from the cache."""
if self.is_cached(pvalue):
del self._cache[self.key(pvalue)]
def key(self, pobj):
self._ensure_pvalue_has_real_producer(pobj)
return self.to_cache_key(pobj.real_producer, pobj.tag)
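# Hedged illustration (not original code): cache keys produced by
# to_cache_key()/key() above are (full_label, tag) tuples, for example
# ('Read/ReadFromText', None) for a main output or ('MyDoFn', 'side') for a
# tagged output. The labels are illustrative only.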
class PipelineState(object):
"""State of the Pipeline, as returned by :attr:`PipelineResult.state`.
This is meant to be the union of all the states any runner can put a
pipeline in. Currently, it represents the values of the dataflow
API JobState enum.
"""
UNKNOWN = 'UNKNOWN' # not specified by a runner, or unknown to a runner.
STARTING = 'STARTING' # not yet started
STOPPED = 'STOPPED' # paused or not yet started
RUNNING = 'RUNNING' # currently running
DONE = 'DONE' # successfully completed (terminal state)
FAILED = 'FAILED' # failed (terminal state)
CANCELLED = 'CANCELLED' # explicitly cancelled (terminal state)
UPDATED = 'UPDATED' # replaced by another job (terminal state)
DRAINING = 'DRAINING' # still processing, no longer reading data
DRAINED = 'DRAINED' # draining completed (terminal state)
PENDING = 'PENDING' # the job has been created but is not yet running.
CANCELLING = 'CANCELLING' # job has been explicitly cancelled and is
# in the process of stopping
UNRECOGNIZED = 'UNRECOGNIZED' # the job state reported by a runner cannot be
# interpreted by the SDK.
@classmethod
def is_terminal(cls, state):
return state in [cls.STOPPED, cls.DONE, cls.FAILED, cls.CANCELLED,
cls.UPDATED, cls.DRAINED]
class PipelineResult(object):
"""A :class:`PipelineResult` provides access to info about a pipeline."""
def __init__(self, state):
self._state = state
@property
def state(self):
"""Return the current state of the pipeline execution."""
return self._state
def wait_until_finish(self, duration=None):
"""Waits until the pipeline finishes and returns the final status.
Args:
duration (int): The time to wait (in milliseconds) for job to finish.
If it is set to :data:`None`, it will wait indefinitely until the job
is finished.
Raises:
~exceptions.IOError: If there is a persistent problem getting job
information.
~exceptions.NotImplementedError: If the runner does not support this
operation.
Returns:
The final state of the pipeline, or :data:`None` on timeout.
"""
raise NotImplementedError
def cancel(self):
"""Cancels the pipeline execution.
Raises:
~exceptions.IOError: If there is a persistent problem getting job
information.
~exceptions.NotImplementedError: If the runner does not support this
operation.
Returns:
The final state of the pipeline.
"""
raise NotImplementedError
def metrics(self):
"""Returns :class:`~apache_beam.metrics.metric.MetricResults` object to
query metrics from the runner.
Raises:
~exceptions.NotImplementedError: If the runner does not support this
operation.
"""
raise NotImplementedError
# pylint: disable=unused-argument
def aggregated_values(self, aggregator_or_name):
"""Return a dict of step names to values of the Aggregator."""
logging.warn('%s does not implement aggregated_values',
self.__class__.__name__)
return {}
|
|
# Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from qubell import deprecated
from qubell.api.private.environment import EnvironmentList
from qubell.api.private.revision import Revision
from qubell.api.private.service import ServiceMixin
import re
__author__ = "Vasyl Khomenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "vkhomenko@qubell.com"
import logging as log
import simplejson as json
import time
from qubell.api.tools import lazyproperty
from qubell.api.tools import waitForStatus as waitForStatus
from qubell.api.private import exceptions
from qubell.api.private.common import QubellEntityList, Entity
from qubell.api.provider.router import ROUTER as router
DEAD_STATUS = ['Destroyed']
class Instance(Entity, ServiceMixin):
"""
Base class for application instance. Manifest required.
"""
def __init__(self, organization, id):
self.instanceId = self.id = id
self.organization = organization
self.organizationId = organization.organizationId
self.__cached_json = None
self._last_workflow_started_time = None
@lazyproperty
def application(self):
return self.organization.applications[self.applicationId]
@lazyproperty
def environment(self):
return self.organization.environments[self.environmentId]
@lazyproperty
def applicationId(self): return self.json()['applicationId']
@lazyproperty
def environmentId(self): return self.json()['environmentId']
@lazyproperty
def submodules(self):
# TODO: Public api hack.
# Private returns 'submodules', public returns 'components'
if router.public_api_in_use:
return InstanceList(list_json_method=lambda: self.json()['components'], organization=self.organization)
return InstanceList(list_json_method=lambda: self.json()['submodules'], organization=self.organization)
@property
def status(self): return self.json()['status']
@property
def name(self): return self.json()['name']
@property
def userData(self): return self.json()['userData']
def __parse(self, values):
return {val['id']: val['value'] for val in values}
@property
def return_values(self):
""" Guess what api we are using and return as public api does.
Private has {'id':'key', 'value':'keyvalue'} format, public has {'key':'keyvalue'}
"""
# TODO: Public api hack.
retvals = self.json()['returnValues']
if router.public_api_in_use:
return retvals
return self.__parse(retvals)
@property
def error(self): return self.json()['errorMessage']
@property
def activitylog(self):
return self.get_activitylog()
    def get_activitylog(self, after=None, severity=None, start=None, end=None):
        """
        Return an activityLog object.
        severity - filter by severity (e.g. 'INFO', 'DEBUG')
        start/end - time or log text
        """
if after:
log = router.get_instance_activitylog(org_id=self.organizationId, instance_id=self.instanceId, timestamp=after).json()
        else:
            log = router.get_instance_activitylog(org_id=self.organizationId, instance_id=self.instanceId).json()
return activityLog(log, severity=severity, start=start, end=end)
#aliases
returnValues = return_values
errorMessage = error
@property
def parameters(self):
ins = self.json()
# TODO: Public api hack.
# We do not have 'revision' in public api
if router.public_api_in_use:
return self.json()['parameters']
return self.json()['revision']['parameters']
def __getattr__(self, key):
if key in ['instanceId',]:
raise exceptions.NotFoundError('Unable to get instance property: %s' % key)
if key == 'ready':
log.debug('Checking instance status')
return self.ready()
else:
log.debug('Getting instance attribute: %s' % key)
atr = self.json()[key]
log.debug(atr)
return atr
def _cache_free(self):
"""Frees cache"""
self.__cached_json = None
def fresh(self):
#todo: create decorator from this
if self.__cached_json is None:
return False
now = time.time()
elapsed = (now - self.__last_read_time) * 1000.0
return elapsed < 300
def json(self):
        '''
        Return __cached_json if it was fetched within the last 300 ms.
        This optimizes repeated attribute access within a short time window.
        '''
if self.fresh():
return self.__cached_json
self.__last_read_time = time.time()
self.__cached_json = router.get_instance(org_id=self.organizationId, instance_id=self.instanceId).json()
return self.__cached_json
@staticmethod
def new(application, revision=None, environment=None, name=None, parameters=None, submodules=None, destroyInterval=None):
if not environment:
environment = application.organization.defaultEnvironment
if not parameters: parameters = {}
conf = {}
conf['parameters'] = parameters
conf['environmentId'] = environment.environmentId
if name:
conf['instanceName'] = name
if destroyInterval:
conf['destroyInterval'] = destroyInterval
if revision:
conf['revisionId'] = revision.id
conf['submodules'] = submodules or {}
log.info("Starting instance: %s\n Application: %s (%s)\n Environment: %s (%s)\n Submodules: %s\n destroyInterval: %s" %
(name,
application.name, application.applicationId,
environment.name, environment.environmentId,
submodules, destroyInterval))
log.debug("Instance configuration: %s" % conf)
data = json.dumps(conf)
before_creation = time.gmtime(time.time())
resp = router.post_organization_instance(org_id=application.organizationId, app_id=application.applicationId, data=data)
instance = Instance(organization=application.organization, id=resp.json()['id'])
instance._last_workflow_started_time = before_creation
log.debug("Instance %s (%s) started." % (instance.name, instance.id))
return instance
    def ready(self, timeout=3): # Shortcut for convenience. Timeout = 3 min (ask timeout*6 times every 10 sec)
return waitForStatus(instance=self, final='Running', accepted=['Launching', 'Requested', 'Executing', 'Unknown'], timeout=[timeout*20, 3, 1])
# TODO: Unknown status should be removed
def running(self, timeout=3):
if self.status == 'Running':
log.debug("Instance {} is Running right now".format(self.id))
return True
mrut = self.most_recent_update_time
if mrut:
self._last_workflow_started_time = time.gmtime(time.mktime(mrut) - 1) # skips projection check
return self.ready(timeout)
    def destroyed(self, timeout=3): # Shortcut for convenience. Timeout = 3 min (ask timeout*6 times every 10 sec)
return waitForStatus(instance=self, final='Destroyed', accepted=['Destroying', 'Running', 'Executing'], timeout=[timeout*20, 3, 1])
def run_workflow(self, name, parameters=None):
if not parameters: parameters = {}
log.info("Running workflow %s on instance %s (%s)" % (name, self.name, self.id))
log.debug("Parameters: %s" % parameters)
self._last_workflow_started_time = time.gmtime(time.time())
router.post_instance_workflow(org_id=self.organizationId, instance_id=self.instanceId, wf_name=name, data=json.dumps(parameters))
return True
#alias
run_command = run_workflow
def schedule_workflow(self, name, timestamp, parameters=None):
if not parameters: parameters = {}
log.info("Scheduling workflow %s on instance %s (%s), timestamp: %s" % (name, self.name, self.id, timestamp))
log.debug("Parameters: %s" % parameters)
payload = {'parameters': parameters, 'timestamp':timestamp}
router.post_instance_workflow_schedule(org_id=self.organizationId, instance_id=self.instanceId, wf_name=name, data=json.dumps(payload))
return True
def reschedule_workflow(self, workflow_id, timestamp):
log.info("ReScheduling workflow %s on instance %s (%s), timestamp: %s" % (workflow_id, self.name, self.id, timestamp))
payload = {'timestamp':timestamp}
router.post_instance_reschedule(org_id=self.organizationId, instance_id=self.instanceId, workflow_id=workflow_id, data=json.dumps(payload))
return True
def get_manifest(self):
return router.post_application_refresh(org_id=self.organizationId, app_id=self.applicationId).json()
def reconfigure(self, revision=None, parameters=None, submodules=None):
        #note: be careful refactoring this, or you might have unpredictable results
#todo: private api seems requires at least presence of submodule names if exist
payload = {}
payload['parameters'] = self.parameters
if revision:
payload['revisionId'] = revision.id
if submodules:
payload['submodules'] = submodules
if parameters is not None:
payload['parameters'] = parameters
resp = router.put_instance_configuration(org_id=self.organizationId, instance_id=self.instanceId, data=json.dumps(payload))
return resp.json()
def rename(self, name):
payload = json.dumps({'instanceName': name})
return router.put_instance_configuration(org_id=self.organizationId, instance_id=self.instanceId, data=payload)
def force_remove(self):
return router.delete_instance_force(org_id=self.organizationId, instance_id=self.instanceId)
def cancel_command(self):
return router.post_instance_action(org_id=self.organizationId, instance_id=self.instanceId, action="cancel")
def star(self):
return router.post_instance_action(org_id=self.organizationId, instance_id=self.instanceId, action="star")
def unstar(self):
return router.post_instance_action(org_id=self.organizationId, instance_id=self.instanceId, action="unstar")
def delete(self):
self.destroy()
#todo: remove, if destroyed
return True
def destroy(self):
log.info("Destroying instance %s (%s)" % (self.name, self.id))
return self.run_workflow("destroy")
@property
def serve_environments(self):
return EnvironmentList(lambda: self.json()["environments"], organization=self.organization)
def add_as_service(self, environments=None, environment_ids=None):
        if not environments and not environment_ids:
# Use default if not set
environments = [self.environment,]
if environments:
data = [env.environmentId for env in environments]
else:
assert isinstance(environment_ids, list)
data = environment_ids
router.post_instance_services(org_id=self.organizationId, instance_id=self.instanceId, data=json.dumps(data))
def remove_as_service(self, environments=None):
if not environments:
# Use default if not set
environments = [self.environment,]
for env in environments:
env.remove_service(self)
@property
def serviceId(self):
raise AttributeError("Service is instance reference now, use instanceId")
@property
def most_recent_update_time(self):
"""
        Indicates the most recent update of the instance, based on these assumptions:
- if currentWorkflow exists, its startedAt time is most recent update.
- else max of workflowHistory startedAt is most recent update.
"""
parse_time = lambda t: time.gmtime(t/1000)
j = self.json()
try:
if j['currentWorkflow']:
cw_started_at = j['currentWorkflow']['startedAt']
if cw_started_at:
return parse_time(cw_started_at)
max_wf_started_at = max([i['startedAt'] for i in j['workflowHistory']])
return parse_time(max_wf_started_at)
except ValueError:
return None
    def _is_projection_updated_instance(self):
        """
        Try to guess whether the instance has been updated since the last
        tracked workflow start. True means it definitely was; False means
        the result is unknown.
        :return: bool
        """
last = self._last_workflow_started_time
if not router.public_api_in_use:
most_recent = self.most_recent_update_time
else:
most_recent = None
if last and most_recent:
return last < most_recent
return False # can be more clever
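# Hedged usage sketch (illustrative, not part of the original module): a
# typical lifecycle driven by the helpers above. ``application`` is assumed to
# be an already created qubell Application; names and timeouts are examples.
def _example_instance_lifecycle(application):
    instance = Instance.new(application, name="demo-instance")
    assert instance.ready(timeout=15)        # wait up to 15 minutes for Running
    instance.run_workflow("some-workflow")   # workflow name is illustrative
    instance.destroy()
    assert instance.destroyed(timeout=15)
    return instance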
class InstanceList(QubellEntityList):
base_clz = Instance
class activityLog(object):
TYPES=['status updated', 'signals updated', 'dynamic links updated', 'command started', 'command finished', 'workflow started', 'workflow finished', 'step started', 'step finished']
log=[]
def __init__(self, log, severity=None, start=None, end=None):
def sort(log):
return sorted(log, key=lambda x: x['time'], reverse=False)
self.log = sort(log)
self.severity = severity
if severity:
self.log = [x for x in self.log if x['severity'] in severity]
if start:
self.log = [x for x in self.log if x['time']>=start]
if end:
self.log = [x for x in self.log if x['time']<=end]
def __len__(self):
return len(self.log)
def __iter__(self):
for i in self.log:
yield i
def __str__(self):
        text = 'Severity: %s' % (self.severity or 'ALL')
for x in self.log:
try:
text += '\n{0}: {1}: {2}'.format(x['time'], x['eventTypeText'], x['description'].replace('\n', '\n\t\t'))
except KeyError:
                text += '\n{0}: {1}'.format(x['time'], x['description'].replace('\n', '\n\t\t'))
return text
def __contains__(self, item):
return True if self.find(item) else False
    def __getitem__(self, item):
        """
        Guess what to return based on the item type: index, timestamp or text.
        log[0] returns the first entry.
        log[1402654329064] returns the description of the event with this timestamp.
        log['Status is Running'] returns the time of the matching event, if found.
        """
if isinstance(item, int):
if item>1000000000000:
return ['{0}: {1}'.format(x['eventTypeText'], x['description']) for x in self.log if x['time']==item][0]
return '{0}: {1}'.format(self.log[item]['eventTypeText'], self.log[item]['description'])
elif isinstance(item, str):
return self.find(item)[0]
return False
    def find(self, item, description='', event_type=''):
        """ Find a regexp in the activity log.
        If the item looks like '<type>: <text>' and the type is known, search by
        both event type and description; otherwise search descriptions only.
        #TODO: should be refactored, dumb logic
        """
if ': ' in item:
splited = item.split(': ', 1)
if splited[0] in self.TYPES:
description = item.split(': ')[1]
event_type = item.split(': ')[0]
else:
description = item
else:
if not description:
description = item
if event_type:
found = [x['time'] for x in self.log if re.search(description, x['description']) and x['eventTypeText']==event_type]
else:
found = [x['time'] for x in self.log if re.search(description, x['description'])]
return found if len(found) else None
def get_interval(self, start_text=None, end_text=None):
if start_text:
begin = self.find(start_text)
interval = activityLog(self.log, self.severity, start=begin[0])
else:
interval = self
if end_text:
end = interval.find(end_text)
interval = activityLog(interval, self.severity, end=end[0])
if len(interval):
return interval
raise exceptions.NotFoundError('Activitylog interval not found: [%s , %s]' % (start_text, end_text))
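# Hedged illustration (not original code): the activityLog helpers above allow
# lookups by index, timestamp or text, e.g.
#     log = instance.get_activitylog(severity=['INFO'])
#     log[0]                               # first entry as "<type>: <text>"
#     'workflow started: destroy' in log   # True if a matching entry exists
#     log.get_interval('workflow started: launch', 'workflow finished: launch')
# All event texts shown are examples only.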
|
|
__author__ = 'MegabytePhreak'
import types
def _indent(level):
if level > 0:
return ' '*level
return ''
class AstNode(object):
pass
def pprint(self, level=0):
pass
def __str__(self):
return self.pprint(0)
class Subscript(AstNode):
def __init__(self, name, index):
self.name = name
self.index = index
def pprint(self, level=0):
return "Subscript('%s', %d)" % (self.name, self.index)
class EnumDef(AstNode):
def __init__(self, name, encodings):
self.name = name
self.encodings = encodings
def pprint(self, level=0):
strs = ["EnumDef( '%s', [\n" % self.name]
for encoding in self.encodings:
strs += [_indent(level+2), '%s,\n' % encoding.pprint(abs(level)+2)]
strs += _indent(level+1) + '])'
return ''.join(strs)
class EnumEncoding(AstNode):
def __init__(self, mnemonic, value, properties):
self.mnemonic = mnemonic
self.value = value
self.properties = properties
def pprint(self, level=0):
strs = ["EnumEncoding( '%s', %s, [" % (self.mnemonic, self.value)]
if len(self.properties) > 0:
strs += '\n'
            for prop in self.properties:
strs += [_indent(level+2), '%s,\n' % prop.pprint(level+2)]
strs += _indent(level+1) + '])'
else:
strs += '])'
return ''.join(strs)
class PropAssign(AstNode):
def __init__(self, name, value, set_default=False):
self.name = name
self.value = value
self.set_default = set_default
def pprint(self, level=0):
value = self.value
if hasattr(self.value, 'pprint'):
value = value.pprint(level+1)
elif isinstance(value, types.StringTypes):
value = (value[:57] + '...') if len(value) > 60 else value
value = "'" + value.encode('string-escape') + "'"
if isinstance(self.name, types.StringTypes):
return "PropAssign('%s', %s)" % (self.name, value)
else:
return "PropAssign(%s, %s)" % (self.name, value)
class IntrPropAssign(AstNode):
def __init__(self, name, value, modifers):
self.name = name
self.value = value
self.modifiers = modifers
def pprint(self, level=0):
return "IntrPropAssign(%s, %s, %s)" % (self.name, self.value, self.modifiers)
class InstanceRef(AstNode):
def __init__(self, path, prop = None):
self.path = path
self.prop = prop
def add_child_ref(self, child):
self.path.append(child)
def set_prop(self, prop):
self.prop = prop
def pprint(self, level=0):
strs = ['InstanceRef([']
for i, elem in enumerate(self.path):
if hasattr(elem, 'pprint'):
strs += [elem.pprint(level+1), ', ']
else:
strs += ["'%s', " % elem]
strs[-1] = strs[-1][:-2]
strs += ['],']
if self.prop is not None:
strs += ", '%s')" % self.prop
else:
strs += ')'
return ''.join(strs)
class AccessType(AstNode):
def __init__(self, value):
if value is None:
value = 'rw'
if value not in ['rw', 'wr', 'r', 'w', 'na']:
raise SyntaxError("Illegal AccessType value '%s'" % value)
if value == 'wr':
value = 'rw'
self.value = value
def pprint(self, level=0):
return 'AccessType(%s)' % repr(self.value)
class AddressingType(AstNode):
def __init__(self, value):
if value is None:
value = 'regalign'
if value not in ['compact', 'regalign', 'fullalign']:
            raise SyntaxError("Illegal AddressingType value '%s'" % value)
self.value = value
def pprint(self, level=0):
return 'AddressingType(%s)' % repr(self.value)
class PrecedenceType(AstNode):
def __init__(self, value):
if value is None:
value = 'sw'
if value not in ['hw', 'sw']:
            raise SyntaxError("Illegal PrecedenceType value '%s'" % value)
self.value = value
def pprint(self, level=0):
return 'PrecedenceType(%s)' % repr(self.value)
class CompDef(AstNode):
def __init__(self, ctype, name, elems):
self.type = ctype
self.name = name
self.elems = elems
def pprint(self, level=0):
strs = ["CompDef( '%s', '%s', [\n" % (self.type, self.name)]
for encoding in self.elems:
strs += [_indent(level+1), '%s,\n' % encoding.pprint(abs(level)+1)]
strs += _indent(level+1) + '])'
return ''.join(strs)
class InstParams(AstNode):
def __init__(self, name, array_params, reset_value, addr_alloc):
self.name = name
self.array_params = array_params
self.reset_value = reset_value
self.addr_alloc = addr_alloc
def pprint(self, level=0):
return "InstParams('%s', %s, %s, %s)" % (self.name, self.array_params, self.reset_value, self.addr_alloc)
class CompInst(AstNode):
def __init__(self, compname, instances, location=None):
self.compname = compname
self.instances = instances
self.location = location
def pprint(self, level=0):
strs = ["CompInst( '%s', [" % (self.compname,)]
if len(self.instances) > 1:
strs += '\n'
for instance in self.instances:
strs += [_indent(level+2), '%s,\n' % instance.pprint(abs(level)+2)]
strs += _indent(level+1) + '],\n'
strs += _indent(level+1) + '%s)' % repr(self.location)
else:
if len(self.instances) == 1:
strs += ['%s], ' % self.instances[0].pprint(abs(level)+1)]
else:
strs += '], '
strs += '%s)' % repr(self.location)
return ''.join(strs)
class AnonCompInst(AstNode):
def __init__(self, comptype, compelems, instances, location=None):
self.comp = CompDef(comptype, None, compelems)
self.instances = instances
self.location = location
def pprint(self, level=0):
strs = ["AnonCompInst( '%s', [\n" % (self.comp.type,)]
for encoding in self.comp.elems:
strs += [_indent(level+1), '%s,\n' % encoding.pprint(abs(level)+1)]
strs += _indent(level+1) + '], ['
if len(self.instances) > 1:
strs += '\n'
for instance in self.instances:
strs += [_indent(level+2), '%s,\n' % instance.pprint(abs(level)+2)]
strs += _indent(level+1) + '],\n'
strs += _indent(level+1) + '%s)' % repr(self.location)
else:
if len(self.instances) == 1:
strs += ['%s], ' % self.instances[0].pprint(abs(level)+1)]
else:
strs += '], '
strs += '%s)' % repr(self.location)
return ''.join(strs)
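# Hedged usage sketch (illustrative, not part of the original module): build a
# tiny AST with the classes above and pretty-print it. The enum contents are
# made up for demonstration (Python 2, matching the module above).
def _example_enum_ast():
    enum = EnumDef('status_e', [
        EnumEncoding('OK', 0, []),
        EnumEncoding('ERR', 1, [PropAssign('desc', 'error state')]),
    ])
    return enum.pprint()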
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Routines to generate WSGI responses"""
############################################################
## Headers
############################################################
import warnings
class HeaderDict(dict):
"""
This represents response headers. It handles the headers as a
dictionary, with case-insensitive keys.
Also there is an ``.add(key, value)`` method, which sets the key,
or adds the value to the current value (turning it into a list if
necessary).
For passing to WSGI there is a ``.headeritems()`` method which is
like ``.items()`` but unpacks value that are lists. It also
handles encoding -- all headers are encoded in ASCII (if they are
unicode).
@@: Should that encoding be ISO-8859-1 or UTF-8? I'm not sure
what the spec says.
"""
def __getitem__(self, key):
return dict.__getitem__(self, self.normalize(key))
def __setitem__(self, key, value):
dict.__setitem__(self, self.normalize(key), value)
def __delitem__(self, key):
dict.__delitem__(self, self.normalize(key))
def __contains__(self, key):
return dict.__contains__(self, self.normalize(key))
has_key = __contains__
def get(self, key, failobj=None):
return dict.get(self, self.normalize(key), failobj)
def setdefault(self, key, failobj=None):
return dict.setdefault(self, self.normalize(key), failobj)
def pop(self, key, *args):
return dict.pop(self, self.normalize(key), *args)
def update(self, other):
for key in other:
self[self.normalize(key)] = other[key]
def normalize(self, key):
return str(key).lower().strip()
def add(self, key, value):
key = self.normalize(key)
if key in self:
if isinstance(self[key], list):
self[key].append(value)
else:
self[key] = [self[key], value]
else:
self[key] = value
def headeritems(self):
result = []
for key, value in self.items():
if isinstance(value, list):
for v in value:
result.append((key, str(v)))
else:
result.append((key, str(value)))
return result
#@classmethod
def fromlist(cls, seq):
self = cls()
for name, value in seq:
self.add(name, value)
return self
fromlist = classmethod(fromlist)
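# Hedged usage sketch (illustrative, not part of the original module):
# demonstrates the case-insensitive lookups and list handling described above.
def _example_headerdict():
    headers = HeaderDict.fromlist([('Content-Type', 'text/html')])
    headers.add('Set-Cookie', 'a=1')
    headers.add('Set-Cookie', 'b=2')   # a second add turns the value into a list
    assert headers['content-type'] == 'text/html'
    # headeritems() unpacks list values into separate (key, value) pairs.
    return headers.headeritems()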
def has_header(headers, name):
"""
Is header named ``name`` present in headers?
"""
name = name.lower()
for header, value in headers:
if header.lower() == name:
return True
return False
def header_value(headers, name):
"""
Returns the header's value, or None if no such header. If a
header appears more than once, all the values of the headers
    are joined with ','. Note that this is consistent with RFC 2616
section 4.2 which states:
It MUST be possible to combine the multiple header fields
into one "field-name: field-value" pair, without changing
the semantics of the message, by appending each subsequent
field-value to the first, each separated by a comma.
    However, note that the original Netscape usage of 'Set-Cookie',
    especially when it contains an 'expires' date as in MSIE, is not
    compatible with this particular concatenation method.
"""
name = name.lower()
result = [value for header, value in headers
if header.lower() == name]
if result:
return ','.join(result)
else:
return None
def remove_header(headers, name):
"""
Removes the named header from the list of headers. Returns the
value of that header, or None if no header found. If multiple
headers are found, only the last one is returned.
"""
name = name.lower()
i = 0
result = None
while i < len(headers):
if headers[i][0].lower() == name:
result = headers[i][1]
del headers[i]
continue
i += 1
return result
def replace_header(headers, name, value):
"""
    Updates the headers, replacing the first occurrence of the given name
    with the value provided; asserting that no further occurrences
happen. Note that this is _not_ the same as remove_header and then
append, as two distinct operations (del followed by an append) are
not atomic in a threaded environment. Returns the previous header
value for the provided name, if any. Clearly one should not use
this function with ``set-cookie`` or other names that may have more
    than one occurrence in the headers.
"""
name = name.lower()
i = 0
result = None
while i < len(headers):
if headers[i][0].lower() == name:
assert not result, "two values for the header '%s' found" % name
result = headers[i][1]
headers[i] = (name, value)
i += 1
if not result:
headers.append((name, value))
return result
############################################################
## Deprecated methods
############################################################
def error_body_response(error_code, message, __warn=True):
"""
Returns a standard HTML response page for an HTTP error.
**Note:** Deprecated
"""
if __warn:
warnings.warn(
'wsgilib.error_body_response is deprecated; use the '
'wsgi_application method on an HTTPException object '
'instead', DeprecationWarning, 2)
return '''\
<html>
<head>
<title>%(error_code)s</title>
</head>
<body>
<h1>%(error_code)s</h1>
%(message)s
</body>
</html>''' % {
'error_code': error_code,
'message': message,
}
def error_response(environ, error_code, message,
debug_message=None, __warn=True):
"""
Returns the status, headers, and body of an error response.
Use like:
.. code-block:: python
status, headers, body = wsgilib.error_response(
'301 Moved Permanently', 'Moved to <a href="%s">%s</a>'
% (url, url))
start_response(status, headers)
return [body]
**Note:** Deprecated
"""
if __warn:
warnings.warn(
'wsgilib.error_response is deprecated; use the '
'wsgi_application method on an HTTPException object '
'instead', DeprecationWarning, 2)
if debug_message and environ.get('paste.config', {}).get('debug'):
message += '\n\n<!-- %s -->' % debug_message
body = error_body_response(error_code, message, __warn=False)
headers = [('content-type', 'text/html'),
('content-length', str(len(body)))]
return error_code, headers, body
def error_response_app(error_code, message, debug_message=None,
__warn=True):
"""
An application that emits the given error response.
**Note:** Deprecated
"""
if __warn:
warnings.warn(
'wsgilib.error_response_app is deprecated; use the '
'wsgi_application method on an HTTPException object '
'instead', DeprecationWarning, 2)
def application(environ, start_response):
status, headers, body = error_response(
environ, error_code, message,
debug_message=debug_message, __warn=False)
start_response(status, headers)
return [body]
return application
|
|
from aiolocust.core import HttpLocust, Locust, TaskSet, task, events
from aiolocust import ResponseError, InterruptTaskSet
from aiolocust.exception import CatchResponseError, RescheduleTask, RescheduleTaskImmediately, LocustError
from aiolocust.test.testcases import LocustTestCase, WebserverTestCase
class TestTaskSet(LocustTestCase):
def setUp(self):
super(TestTaskSet, self).setUp()
class User(Locust):
host = "127.0.0.1"
self.locust = User()
def test_task_ratio(self):
t1 = lambda l: None
t2 = lambda l: None
class MyTasks(TaskSet):
tasks = {t1:5, t2:2}
l = MyTasks(self.locust)
t1_count = len([t for t in l.tasks if t == t1])
t2_count = len([t for t in l.tasks if t == t2])
self.assertEqual(t1_count, 5)
self.assertEqual(t2_count, 2)
def test_task_decorator_ratio(self):
t1 = lambda l: None
t2 = lambda l: None
class MyTasks(TaskSet):
tasks = {t1:5, t2:2}
host = ""
@task(3)
def t3(self):
pass
@task(13)
def t4(self):
pass
l = MyTasks(self.locust)
t1_count = len([t for t in l.tasks if t == t1])
t2_count = len([t for t in l.tasks if t == t2])
t3_count = len([t for t in l.tasks if t.__name__ == MyTasks.t3.__name__])
t4_count = len([t for t in l.tasks if t.__name__ == MyTasks.t4.__name__])
self.assertEqual(t1_count, 5)
self.assertEqual(t2_count, 2)
self.assertEqual(t3_count, 3)
self.assertEqual(t4_count, 13)
def test_on_start(self):
class MyTasks(TaskSet):
t1_executed = False
t2_executed = False
def on_start(self):
self.t1()
def t1(self):
self.t1_executed = True
@task
def t2(self):
self.t2_executed = True
raise InterruptTaskSet(reschedule=False)
l = MyTasks(self.locust)
self.assertRaises(RescheduleTask, lambda: l.run())
self.assertTrue(l.t1_executed)
self.assertTrue(l.t2_executed)
def test_schedule_task(self):
self.t1_executed = False
self.t2_arg = None
def t1(l):
self.t1_executed = True
def t2(l, arg):
self.t2_arg = arg
class MyTasks(TaskSet):
tasks = [t1, t2]
taskset = MyTasks(self.locust)
taskset.schedule_task(t1)
taskset.execute_next_task()
self.assertTrue(self.t1_executed)
taskset.schedule_task(t2, args=["argument to t2"])
taskset.execute_next_task()
self.assertEqual("argument to t2", self.t2_arg)
def test_schedule_task_with_kwargs(self):
class MyTasks(TaskSet):
@task
def t1(self):
self.t1_executed = True
@task
def t2(self, *args, **kwargs):
self.t2_args = args
self.t2_kwargs = kwargs
loc = MyTasks(self.locust)
loc.schedule_task(loc.t2, [42], {"test_kw":"hello"})
loc.execute_next_task()
self.assertEqual((42, ), loc.t2_args)
self.assertEqual({"test_kw":"hello"}, loc.t2_kwargs)
loc.schedule_task(loc.t2, args=[10, 4], kwargs={"arg1":1, "arg2":2})
loc.execute_next_task()
self.assertEqual((10, 4), loc.t2_args)
self.assertEqual({"arg1":1, "arg2":2}, loc.t2_kwargs)
def test_schedule_task_bound_method(self):
class MyTasks(TaskSet):
host = ""
@task()
def t1(self):
self.t1_executed = True
self.schedule_task(self.t2)
def t2(self):
self.t2_executed = True
taskset = MyTasks(self.locust)
taskset.schedule_task(taskset.get_next_task())
taskset.execute_next_task()
self.assertTrue(taskset.t1_executed)
taskset.execute_next_task()
self.assertTrue(taskset.t2_executed)
def test_taskset_inheritance(self):
def t1(l):
pass
class MyBaseTaskSet(TaskSet):
tasks = [t1]
host = ""
class MySubTaskSet(MyBaseTaskSet):
@task
def t2(self):
pass
l = MySubTaskSet(self.locust)
self.assertEqual(2, len(l.tasks))
self.assertEqual([t1, MySubTaskSet.t2], l.tasks)
def test_task_decorator_with_or_without_argument(self):
class MyTaskSet(TaskSet):
@task
def t1(self):
pass
taskset = MyTaskSet(self.locust)
self.assertEqual(len(taskset.tasks), 1)
class MyTaskSet2(TaskSet):
@task()
def t1(self):
pass
taskset = MyTaskSet2(self.locust)
self.assertEqual(len(taskset.tasks), 1)
class MyTaskSet3(TaskSet):
@task(3)
def t1(self):
pass
taskset = MyTaskSet3(self.locust)
self.assertEqual(len(taskset.tasks), 3)
def test_sub_taskset(self):
class MySubTaskSet(TaskSet):
min_wait=1
max_wait=1
@task()
def a_task(self):
self.locust.sub_locust_task_executed = True
self.interrupt()
class MyTaskSet(TaskSet):
tasks = [MySubTaskSet]
self.sub_locust_task_executed = False
loc = MyTaskSet(self.locust)
loc.schedule_task(loc.get_next_task())
self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
self.assertTrue(self.locust.sub_locust_task_executed)
def test_sub_taskset_tasks_decorator(self):
class MyTaskSet(TaskSet):
@task
class MySubTaskSet(TaskSet):
min_wait=1
max_wait=1
@task()
def a_task(self):
self.locust.sub_locust_task_executed = True
self.interrupt()
self.sub_locust_task_executed = False
loc = MyTaskSet(self.locust)
loc.schedule_task(loc.get_next_task())
self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
self.assertTrue(self.locust.sub_locust_task_executed)
def test_sub_taskset_arguments(self):
class MySubTaskSet(TaskSet):
min_wait=1
max_wait=1
@task()
def a_task(self):
self.locust.sub_taskset_args = self.args
self.locust.sub_taskset_kwargs = self.kwargs
self.interrupt()
class MyTaskSet(TaskSet):
sub_locust_args = None
sub_locust_kwargs = None
tasks = [MySubTaskSet]
self.locust.sub_taskset_args = None
self.locust.sub_taskset_kwargs = None
loc = MyTaskSet(self.locust)
loc.schedule_task(MySubTaskSet, args=[1,2,3], kwargs={"hello":"world"})
self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task())
self.assertEqual((1,2,3), self.locust.sub_taskset_args)
self.assertEqual({"hello":"world"}, self.locust.sub_taskset_kwargs)
def test_interrupt_taskset_in_main_taskset(self):
class MyTaskSet(TaskSet):
@task
def interrupted_task(self):
raise InterruptTaskSet(reschedule=False)
class MyLocust(Locust):
host = "http://127.0.0.1"
task_set = MyTaskSet
class MyTaskSet2(TaskSet):
@task
def interrupted_task(self):
self.interrupt()
class MyLocust2(Locust):
host = "http://127.0.0.1"
task_set = MyTaskSet2
l = MyLocust()
l2 = MyLocust2()
self.assertRaises(LocustError, lambda: l.run())
self.assertRaises(LocustError, lambda: l2.run())
try:
l.run()
except LocustError as e:
self.assertTrue("MyLocust" in e.args[0], "MyLocust should have been referred to in the exception message")
self.assertTrue("MyTaskSet" in e.args[0], "MyTaskSet should have been referred to in the exception message")
except:
raise
try:
l2.run()
except LocustError as e:
self.assertTrue("MyLocust2" in e.args[0], "MyLocust2 should have been referred to in the exception message")
self.assertTrue("MyTaskSet2" in e.args[0], "MyTaskSet2 should have been referred to in the exception message")
except:
raise
def test_on_start_interrupt(self):
class SubTaskSet(TaskSet):
def on_start(self):
if self.kwargs["reschedule"]:
self.interrupt(reschedule=True)
else:
self.interrupt(reschedule=False)
class MyLocust(Locust):
host = ""
task_set = SubTaskSet
l = MyLocust()
task_set = SubTaskSet(l)
self.assertRaises(RescheduleTaskImmediately, lambda: task_set.run(reschedule=True))
self.assertRaises(RescheduleTask, lambda: task_set.run(reschedule=False))
def test_parent_attribute(self):
from aiolocust.exception import StopLocust
parents = {}
class SubTaskSet(TaskSet):
def on_start(self):
parents["sub"] = self.parent
@task
class SubSubTaskSet(TaskSet):
def on_start(self):
parents["subsub"] = self.parent
@task
def stop(self):
raise StopLocust()
class RootTaskSet(TaskSet):
tasks = [SubTaskSet]
class MyLocust(Locust):
host = ""
task_set = RootTaskSet
l = MyLocust()
l.run()
self.assertTrue(isinstance(parents["sub"], RootTaskSet))
self.assertTrue(isinstance(parents["subsub"], SubTaskSet))
class TestWebLocustClass(WebserverTestCase):
def test_get_request(self):
self.response = ""
def t1(l):
self.response = l.client.get("/ultra_fast")
class MyLocust(HttpLocust):
tasks = [t1]
host = "http://127.0.0.1:%i" % self.port
my_locust = MyLocust()
t1(my_locust)
self.assertEqual(self.response.text, "This is an ultra fast response")
def test_client_request_headers(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("hello", locust.client.get("/request_header_test", headers={"X-Header-Test":"hello"}).text)
def test_client_get(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("GET", locust.client.get("/request_method").text)
def test_client_get_absolute_url(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).text)
def test_client_post(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("POST", locust.client.post("/request_method", {"arg":"hello world"}).text)
self.assertEqual("hello world", locust.client.post("/post", {"arg":"hello world"}).text)
def test_client_put(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("PUT", locust.client.put("/request_method", {"arg":"hello world"}).text)
self.assertEqual("hello world", locust.client.put("/put", {"arg":"hello world"}).text)
def test_client_delete(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual("DELETE", locust.client.delete("/request_method").text)
self.assertEqual(200, locust.client.delete("/request_method").status_code)
def test_client_head(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
locust = MyLocust()
self.assertEqual(200, locust.client.head("/request_method").status_code)
def test_client_basic_auth(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
class MyAuthorizedLocust(HttpLocust):
host = "http://locust:menace@127.0.0.1:%i" % self.port
class MyUnauthorizedLocust(HttpLocust):
host = "http://locust:wrong@127.0.0.1:%i" % self.port
locust = MyLocust()
unauthorized = MyUnauthorizedLocust()
authorized = MyAuthorizedLocust()
response = authorized.client.get("/basic_auth")
self.assertEqual(200, response.status_code)
self.assertEqual("Authorized", response.text)
self.assertEqual(401, locust.client.get("/basic_auth").status_code)
self.assertEqual(401, unauthorized.client.get("/basic_auth").status_code)
def test_log_request_name_argument(self):
from aiolocust.stats import global_stats
self.response = ""
class MyLocust(HttpLocust):
tasks = []
host = "http://127.0.0.1:%i" % self.port
@task()
def t1(l):
self.response = l.client.get("/ultra_fast", name="new name!")
my_locust = MyLocust()
my_locust.t1()
self.assertEqual(1, global_stats.get("new name!", "GET").num_requests)
self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests)
def test_locust_client_error(self):
class MyTaskSet(TaskSet):
@task
def t1(self):
self.client.get("/")
self.interrupt()
class MyLocust(Locust):
host = "http://127.0.0.1:%i" % self.port
task_set = MyTaskSet
my_locust = MyLocust()
self.assertRaises(LocustError, lambda: my_locust.client.get("/"))
my_taskset = MyTaskSet(my_locust)
self.assertRaises(LocustError, lambda: my_taskset.client.get("/"))
def test_redirect_url_original_path_as_name(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
l = MyLocust()
l.client.get("/redirect")
from aiolocust.stats import global_stats
self.assertEqual(1, len(global_stats.entries))
self.assertEqual(1, global_stats.get("/redirect", "GET").num_requests)
self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests)
class TestCatchResponse(WebserverTestCase):
def setUp(self):
super(TestCatchResponse, self).setUp()
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
self.locust = MyLocust()
self.num_failures = 0
self.num_success = 0
def on_failure(request_type, name, response_time, exception):
self.num_failures += 1
self.last_failure_exception = exception
def on_success(**kwargs):
self.num_success += 1
events.request_failure += on_failure
events.request_success += on_success
def test_catch_response(self):
self.assertEqual(500, self.locust.client.get("/fail").status_code)
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
with self.locust.client.get("/ultra_fast", catch_response=True) as response: pass
self.assertEqual(1, self.num_failures)
self.assertEqual(1, self.num_success)
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
raise ResponseError("Not working")
self.assertEqual(2, self.num_failures)
self.assertEqual(1, self.num_success)
def test_catch_response_http_fail(self):
with self.locust.client.get("/fail", catch_response=True) as response: pass
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
def test_catch_response_http_manual_fail(self):
with self.locust.client.get("/ultra_fast", catch_response=True) as response:
response.failure("Haha!")
self.assertEqual(1, self.num_failures)
self.assertEqual(0, self.num_success)
self.assertTrue(
isinstance(self.last_failure_exception, CatchResponseError),
"Failure event handler should have been passed a CatchResponseError instance"
)
def test_catch_response_http_manual_success(self):
with self.locust.client.get("/fail", catch_response=True) as response:
response.success()
self.assertEqual(0, self.num_failures)
self.assertEqual(1, self.num_success)
def test_catch_response_allow_404(self):
with self.locust.client.get("/does/not/exist", catch_response=True) as response:
self.assertEqual(404, response.status_code)
if response.status_code == 404:
response.success()
self.assertEqual(0, self.num_failures)
self.assertEqual(1, self.num_success)
def test_interrupt_taskset_with_catch_response(self):
class MyTaskSet(TaskSet):
@task
def interrupted_task(self):
with self.client.get("/ultra_fast", catch_response=True) as r:
raise InterruptTaskSet()
class MyLocust(HttpLocust):
host = "http://127.0.0.1:%i" % self.port
task_set = MyTaskSet
l = MyLocust()
ts = MyTaskSet(l)
self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task())
self.assertEqual(0, self.num_failures)
self.assertEqual(0, self.num_success)
def test_catch_response_connection_error_success(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:1"
l = MyLocust()
with l.client.get("/", catch_response=True) as r:
self.assertEqual(r.status_code, 0)
self.assertEqual(None, r.content)
r.success()
self.assertEqual(1, self.num_success)
self.assertEqual(0, self.num_failures)
def test_catch_response_connection_error_fail(self):
class MyLocust(HttpLocust):
host = "http://127.0.0.1:1"
l = MyLocust()
with l.client.get("/", catch_response=True) as r:
self.assertEqual(r.status_code, 0)
self.assertEqual(None, r.content)
r.failure("Manual fail")
self.assertEqual(0, self.num_success)
self.assertEqual(1, self.num_failures)
|
|
from musp import *
from math import pi, sin, cos
from random import random
import os
import numpy as np
tonic_freq = PitchedSound.note_frequency("F#", 3)
tempo_dur = 2.6/7
datadir = os.path.expanduser("~/.mu-sp")
def aulib(sound_dir):
return os.path.join(datadir, "audio", sound_dir)
def rhlib(rh_name):
return os.path.join(datadir, "rhythm/an_egg_rh", rh_name + ".rh")
def loctrans(far, angle, mem=[0]):
mem[0] += pi*2/200
return Location((angle, mem[0]), far)
def halftones_for_scale_deg(degree):
semitones = [0, 2, 3, 5, 7, 8, 10][int(degree) - 1]
if degree % 1 == .5:
semitones += 1
return semitones
def deg_freq(degree):
octave_mult = 1
while degree > 7:
degree -= 7
octave_mult *= 2
return tonic_freq*octave_mult * PitchedSound.temper_ratio**halftones_for_scale_deg(degree)
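# A quick worked example of the two helpers above (illustrative only, not
# executed by the script): degree 1 is the tonic, so deg_freq(1) == tonic_freq,
# and degree 8 wraps one octave up, so deg_freq(8) == 2 * tonic_freq. Half
# degrees add one extra semitone, e.g. halftones_for_scale_deg(2.5) == 3.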
def fundamental_rhythm(beat):
return beat.split([3, 3, 7, 3])
def apply_rhythm(beat, rhythm_file, key_sound_map):
with open(rhythm_file) as rf:
char_times = eval(''.join(rf.readline()))
beat_map = beat.interleave_split(char_times)
for key, beats in beat_map.iteritems():
for beat in beats:
try:
for sound, loc in key_sound_map[key]:
beat.attach(sound, loc)
except:
beat.attach(*key_sound_map[key])
crystal_sound = RandomPitchedSound()
crystal_sound.populate_with_dir(aulib("crystal_ding"))
def add_tracks_fromto(tracklist, listoftracks):
for track in tracklist:
listoftracks.append(track)
def crystal_sounding(beat):
beat.set_duration(tempo_dur*7)
part1 = "565765243"
crys1 = Track("crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys1_root = crys1.link_root(beat)
with open(rhlib("crys_1_j")) as rf:
char_times = eval(''.join(rf.readline()))
beats = crys1_root.interleave_split(char_times)['j']
for b, deg in zip(beats, part1):
b.attach(SpreadSound(crystal_sound.for_pitch(4*deg_freq(int(deg))), (.1, .1, .1), .02, 3), loctrans(4, pi/2))
return [crys1]
def crystal_rise(beat):
beat.set_duration(tempo_dur*7)
part2 = "34567"
crys2 = Track("more crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys2_root = crys2.link_root(beat)
beats = crys2_root.split_even(14)[9:]
for b, deg in zip(beats, part2):
b.attach(crystal_sound.for_pitch(2*deg_freq(int(deg))), loctrans(4, -pi/2))
return [crys2]
def crystal_complex(beat):
beat.set_duration(tempo_dur*14)
#part3 = "78765774554"*2
part3 = "17876577547187657232"
crys3 = Track("more (muy complicated) crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys3_root = crys3.link_root(beat)
beats = crys3_root.split([1, .5, .5, 1, 1, 1, 2, #groups of 7
2, 1, 2, 2,
2, 2, 1, 1, 1,
3, 1, 1, 2])
for b, deg in zip(beats, part3):
deg = int(deg) + 8
b.attach(crystal_sound.for_pitch(deg_freq(int(deg)+4)), loctrans(4, -pi/6))
return [crys3]
def apply_each_half(beat, one_beat_function, firsthalf=True, secondhalf=True):
if not firsthalf and not secondhalf:
return
sometracks = []
try:
b1, b2 = beat.beats
except:
b1, b2 = beat.split_even(2)
if firsthalf:
add_tracks_fromto(one_beat_function(b1), sometracks)
if secondhalf:
add_tracks_fromto(one_beat_function(b2), sometracks)
return sometracks
def crystal_compiled_block(beat, levels):
level_funcs = [lambda b: apply_each_half(b, crystal_sounding, True, False),
lambda b: apply_each_half(b, crystal_sounding, False, True),
lambda b: apply_each_half(b, crystal_rise, False, True),
lambda b: apply_each_half(b, crystal_rise, True, False),
crystal_complex]
allthesetracks = []
for l in levels:
add_tracks_fromto(level_funcs[l](beat), allthesetracks)
return allthesetracks
bow_violin_sound = RandomPitchedSound()
bow_violin_sound.populate_with_dir(aulib("bowed_violin"))
pluck_violin_sound = RandomPitchedSound()
pluck_violin_sound.populate_with_dir(aulib("plucked_violin_ring"))
pluck_violin_sound.populate_with_dir(aulib("plucked_violin_damp"))
def vibrato_snd_for_beat_frac(beat, deg, f, distance, sound=bow_violin_sound, h=0):
# h is vibrato hertz
vibrato_f = lambda t: PitchedSound.temper_ratio**(.25/(1.0 + np.exp(-t * 3))*sin(t*h*(2*pi)))
beat.attach(ClippedSound(ResampledSound(sound.for_pitch(deg_freq(float(deg))), vibrato_f,
cache=False), tempo_dur*f), loctrans(distance, -pi/3))
def violin_pluck_chords(beat):
violin1 = Track("Violin me once!", Sound.default_rate)
violin_root = violin1.link_root(beat)
degrees = (1, 1, 1, 1, 1, 1)
durations = (1, 1, 2, 3, 5, 2)
distances = (4, 3, 2, 4, 2, 3)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur, dist/5.0, sound=pluck_violin_sound, h=7)
violin2 = Track("Violin me twice!", Sound.default_rate)
violin_root = violin2.link_root(beat)
degrees = (5, 5, 5, 4, 4, 3)
durations = [d + .05 for d in (1, 1, 2, 3, 5, 2)]
distances = (3, 3.5, 3, 2, 2, 4)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur + .1, dist/5.0, sound=pluck_violin_sound, h=7)
violin3 = Track("Violin me thrice!", Sound.default_rate)
violin_root = violin3.link_root(beat)
degrees = (7, 6, 7, 7, 6, 4)
durations = [d - .05 for d in (1, 1, 2, 3, 5, 2)]
distances = (4, 3.5, 4, 3, 4, 3.5)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur + .1, dist/5.0, sound=pluck_violin_sound, h=7)
return [violin1, violin2, violin3]
werb_raw = RawPitchedSound(os.path.join(aulib("werb_sine"), "werb_sine.0.110.wav"))
werb_sounds = {}
def werb_for_beat_frac(beat, degree, duration, distance):
if degree not in werb_sounds:
werb_sounds[degree] = RandomIntervalSound(werb_raw.for_pitch(.49*deg_freq(degree)), margin=.01)
werb_sound = werb_sounds[degree]
beat.attach(werb_sound.for_interval(duration*tempo_dur), loctrans(distance, pi))
def werb_under(beat):
werb = Track("werbtrack", Sound.default_rate)
werb_root = werb.link_root(beat)
for b, d in zip(werb_root.split_even(4), (1, 2, 3, 4)):
werb_for_beat_frac(b, d, 14.0/4, .5)
return [werb]
random_mid_drum = RandomSound()
random_mid_drum.populate_with_dir(aulib("snares_off"))
mid_drum = SpreadSound(random_mid_drum, (.2, .2, 0), 0, 1)
def descending_snaresoff_tuple(beat, n):
beats = [beat] if n == 1 else beat.split_even(n)
for b, i in zip(beats, range(n, 0, -1)):
b.attach(mid_drum, loctrans(i + .2, pi*2/12*i))
def mid_drum_rhythm(beat):
drum = Track("Snares off please", Sound.default_rate)
drum_root = drum.link_root(beat)
one, two, three, four, five, six, seven = drum_root.split_even(7)
descending_snaresoff_tuple(one, 2)
descending_snaresoff_tuple(two, 1)
descending_snaresoff_tuple(three, 3)
descending_snaresoff_tuple(four, 4)
descending_snaresoff_tuple(five, 1)
descending_snaresoff_tuple(six, 6)
descending_snaresoff_tuple(seven, 1)
return [drum]
def create_main(beat):
trackbag = []
for levels, crystaltest in zip([(0, 1), (0, 1, 2), (0, 1, 2, 4), (0, 1, 2, 3, 4), (2, 3, 4)],
beat.split(5)):
add_tracks_fromto(crystal_compiled_block(crystaltest, levels), trackbag)
add_tracks_fromto(violin_pluck_chords(crystaltest), trackbag)
add_tracks_fromto(werb_under(crystaltest), trackbag)
add_tracks_fromto(apply_each_half(crystaltest, mid_drum_rhythm), trackbag)
return trackbag
mainbeat = Beat()
mix = Mixer("Let's make some art, I guess...!", Sound.default_rate, create_main(mainbeat))
mix.play(quick_play=False)
|
|
"""Processing and presentation of computed data
Result objects hold computed data and offer postprocessing and plotting functions
which are specifically adapted to the nature of the stored data.
"""
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from . import pltutils
from .utils import with_defaults, x_pi
from .support.pickle import pickleable, save, load
from .support.structure import Positions, AbstractSites, Sites, Hoppings
__all__ = ['Bands', 'Eigenvalues', 'NDSweep', 'Series', 'SpatialMap', 'StructureMap',
'Sweep', 'make_path', 'save', 'load']
def _make_crop_indices(obj, limits):
"""Return the indices into `obj` which retain only the data within the given limits"""
idx = np.ones(obj.num_sites, dtype=bool)
for name, limit in limits.items():
v = getattr(obj, name)
idx = np.logical_and(idx, v >= limit[0])
idx = np.logical_and(idx, v < limit[1])
return idx
class Path(np.ndarray):
"""An ndarray which represents a path connecting certain points
Attributes
----------
point_indices : List[int]
Indices of the significant points along the path. Minimum 2: start and end.
"""
def __new__(cls, array, point_indices):
obj = np.asarray(array).view(cls)
assert len(point_indices) >= 2
obj.point_indices = point_indices
return obj
def __array_finalize__(self, obj):
if obj is None:
return
default_indices = [0, obj.shape[0] - 1] if len(obj.shape) >= 1 else []
self.point_indices = getattr(obj, 'point_indices', default_indices)
def __reduce__(self):
r = super().__reduce__()
state = r[2] + (self.point_indices,)
return r[0], r[1], state
# noinspection PyMethodOverriding,PyArgumentList
def __setstate__(self, state):
self.point_indices = state[-1]
super().__setstate__(state[:-1])
@property
def points(self):
"""Significant points along the path, including start and end"""
return self[self.point_indices]
@property
def is_simple(self):
"""Is it just a simple path between two points?"""
return len(self.point_indices) == 2
def as_1d(self):
"""Return a 1D representation of the path -- useful for plotting
For simple paths (2 points) the closest 1D path with real positions is returned.
Otherwise, an `np.arange(size)` is returned, where `size` matches the path. This doesn't
have any real meaning, but it's something that can be used as the x-axis in a line plot.
Examples
--------
>>> np.allclose(make_path(-2, 1, step=1).as_1d().T, [-2, -1, 0, 1])
True
>>> np.allclose(make_path([0, -2], [0, 1], step=1).as_1d().T, [-2, -1, 0, 1])
True
>>> np.allclose(make_path(1, -1, 4, step=1).as_1d().T, [0, 1, 2, 3, 4, 5, 6, 7])
True
"""
if self.is_simple:
if len(self.shape) == 1:
return self
else: # return the first axis with non-zero length
return self[:, np.flatnonzero(np.diff(self.points, axis=0))[0]]
else:
return np.arange(self.shape[0])
def plot(self, point_labels=None, **kwargs):
"""Quiver plot of the path
Parameters
----------
point_labels : List[str]
Labels for the :attr:`.Path.points`.
**kwargs
Forwarded to :func:`~matplotlib.pyplot.quiver`.
"""
ax = plt.gca()
ax.set_aspect('equal')
default_color = pltutils.get_palette('Set1')[1]
kwargs = with_defaults(kwargs, scale_units='xy', angles='xy', scale=1, zorder=2,
lw=1.5, color=default_color, edgecolor=default_color)
x, y = map(np.array, zip(*self.points))
plt.quiver(x[:-1], y[:-1], np.diff(x), np.diff(y), **kwargs)
ax.autoscale_view()
pltutils.add_margin(0.5)
pltutils.despine(trim=True)
if point_labels:
for k_point, label in zip(self.points, point_labels):
ha, va = pltutils.align(*(-k_point))
pltutils.annotate_box(label, k_point * 1.05, fontsize='large',
ha=ha, va=va, bbox=dict(lw=0))
def make_path(k0, k1, *ks, step=0.1):
"""Create a path which connects the given k points
Parameters
----------
k0, k1, *ks
Points in k-space to connect.
step : float
Length in k-space between two samples. Smaller step -> finer detail.
Examples
--------
>>> np.allclose(make_path(0, 3, -1, step=1).T, [0, 1, 2, 3, 2, 1, 0, -1])
True
>>> np.allclose(make_path([0, 0], [2, 3], [-1, 4], step=1.4),
... [[0, 0], [1, 1.5], [2, 3], [0.5, 3.5], [-1, 4]])
True
"""
k_points = [np.atleast_1d(k) for k in (k0, k1) + ks]
if not all(k.shape == k_points[0].shape for k in k_points[1:]):
raise RuntimeError("All k-points must have the same shape")
k_paths = []
point_indices = [0]
for k_start, k_end in zip(k_points[:-1], k_points[1:]):
num_steps = int(np.linalg.norm(k_end - k_start) // step)
# k_path.shape == num_steps, k_space_dimensions
k_path = np.array([np.linspace(s, e, num_steps, endpoint=False)
for s, e in zip(k_start, k_end)]).T
k_paths.append(k_path)
point_indices.append(point_indices[-1] + num_steps)
k_paths.append(k_points[-1])
return Path(np.vstack(k_paths), point_indices)
@pickleable
class Series:
"""A series of data points determined by a common relation, i.e. :math:`y = f(x)`
Attributes
----------
variable : array_like
Independent variable for which the data was computed.
data : array_like
An array of values which were computed as a function of `variable`.
It can be 1D or 2D. In the latter case each column represents the result
of a different function applied to the same `variable` input.
labels : dict
Plot labels: 'variable', 'data', 'title' and 'columns'.
"""
def __init__(self, variable, data, labels=None):
self.variable = np.atleast_1d(variable)
self.data = np.atleast_1d(data)
self.labels = with_defaults(labels, variable="x", data="y", columns="")
def with_data(self, data):
"""Return a copy of this result object with different data"""
result = copy(self)
result.data = data
return result
def reduced(self):
"""Return a copy where the data is summed over the columns
Only applies to results which may have multiple columns of data, e.g.
results for multiple orbitals for LDOS calculation.
"""
return self.with_data(self.data.sum(axis=1))
def plot(self, **kwargs):
"""Labeled line plot
Parameters
----------
**kwargs
Forwarded to `plt.plot()`.
"""
plt.plot(self.variable, self.data, **kwargs)
plt.xlim(self.variable.min(), self.variable.max())
plt.xlabel(self.labels["variable"])
plt.ylabel(self.labels["data"])
if "title" in self.labels:
plt.title(self.labels["title"])
pltutils.despine()
if self.data.ndim > 1:
labels = [str(i) for i in range(self.data.shape[-1])]
pltutils.legend(labels=labels, title=self.labels["columns"])
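# Minimal usage sketch for Series (illustrative only; the values are made up):
#
#     energies = np.linspace(-1, 1, 100)
#     ldos = Series(energies, np.random.rand(100),
#                   labels=dict(variable="E (eV)", data="LDOS"))
#     ldos.plot()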
@pickleable
class SpatialMap:
"""Represents some spatially dependent property: data mapped to site positions"""
def __init__(self, data, positions, sublattices=None):
self._data = np.atleast_1d(data)
if sublattices is None and isinstance(positions, AbstractSites):
self._sites = positions
else:
self._sites = Sites(positions, sublattices)
if self.num_sites != data.size:
raise RuntimeError("Data size doesn't match number of sites")
@property
def num_sites(self) -> int:
"""Total number of lattice sites"""
return self._sites.size
@property
def data(self) -> np.ndarray:
"""1D array of values for each site, i.e. maps directly to x, y, z site coordinates"""
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def positions(self) -> Positions:
"""Lattice site positions. Named tuple with x, y, z fields, each a 1D array."""
return self._sites.positions
@property
def xyz(self) -> np.ndarray:
"""Return a new array with shape=(N, 3). Convenient, but slow for big systems."""
return np.array(self.positions).T
@property
def x(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.x <.SpatialMap.positions.x>`"""
return self._sites.x
@property
def y(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.y <.SpatialMap.positions.y>`"""
return self._sites.y
@property
def z(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.z <.SpatialMap.positions.z>`"""
return self._sites.z
@property
def sublattices(self) -> np.ndarray:
"""1D array of sublattices IDs"""
return self._sites.ids
@property
def sub(self) -> np.ndarray:
"""1D array of sublattices IDs, short for :attr:`.sublattices <.SpatialMap.sublattices>`"""
return self._sites.ids
def with_data(self, data) -> "SpatialMap":
"""Return a copy of this object with different data mapped to the sites"""
result = copy(self)
result._data = data
return result
def save_txt(self, filename):
with open(filename + '.dat', 'w') as file:
file.write('# {:12}{:13}{:13}\n'.format('x(nm)', 'y(nm)', 'data'))
for x, y, d in zip(self.x, self.y, self.data):
file.write(("{:13.5e}" * 3 + '\n').format(x, y, d))
def __getitem__(self, idx):
"""Same rules as numpy indexing"""
if hasattr(idx, "contains"):
idx = idx.contains(*self.positions) # got a Shape object -> evaluate it
return self.__class__(self._data[idx], self._sites[idx])
def cropped(self, **limits):
"""Return a copy which retains only the sites within the given limits
Parameters
----------
**limits
Attribute names and corresponding limits. See example.
Examples
--------
Leave only the data where -10 <= x < 10 and 2 <= y < 4::
new = original.cropped(x=[-10, 10], y=[2, 4])
"""
return self[_make_crop_indices(self, limits)]
def clipped(self, v_min, v_max):
"""Clip (limit) the values in the `data` array, see :func:`~numpy.clip`"""
return self.with_data(np.clip(self.data, v_min, v_max))
def convolve(self, sigma=0.25):
# TODO: slow and only works in the xy-plane
x, y, _ = self.positions
r = np.sqrt(x**2 + y**2)
data = np.empty_like(self.data)
for i in range(len(data)):
idx = np.abs(r - r[i]) < sigma
data[i] = np.sum(self.data[idx] * np.exp(-0.5 * ((r[i] - r[idx]) / sigma)**2))
data[i] /= np.sum(np.exp(-0.5 * ((r[i] - r[idx]) / sigma)**2))
self._data = data
@staticmethod
def _decorate_plot():
ax = plt.gca()
ax.set_aspect('equal')
ax.set_xlabel('x (nm)')
ax.set_ylabel('y (nm)')
pltutils.despine(trim=True)
def plot_pcolor(self, **kwargs):
"""Color plot of the xy plane
Parameters
----------
**kwargs
Forwarded to :func:`~matplotlib.pyplot.tripcolor`.
"""
x, y, _ = self.positions
kwargs = with_defaults(kwargs, shading='gouraud', rasterized=True)
pcolor = plt.tripcolor(x, y, self.data, **kwargs)
self._decorate_plot()
return pcolor
def plot_contourf(self, num_levels=50, **kwargs):
"""Filled contour plot of the xy plane
Parameters
----------
num_levels : int
Number of contour levels.
**kwargs
Forwarded to :func:`~matplotlib.pyplot.tricontourf`.
"""
levels = np.linspace(self.data.min(), self.data.max(), num=num_levels)
x, y, _ = self.positions
kwargs = with_defaults(kwargs, levels=levels, rasterized=True)
contourf = plt.tricontourf(x, y, self.data, **kwargs)
self._decorate_plot()
return contourf
def plot_contour(self, **kwargs):
"""Contour plot of the xy plane
Parameters
----------
**kwargs
Forwarded to :func:`~matplotlib.pyplot.tricontour`.
"""
x, y, _ = self.positions
contour = plt.tricontour(x, y, self.data, **kwargs)
self._decorate_plot()
return contour
@pickleable
class StructureMap(SpatialMap):
"""A subclass of :class:`.SpatialMap` that also includes hoppings between sites"""
def __init__(self, data, sites, hoppings, boundaries=()):
super().__init__(data, sites)
self._hoppings = hoppings
self._boundaries = boundaries
@property
def spatial_map(self) -> SpatialMap:
"""Just the :class:`SpatialMap` subset without hoppings"""
return SpatialMap(self._data, self._sites)
@property
def hoppings(self) -> Hoppings:
"""Sparse matrix of hopping IDs"""
return self._hoppings
@property
def boundaries(self) -> list:
"""Boundary hoppings between different translation units (only for infinite systems)"""
return self._boundaries
def __getitem__(self, idx):
"""Same rules as numpy indexing"""
if hasattr(idx, "contains"):
idx = idx.contains(*self.positions) # got a Shape object -> evaluate it
return self.__class__(self.data[idx], self._sites[idx], self._hoppings[idx],
[b[idx] for b in self._boundaries])
def with_data(self, data) -> "StructureMap":
"""Return a copy of this object with different data mapped to the sites"""
result = copy(self)
result._data = data
return result
def plot(self, cmap='YlGnBu', site_radius=(0.03, 0.05), num_periods=1, **kwargs):
"""Plot the spatial structure with a colormap of :attr:`data` at the lattice sites
Both the site size and color are used to display the data.
Parameters
----------
cmap : str
Matplotlib colormap to be used for the data.
site_radius : Tuple[float, float]
Min and max radius of lattice sites. This range will be used to visually
represent the magnitude of the data.
num_periods : int
Number of times to repeat periodic boundaries.
**kwargs
Additional plot arguments as specified in :func:`.structure_plot_properties`.
"""
from .system import (plot_sites, plot_hoppings, plot_periodic_boundaries,
structure_plot_properties)
def to_radii(data):
if not isinstance(site_radius, (tuple, list)):
return site_radius
positive_data = data - data.min()
maximum = positive_data.max()
if not np.allclose(maximum, 0):
delta = site_radius[1] - site_radius[0]
return site_radius[0] + delta * positive_data / maximum
else:
return site_radius[1]
props = structure_plot_properties(**kwargs)
props['site'] = with_defaults(props['site'], radius=to_radii(self.data), cmap=cmap)
collection = plot_sites(self.positions, self.data, **props['site'])
hop = self.hoppings.tocoo()
props['hopping'] = with_defaults(props['hopping'], color='#bbbbbb')
plot_hoppings(self.positions, hop, **props['hopping'])
props['site']['alpha'] = props['hopping']['alpha'] = 0.5
plot_periodic_boundaries(self.positions, hop, self.boundaries, self.data,
num_periods, **props)
plt.gca().set_aspect('equal', 'datalim')
plt.xlabel("{} (nm)".format(props["axes"][0]))
plt.ylabel("{} (nm)".format(props["axes"][1]))
pltutils.despine(trim=True)
pltutils.add_margin()
if collection:
plt.sci(collection)
return collection
@pickleable
class Structure:
"""Holds and plots the structure of a tight-binding system
Similar to :class:`StructureMap`, but only holds the structure without
mapping to any actual data.
"""
def __init__(self, sites, hoppings, boundaries=()):
self._sites = sites
self._hoppings = hoppings
self._boundaries = boundaries
@property
def num_sites(self) -> int:
"""Total number of lattice sites"""
return self._sites.size
@property
def positions(self) -> Positions:
"""Lattice site positions. Named tuple with x, y, z fields, each a 1D array."""
return self._sites.positions
@property
def xyz(self) -> np.ndarray:
"""Return a new array with shape=(N, 3). Convenient, but slow for big systems."""
return np.array(self.positions).T
@property
def x(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.x <.SpatialMap.positions.x>`"""
return self._sites.x
@property
def y(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.y <.SpatialMap.positions.y>`"""
return self._sites.y
@property
def z(self) -> np.ndarray:
"""1D array of coordinates, short for :attr:`.positions.z <.SpatialMap.positions.z>`"""
return self._sites.z
@property
def sublattices(self) -> np.ndarray:
"""1D array of sublattices IDs"""
return self._sites.ids
@property
def sub(self) -> np.ndarray:
"""1D array of sublattices IDs, short for :attr:`.sublattices <.SpatialMap.sublattices>`"""
return self._sites.ids
@property
def hoppings(self) -> Hoppings:
"""Sparse matrix of hopping IDs"""
return self._hoppings
@property
def boundaries(self) -> list:
"""Boundary hoppings between different translation units (only for infinite systems)"""
return self._boundaries
def __getitem__(self, idx):
"""Same rules as numpy indexing"""
if hasattr(idx, "contains"):
idx = idx.contains(*self.positions) # got a Shape object -> evaluate it
return Structure(self._sites[idx], self._hoppings[idx],
[b[idx] for b in self._boundaries])
def find_nearest(self, position, sublattice=""):
"""Find the index of the atom closest to the given position
Parameters
----------
position : array_like
Where to look.
sublattice : Optional[str]
Look for a specific sublattice site. By default any will do.
Returns
-------
int
"""
return self._sites.find_nearest(position, sublattice)
def cropped(self, **limits):
"""Return a copy which retains only the sites within the given limits
Parameters
----------
**limits
Attribute names and corresponding limits. See example.
Examples
--------
Leave only the data where -10 <= x < 10 and 2 <= y < 4::
new = original.cropped(x=[-10, 10], y=[2, 4])
"""
return self[_make_crop_indices(self, limits)]
def with_data(self, data) -> StructureMap:
"""Map some data to this structure"""
return StructureMap(data, self._sites, self._hoppings, self._boundaries)
def plot(self, num_periods=1, **kwargs):
"""Plot the structure: sites, hoppings and periodic boundaries (if any)
Parameters
----------
num_periods : int
Number of times to repeat the periodic boundaries.
**kwargs
Additional plot arguments as specified in :func:`.structure_plot_properties`.
"""
from .system import (plot_sites, plot_hoppings, plot_periodic_boundaries,
structure_plot_properties, decorate_structure_plot)
props = structure_plot_properties(**kwargs)
plot_hoppings(self.positions, self._hoppings, **props['hopping'])
plot_sites(self.positions, self.sublattices, **props['site'])
plot_periodic_boundaries(self.positions, self._hoppings, self._boundaries,
self.sublattices, num_periods, **props)
decorate_structure_plot(**props)
@pickleable
class Eigenvalues:
"""Hamiltonian eigenvalues with optional probability map
Attributes
----------
values : np.ndarray
probability : np.ndarray
"""
def __init__(self, eigenvalues, probability=None):
self.values = np.atleast_1d(eigenvalues)
self.probability = np.atleast_1d(probability)
@property
def indices(self):
return np.arange(0, self.values.size)
def _decorate_plot(self, mark_degenerate, number_states, margin=0.1):
"""Common elements for the two eigenvalue plots"""
if mark_degenerate:
# draw lines between degenerate states
from .solver import Solver
from matplotlib.collections import LineCollection
pairs = ((s[0], s[-1]) for s in Solver.find_degenerate_states(self.values))
lines = [[(i, self.values[i]) for i in pair] for pair in pairs]
plt.gca().add_collection(LineCollection(lines, color='black', alpha=0.5))
if number_states:
# draw a number next to each state
for index, energy in enumerate(self.values):
pltutils.annotate_box(index, (index, energy), fontsize='x-small',
xytext=(0, -10), textcoords='offset points')
margin = 0.25
plt.xlabel('state')
plt.ylabel('E (eV)')
plt.xlim(-1, len(self.values))
pltutils.despine(trim=True)
pltutils.add_margin(margin, axis="y")
def plot(self, mark_degenerate=True, show_indices=False, **kwargs):
"""Standard eigenvalues scatter plot
Parameters
----------
mark_degenerate : bool
Plot a line which connects degenerate states.
show_indices : bool
Plot index number next to all states.
**kwargs
Forwarded to plt.scatter().
"""
plt.scatter(self.indices, self.values, **with_defaults(kwargs, c='#377ec8', s=15, lw=0.1))
self._decorate_plot(mark_degenerate, show_indices)
def plot_heatmap(self, size=(7, 77), mark_degenerate=True, show_indices=False, **kwargs):
"""Eigenvalues scatter plot with a heatmap indicating probability density
Parameters
----------
size : Tuple[int, int]
Min and max scatter dot size.
mark_degenerate : bool
Plot a line which connects degenerate states.
show_indices : bool
Plot index number next to all states.
**kwargs
Forwarded to plt.scatter().
"""
if not np.any(self.probability):
return self.plot(mark_degenerate, show_indices, **kwargs)
# higher probability states should be drawn above lower ones
idx = np.argsort(self.probability)
indices, energy, probability = (v[idx] for v in
(self.indices, self.values, self.probability))
scatter_point_sizes = size[0] + size[1] * probability / probability.max()
plt.scatter(indices, energy, **with_defaults(kwargs, cmap='YlOrRd', lw=0.2, alpha=0.85,
c=probability, s=scatter_point_sizes,
edgecolor="k"))
self._decorate_plot(mark_degenerate, show_indices)
return self.probability.max()
@pickleable
class Bands:
"""Band structure along a path in k-space
Attributes
----------
k_path : :class:`Path`
The path in reciprocal space along which the bands were calculated.
E.g. constructed using :func:`make_path`.
energy : array_like
Energy values for the bands along the path in k-space.
"""
def __init__(self, k_path, energy):
self.k_path = np.atleast_1d(k_path).view(Path)
self.energy = np.atleast_1d(energy)
@staticmethod
def _point_names(k_points):
names = []
for k_point in k_points:
k_point = np.atleast_1d(k_point)
values = map(x_pi, k_point)
fmt = "[{}]" if len(k_point) > 1 else "{}"
names.append(fmt.format(', '.join(values)))
return names
@property
def num_bands(self):
return self.energy.shape[1]
def plot(self, point_labels=None, **kwargs):
"""Line plot of the band structure
Parameters
----------
point_labels : List[str]
Labels for the `k_points`.
**kwargs
Forwarded to `plt.plot()`.
"""
default_color = pltutils.get_palette('Set1')[1]
default_linewidth = np.clip(5 / self.num_bands, 1.1, 1.6)
kwargs = with_defaults(kwargs, color=default_color, lw=default_linewidth)
k_space = self.k_path.as_1d()
plt.plot(k_space, self.energy, **kwargs)
plt.xlim(k_space.min(), k_space.max())
plt.xlabel('k-space')
plt.ylabel('E (eV)')
pltutils.add_margin()
pltutils.despine(trim=True)
point_labels = point_labels or self._point_names(self.k_path.points)
plt.xticks(k_space[self.k_path.point_indices], point_labels)
# Draw vertical lines at significant points. Because of the `transLimits.transform`,
# this must be the done last, after all other plot elements are positioned.
for idx in self.k_path.point_indices:
ymax = plt.gca().transLimits.transform([0, max(self.energy[idx])])[1]
plt.axvline(k_space[idx], ymax=ymax, color="0.4", lw=0.8, ls=":", zorder=-1)
def plot_kpath(self, point_labels=None, **kwargs):
"""Quiver plot of the k-path along which the bands were computed
Combine with :meth:`.Lattice.plot_brillouin_zone` to see the path in context.
Parameters
----------
point_labels : List[str]
Labels for the k-points.
**kwargs
Forwarded to :func:`~matplotlib.pyplot.quiver`.
"""
self.k_path.plot(point_labels, **kwargs)
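# Minimal usage sketch for Bands (illustrative only; the energies below are a
# random stand-in for real solver output):
#
#     k_path = make_path([0, 0], [1.5, 0], [1.5, 1.5], step=0.05)
#     energy = np.random.rand(k_path.shape[0], 4)
#     bands = Bands(k_path, energy)
#     bands.plot(point_labels=["$\\Gamma$", "X", "M"])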
@pickleable
class Sweep:
"""2D parameter sweep with `x` and `y` 1D array parameters and `data` 2D array result
Attributes
----------
x : array_like
1D array with x-axis values -- usually the primary parameter being swept.
y : array_like
1D array with y-axis values -- usually the secondary parameter.
data : array_like
2D array with `shape == (x.size, y.size)` containing the main result data.
labels : dict
Plot labels: 'title', 'x', 'y' and 'data'.
tags : dict
Any additional user defined variables.
"""
def __init__(self, x, y, data, labels=None, tags=None):
self.x = np.atleast_1d(x)
self.y = np.atleast_1d(y)
self.data = np.atleast_2d(data)
self.labels = with_defaults(labels, title="", x="x", y="y", data="data")
self.tags = tags
def __getitem__(self, item):
"""Same rules as numpy indexing"""
if isinstance(item, tuple):
idx_x, idx_y = item
else:
idx_x = item
idx_y = slice(None)
return self._with_data(self.x[idx_x], self.y[idx_y], self.data[idx_x, idx_y])
def _with_data(self, x, y, data):
return self.__class__(x, y, data, self.labels, self.tags)
@property
def _plain_labels(self):
"""Labels with latex symbols stripped out"""
trans = str.maketrans('', '', '$\\')
return {k: v.translate(trans) for k, v in self.labels.items()}
def _xy_grids(self):
"""Expand x and y into 2D arrays matching data."""
xgrid = np.column_stack([self.x] * self.y.size)
ygrid = np.row_stack([self.y] * self.x.size)
return xgrid, ygrid
def save_txt(self, filename):
"""Save text file with 3 columns: x, y, data.
Parameters
----------
filename : str
"""
with open(filename, 'w') as file:
file.write("#{x:>11} {y:>12} {data:>12}\n".format(**self._plain_labels))
xgrid, ygrid = self._xy_grids()
for row in zip(xgrid.flat, ygrid.flat, self.data.flat):
values = ("{:12.5e}".format(v) for v in row)
file.write(" ".join(values) + "\n")
def cropped(self, x=None, y=None):
"""Return a copy with data cropped to the limits in the x and/or y axes
A call with x=[-1, 2] will leave data only where -1 <= x <= 2.
Parameters
----------
x, y : Tuple[float, float]
Min and max data limit.
Returns
-------
:class:`~pybinding.Sweep`
"""
idx_x = np.logical_and(x[0] <= self.x, self.x <= x[1]) if x else np.arange(self.x.size)
idx_y = np.logical_and(y[0] <= self.y, self.y <= y[1]) if y else np.arange(self.y.size)
return self._with_data(self.x[idx_x], self.y[idx_y], self.data[np.ix_(idx_x, idx_y)])
def mirrored(self, axis='x'):
"""Return a copy with data mirrored in around specified axis
Only makes sense if the axis starts at 0.
Parameters
----------
axis : 'x' or 'y'
Returns
-------
:class:`~pybinding.Sweep`
"""
if axis == 'x':
x = np.concatenate((-self.x[::-1], self.x[1:]))
data = np.vstack((self.data[::-1], self.data[1:]))
return self._with_data(x, self.y, data)
elif axis == 'y':
y = np.concatenate((-self.y[::-1], self.y[1:]))
data = np.hstack((self.data[:, ::-1], self.data[:, 1:]))
return self._with_data(self.x, y, data)
else:
RuntimeError("Invalid axis")
def interpolated(self, mul=None, size=None, kind='linear'):
"""Return a copy with interpolate data using :class:`scipy.interpolate.interp1d`
Call with `mul=2` to double the size of the x-axis and interpolate data to match.
To interpolate in both axes pass a tuple, e.g. `mul=(4, 2)`.
Parameters
----------
mul : Union[int, Tuple[int, int]]
Number of times the size of the axes should be multiplied.
size : Union[int, Tuple[int, int]]
New size of the axes. Zero will leave size unchanged.
kind
Forwarded to :class:`scipy.interpolate.interp1d`.
Returns
-------
:class:`~pybinding.Sweep`
"""
if not mul and not size:
return self
from scipy.interpolate import interp1d
x, y, data = self.x, self.y, self.data
if mul:
try:
mul_x, mul_y = mul
except TypeError:
mul_x, mul_y = mul, 1
size_x = x.size * mul_x
size_y = y.size * mul_y
else:
try:
size_x, size_y = size
except TypeError:
size_x, size_y = size, 0
if size_x > 0 and size_x != x.size:
interpolate = interp1d(x, data, axis=0, kind=kind)
x = np.linspace(x.min(), x.max(), size_x, dtype=x.dtype)
data = interpolate(x)
if size_y > 0 and size_y != y.size:
interpolate = interp1d(y, data, kind=kind)
y = np.linspace(y.min(), y.max(), size_y, dtype=y.dtype)
data = interpolate(y)
return self._with_data(x, y, data)
def _convolved(self, sigma, axis='x'):
"""Return a copy where the data is convolved with a Gaussian function
Parameters
----------
sigma : float
Gaussian broadening.
axis : 'x' or 'y'
Returns
-------
:class:`~pybinding.Sweep`
"""
def convolve(v, data0):
v0 = v[v.size // 2]
gaussian = np.exp(-0.5 * ((v - v0) / sigma)**2)
gaussian /= gaussian.sum()
extend = 10 # TODO: rethink this
data1 = np.concatenate((data0[extend::-1], data0, data0[:-extend:-1]))
data1 = np.convolve(data1, gaussian, 'same')
return data1[extend:-extend]
x, y, data = self.x, self.y, self.data.copy()
if 'x' in axis:
for i in range(y.size):
data[:, i] = convolve(x, data[:, i])
if 'y' in axis:
for i in range(x.size):
data[i, :] = convolve(y, data[i, :])
return self._with_data(x, y, data)
def plot(self, **kwargs):
"""Plot a 2D colormap of :attr:`Sweep.data`
Parameters
----------
**kwargs
Forwarded to :func:`matplotlib.pyplot.pcolormesh`.
"""
mesh = plt.pcolormesh(self.x, self.y, self.data.T,
**with_defaults(kwargs, cmap='RdYlBu_r', rasterized=True))
plt.xlim(self.x.min(), self.x.max())
plt.ylim(self.y.min(), self.y.max())
plt.title(self.labels['title'])
plt.xlabel(self.labels['x'])
plt.ylabel(self.labels['y'])
return mesh
def colorbar(self, **kwargs):
"""Draw a colorbar with the label of :attr:`Sweep.data`"""
return pltutils.colorbar(**with_defaults(kwargs, label=self.labels['data']))
def _plot_slice(self, axis, x, y, value, **kwargs):
plt.plot(x, y, **kwargs)
split = self.labels[axis].split(' ', 1)
label = split[0]
unit = '' if len(split) == 1 else split[1].strip('()')
plt.title('{}, {} = {:.2g} {}'.format(self.labels['title'], label, value, unit))
plt.xlim(x.min(), x.max())
plt.xlabel(self.labels['x' if axis == 'y' else 'y'])
plt.ylabel(self.labels['data'])
pltutils.despine()
def _slice_x(self, x):
"""Return a slice of data nearest to x and the found values of x.
Parameters
----------
x : float
"""
idx = np.abs(self.x - x).argmin()
return self.data[idx, :], self.x[idx]
def _slice_y(self, y):
"""Return a slice of data nearest to y and the found values of y.
Parameters
----------
y : float
"""
idx = np.abs(self.y - y).argmin()
return self.data[:, idx], self.y[idx]
def plot_slice_x(self, x, **kwargs):
z, value = self._slice_x(x)
self._plot_slice('x', self.y, z, value, **kwargs)
def plot_slice_y(self, y, **kwargs):
z, value = self._slice_y(y)
self._plot_slice('y', self.x, z, value, **kwargs)
@pickleable
class NDSweep:
"""ND parameter sweep
Attributes
----------
variables : tuple of array_like
The parameters being swept.
data : np.ndarray
Main result array with `shape == [len(v) for v in variables]`.
labels : dict
Plot labels: 'title', 'x', 'y' and 'data'.
tags : dict
Any additional user defined variables.
"""
def __init__(self, variables, data, labels=None, tags=None):
self.variables = variables
self.data = np.reshape(data, [len(v) for v in variables])
self.labels = with_defaults(labels, title="", axes=[], data="data")
# alias the first 3 axes to x, y, z for compatibility with Sweep labels
for axis, label in zip('xyz', self.labels['axes']):
self.labels[axis] = label
self.tags = tags
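# Minimal usage sketch for Sweep (illustrative only; random data stands in for
# a real 2D parameter scan with shape (x.size, y.size)):
#
#     x = np.linspace(0, 1, 50)
#     y = np.linspace(-2, 2, 80)
#     sweep = Sweep(x, y, np.random.rand(50, 80),
#                   labels=dict(title="demo", x="B (T)", y="E (eV)", data="DOS"))
#     sweep.cropped(y=[-1, 1]).plot()
#     sweep.plot_slice_x(0.5)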
|
|
"""
Proximal operators / mappings
"""
import numpy as np
from abc import ABCMeta, abstractmethod
from functools import wraps
from scipy.optimize import minimize as scipy_minimize
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
try:
from skimage.restoration import denoise_tv_bregman
except ImportError:
print('Error: scikit-image not found. TVD will not work.')
__all__ = ['nucnorm', 'sparse', 'linsys', 'squared_error', 'identity',
'lbfgs', 'tvd', 'smooth', 'linear', 'fantope']
class ProximalOperatorBaseClass(metaclass=ABCMeta):
@abstractmethod
def __call__(self, x, rho):
raise NotImplementedError
def proxify(func):
class ProxOp(ProximalOperatorBaseClass):
"""
Proximal operator base class
"""
@wraps(func)
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, x, rho=1.0):
"""
Applies the proximal operator
Parameters
----------
x : array_like
The point at which to evaluate the proximal operator
rho : float
Weight of the quadratic proximal penalty term (default: 1.0)
Returns
-------
z : array_like
"""
return func(x, rho, *self.args, **self.kwargs)
return ProxOp
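# Usage sketch for the decorator above (illustrative; `sparse` is defined later
# in this module): a decorated operator is constructed once with its
# hyperparameters and then called repeatedly with (x, rho), returning
# argmin_z f(z) + (rho / 2) * ||z - x||^2 for the wrapped penalty f.
#
#     op = sparse(penalty=0.1)
#     z = op(np.random.randn(100), rho=1.0)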
@proxify
def nucnorm(x, rho, penalty, newshape=None):
"""
Nuclear norm
Parameters
----------
penalty : float
nuclear norm penalty hyperparameter
newshape : tuple, optional
Desired shape of the parameters to apply the nuclear norm to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None)
"""
orig_shape = x.shape
if newshape is not None:
x = x.reshape(newshape)
u, s, v = np.linalg.svd(x, full_matrices=False)
sthr = np.maximum(s - (penalty / rho), 0)
return np.linalg.multi_dot((u, np.diag(sthr), v)).reshape(orig_shape)
@proxify
def sparse(x, rho, penalty):
"""
Proximal operator for the l1-norm: soft thresholding
Parameters
----------
penalty : float
Strength or weight on the l1-norm
"""
lmbda = penalty / rho
return (x - lmbda) * (x >= lmbda) + (x + lmbda) * (x <= -lmbda)
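# Worked example (illustrative): with penalty=1 and rho=1 the threshold is 1,
# so sparse(penalty=1.)(np.array([2., 0.5, -3.]), rho=1.) returns
# [1., 0., -2.] -- entries inside [-1, 1] are zeroed, the rest shrink by 1.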
class linsys(ProximalOperatorBaseClass):
def __init__(self, A, b):
"""
Proximal operator for solving a linear least squares system, Ax = b
Parameters
----------
A : array_like
Sensing matrix (Ax = b)
b : array_like
Responses (Ax = b)
"""
self.P = A.T.dot(A)
self.q = A.T.dot(b)
self.n = self.q.size
def __call__(self, x, rho):
return np.linalg.solve(rho * np.eye(self.n) + self.P, rho * x + self.q)
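# Note on the closed form above: the update solves
#     argmin_z 0.5 * ||A z - b||^2 + (rho / 2) * ||z - x||^2,
# whose normal equations are (A^T A + rho * I) z = A^T b + rho * x,
# i.e. exactly the solve performed with the cached P = A^T A and q = A^T b.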
@proxify
def squared_error(x, rho, x_obs):
"""
Proximal operator for squared error (l2 or Fro. norm)
squared_error(x_obs)
Parameters
----------
x_obs : array_like
Observed array or matrix that you want to stay close to
"""
return (x + x_obs / rho) / (1. + 1. / rho)
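# The one-liner above is the closed-form minimizer of
#     0.5 * ||z - x_obs||^2 + (rho / 2) * ||z - x||^2,
# i.e. z = (rho * x + x_obs) / (rho + 1), written with numerator and
# denominator divided through by rho.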
@proxify
def lbfgs(x, rho, f_df, maxiter=20):
"""
Minimize the proximal operator of a given objective using L-BFGS
Parameters
----------
f_df : function
Returns the objective and gradient of the function to minimize
maxiter : int
Maximum number of L-BFGS iterations
"""
def f_df_augmented(theta):
f, df = f_df(theta)
obj = f + (rho / 2.) * np.linalg.norm(theta - x) ** 2
grad = df + rho * (theta - x)
return obj, grad
res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
options={'maxiter': maxiter, 'disp': False})
return res.x
@proxify
def tvd(x, rho, penalty):
"""
Total variation denoising proximal operator
Parameters
----------
penalty : float
"""
return denoise_tv_bregman(x, rho / penalty)
@proxify
def nonneg(x, rho):
"""Projection onto the non-negative orthant"""
return np.maximum(x, 0)
@proxify
def smooth(x, rho, penalty, axis=0, newshape=None):
"""
Applies a smoothing operator along one dimension.
Currently only accepts a matrix as input.
Parameters
----------
penalty : float
axis : int, optional
Axis along which to apply the smoothing (Default: 0)
newshape : tuple, optional
Desired shape of the parameters to apply the smoothing to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None)
"""
orig_shape = x.shape
if newshape is not None:
x = x.reshape(newshape)
# Apply Laplacian smoothing (l2 norm on the parameters multiplied by
# the laplacian)
n = x.shape[axis]
lap_op = spdiags([(2 + rho / penalty) * np.ones(n),
-1 * np.ones(n), -1 * np.ones(n)],
[0, -1, 1], n, n, format='csc')
A = penalty * lap_op
b = rho * np.rollaxis(x, axis, 0)
return np.rollaxis(spsolve(A, b), axis, 0).reshape(orig_shape)
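# What the solve above computes: with L the 1D graph Laplacian (tridiagonal
# 2 / -1 / -1), A = penalty * L + rho * I and b = rho * x, so the result is
# argmin_z (penalty / 2) * z^T L z + (rho / 2) * ||z - x||^2 applied along
# the chosen axis.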
@proxify
def sdcone(x, rho):
"""Projection onto the semidefinite cone"""
U, V = np.linalg.eigh(x)
return V.dot(np.diag(np.maximum(U, 0)).dot(V.T))
@proxify
def linear(x, rho, weights):
"""Proximal operator for a linear function w^T x"""
return x - weights / rho
@proxify
def simplex(x, rho):
"""
Projection onto the probability simplex
http://arxiv.org/pdf/1309.1541v1.pdf
"""
# sort the elements in descending order
u = np.flipud(np.sort(x.ravel()))
lambdas = (1 - np.cumsum(u)) / (1. + np.arange(u.size))
ix = np.where(u + lambdas > 0)[0].max()
return np.maximum(x + lambdas[ix], 0)
@proxify
def columns(x, rho, proxop):
"""Applies a proximal operator to the columns of a matrix"""
xnext = np.zeros_like(x)
for ix in range(x.shape[1]):
xnext[:, ix] = proxop(x[:, ix], rho)
return xnext
@proxify
def identity(x, rho=None):
"""Identity operator"""
return x
@proxify
def fantope(x, rho, dim, tol=1e-4):
"""
Projection onto the fantope [1]_
.. [1] Vu, Vincent Q., et al. "Fantope projection and selection: A
near-optimal convex relaxation of sparse PCA." Advances in
neural information processing systems. 2013.
"""
U, V = np.linalg.eigh(x)
minval, maxval = np.maximum(U.min(), 0), np.maximum(U.max(), 20 * dim)
while True:
theta = 0.5 * (maxval + minval)
thr_eigvals = np.minimum(np.maximum((U - theta), 0), 1)
constraint = np.sum(thr_eigvals)
if np.abs(constraint - dim) <= tol:
break
elif constraint < dim:
maxval = theta
elif constraint > dim:
minval = theta
else:
break
return np.linalg.multi_dot((V, np.diag(thr_eigvals), V.T))
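# The loop above is a bisection search on the shift theta: the eigenvalues are
# shifted by theta and clipped to [0, 1], and theta is adjusted until the
# clipped eigenvalues sum to `dim` (within `tol`), which is the defining
# constraint of the fantope. The projection is then rebuilt from the original
# eigenvectors with the thresholded eigenvalues.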
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
# pylint: enable=protected-access
Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = Linear([x], 2, False)([x])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = Linear([x], 2, False)([x])
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = Linear([x], 2, False)([x])
with variable_scope.variable_scope(new_scope, reuse=True):
Linear([l1], 2, False)([l1])
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root",
initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test GRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testSRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.509682, 0.509682]])
def testSRUCellWithDiffSize(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.55255556, 0.55255556]])
def testBasicLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.test_session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
m = array_ops.zeros([1, 8], dtype=dtype)
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
g, out_m = cell(x, m)
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])
})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a
# smoke test.
self.assertAllClose(res[0], np.array(
[[0.240, 0.240]], dtype=np_dtype), 1e-2)
expected_mem = np.array(
[[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
dtype=np_dtype)
self.assertAllClose(res[1], expected_mem, 1e-2)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test BasicLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
m = array_ops.zeros([1, 4], dtype=dtype)
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])
})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])
})
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
self.assertEquals(variables[2].op.name,
"root/lstm_cell/projection/kernel")
def testLSTMCellLayerNorm(self):
with self.test_session() as sess:
num_units = 2
num_proj = 3
batch_size = 1
input_size = 4
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
c = array_ops.zeros([batch_size, num_units])
h = array_ops.zeros([batch_size, num_proj])
state = rnn_cell_impl.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormLSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0)
g, out_m = cell(x, state)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.ones((batch_size, input_size)),
c.name: 0.1 * np.ones((batch_size, num_units)),
h.name: 0.1 * np.ones((batch_size, num_proj))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1][0].shape, (batch_size, num_units))
self.assertEqual(res[1][1].shape, (batch_size, num_proj))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) < 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6)
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(base_cell)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testResidualWrapperWithSlice(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 5])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(
base_cell, residual_with_slice_fn)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = sess.run(
[g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/cpu:14159")
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
gpu_dev = test.gpu_device_name()
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), gpu_dev)
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
        # Test that passing a non-tuple state raises an error
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self,
batch_size=None,
time_steps=None,
parallel_iterations=None,
**kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32
)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.
constant([[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=rnn_cell_impl.DropoutWrapper(
rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testWrappedCellProperty(self):
cell = rnn_cell_impl.BasicRNNCell(10)
wrapper = rnn_cell_impl.DropoutWrapper(cell)
# Github issue 15810
self.assertEqual(wrapper.wrapped_cell, cell)
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoStateExceptLSTMCellMemory(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
# Even though we dropout state, by default DropoutWrapper never
# drops out the memory ("c") term of an LSTMStateTuple.
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_c_state = np.array([[1.713925, 1.713925, 1.713925]], dtype=np.float32)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
# h state has been set to zero
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
# c state of an LSTMStateTuple is NEVER modified.
self.assertAllClose(true_c_state, res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none,
output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_some,
state_keep_prob=keep_all,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
class SlimRNNCellTest(test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inputs = random_ops.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
rnn_cell = rnn_cell_impl.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
variable_scope.get_variable_scope().reuse_variables()
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = rnn_cell_impl._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
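# Helper used by the _SlimRNNCell tests above: a slim-style cell function that,
# when called with state=None, returns zero-filled initial output/state tensors
# of shape [batch_size, num_units], and otherwise computes
# tanh(Linear([inputs, state])) and returns it as both output and next state.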
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(
Linear([inputs, state], num_units, True)([inputs, state]))
return output, output
if __name__ == "__main__":
test.main()
|
|
from __future__ import unicode_literals
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
import uuid
import datetime
from random import choice
from .exceptions import ResourceNotFoundException, ValidationException
class OpsworkInstance(BaseModel):
"""
    OpsWorks maintains its own set of EC2 instance metadata.
    This metadata exists before any instance reservation is made, and is
    used to populate a reservation request when "start" is called.
"""
def __init__(self, stack_id, layer_ids, instance_type, ec2_backend,
auto_scale_type=None,
hostname=None,
os=None,
ami_id="ami-08111162",
ssh_keyname=None,
availability_zone=None,
virtualization_type="hvm",
subnet_id=None,
architecture="x86_64",
root_device_type="ebs",
block_device_mappings=None,
install_updates_on_boot=True,
ebs_optimized=False,
agent_version="INHERIT",
instance_profile_arn=None,
associate_public_ip=None,
security_group_ids=None):
self.ec2_backend = ec2_backend
self.instance_profile_arn = instance_profile_arn
self.agent_version = agent_version
self.ebs_optimized = ebs_optimized
self.install_updates_on_boot = install_updates_on_boot
self.architecture = architecture
self.virtualization_type = virtualization_type
self.ami_id = ami_id
self.auto_scale_type = auto_scale_type
self.instance_type = instance_type
self.layer_ids = layer_ids
self.stack_id = stack_id
# may not be totally accurate defaults; instance-type dependent
self.root_device_type = root_device_type
# todo: refactor how we track block_device_mappings to use
# boto.ec2.blockdevicemapping.BlockDeviceType and standardize
# formatting in to_dict()
self.block_device_mappings = block_device_mappings
if self.block_device_mappings is None:
self.block_device_mappings = [{
'DeviceName': 'ROOT_DEVICE',
'Ebs': {
'VolumeSize': 8,
'VolumeType': 'gp2'
}
}]
self.security_group_ids = security_group_ids
if self.security_group_ids is None:
self.security_group_ids = []
self.os = os
self.hostname = hostname
self.ssh_keyname = ssh_keyname
self.availability_zone = availability_zone
self.subnet_id = subnet_id
self.associate_public_ip = associate_public_ip
self.instance = None
self.reported_os = {}
self.infrastructure_class = "ec2 (fixed)"
self.platform = "linux (fixed)"
self.id = "{0}".format(uuid.uuid4())
self.created_at = datetime.datetime.utcnow()
def start(self):
"""
        Create an EC2 reservation if one doesn't already exist, call
        start_instances, and update this object's attributes from the newly
        created instance.
"""
if self.instance is None:
reservation = self.ec2_backend.add_instances(
image_id=self.ami_id,
count=1,
user_data="",
security_group_names=[],
security_group_ids=self.security_group_ids,
instance_type=self.instance_type,
key_name=self.ssh_keyname,
ebs_optimized=self.ebs_optimized,
subnet_id=self.subnet_id,
associate_public_ip=self.associate_public_ip,
)
self.instance = reservation.instances[0]
self.reported_os = {
'Family': 'rhel (fixed)',
'Name': 'amazon (fixed)',
'Version': '2016.03 (fixed)'
}
self.platform = self.instance.platform
self.security_group_ids = self.instance.security_groups
self.architecture = self.instance.architecture
self.virtualization_type = self.instance.virtualization_type
self.subnet_id = self.instance.subnet_id
self.root_device_type = self.instance.root_device_type
self.ec2_backend.start_instances([self.instance.id])
@property
def status(self):
if self.instance is None:
return "stopped"
return self.instance._state.name
def to_dict(self):
d = {
"AgentVersion": self.agent_version,
"Architecture": self.architecture,
"AvailabilityZone": self.availability_zone,
"BlockDeviceMappings": self.block_device_mappings,
"CreatedAt": self.created_at.isoformat(),
"EbsOptimized": self.ebs_optimized,
"InstanceId": self.id,
"Hostname": self.hostname,
"InfrastructureClass": self.infrastructure_class,
"InstallUpdatesOnBoot": self.install_updates_on_boot,
"InstanceProfileArn": self.instance_profile_arn,
"InstanceType": self.instance_type,
"LayerIds": self.layer_ids,
"Os": self.os,
"Platform": self.platform,
"ReportedOs": self.reported_os,
"RootDeviceType": self.root_device_type,
"SecurityGroupIds": self.security_group_ids,
"AmiId": self.ami_id,
"Status": self.status,
}
if self.ssh_keyname is not None:
d.update({"SshKeyName": self.ssh_keyname})
if self.auto_scale_type is not None:
d.update({"AutoScaleType": self.auto_scale_type})
if self.instance is not None:
d.update({"Ec2InstanceId": self.instance.id})
d.update({"ReportedAgentVersion": "2425-20160406102508 (fixed)"})
d.update({"RootDeviceVolumeId": "vol-a20e450a (fixed)"})
if self.ssh_keyname is not None:
d.update(
{"SshHostDsaKeyFingerprint": "24:36:32:fe:d8:5f:9c:18:b1:ad:37:e9:eb:e8:69:58 (fixed)"})
d.update(
{"SshHostRsaKeyFingerprint": "3c:bd:37:52:d7:ca:67:e1:6e:4b:ac:31:86:79:f5:6c (fixed)"})
d.update({"PrivateDns": self.instance.private_dns})
d.update({"PrivateIp": self.instance.private_ip})
d.update({"PublicDns": getattr(self.instance, 'public_dns', None)})
d.update({"PublicIp": getattr(self.instance, 'public_ip', None)})
return d
class Layer(BaseModel):
def __init__(self, stack_id, type, name, shortname,
attributes=None,
custom_instance_profile_arn=None,
custom_json=None,
custom_security_group_ids=None,
packages=None,
volume_configurations=None,
enable_autohealing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None,
custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
self.stack_id = stack_id
self.type = type
self.name = name
self.shortname = shortname
self.attributes = attributes
if attributes is None:
self.attributes = {
'BundlerVersion': None,
'EcsClusterArn': None,
'EnableHaproxyStats': None,
'GangliaPassword': None,
'GangliaUrl': None,
'GangliaUser': None,
'HaproxyHealthCheckMethod': None,
'HaproxyHealthCheckUrl': None,
'HaproxyStatsPassword': None,
'HaproxyStatsUrl': None,
'HaproxyStatsUser': None,
'JavaAppServer': None,
'JavaAppServerVersion': None,
'Jvm': None,
'JvmOptions': None,
'JvmVersion': None,
'ManageBundler': None,
'MemcachedMemory': None,
'MysqlRootPassword': None,
'MysqlRootPasswordUbiquitous': None,
'NodejsVersion': None,
'PassengerVersion': None,
'RailsStack': None,
'RubyVersion': None,
'RubygemsVersion': None
} # May not be accurate
self.packages = packages
if packages is None:
            self.packages = []
self.custom_recipes = custom_recipes
if custom_recipes is None:
self.custom_recipes = {
'Configure': [],
'Deploy': [],
'Setup': [],
'Shutdown': [],
'Undeploy': [],
}
self.custom_security_group_ids = custom_security_group_ids
if custom_security_group_ids is None:
self.custom_security_group_ids = []
self.lifecycle_event_configuration = lifecycle_event_configuration
if lifecycle_event_configuration is None:
self.lifecycle_event_configuration = {
"Shutdown": {"DelayUntilElbConnectionsDrained": False}
}
self.volume_configurations = volume_configurations
if volume_configurations is None:
self.volume_configurations = []
self.custom_instance_profile_arn = custom_instance_profile_arn
self.custom_json = custom_json
self.enable_autohealing = enable_autohealing
self.auto_assign_elastic_ips = auto_assign_elastic_ips
self.auto_assign_public_ips = auto_assign_public_ips
self.install_updates_on_boot = install_updates_on_boot
self.use_ebs_optimized_instances = use_ebs_optimized_instances
self.id = "{0}".format(uuid.uuid4())
self.created_at = datetime.datetime.utcnow()
def __eq__(self, other):
return self.id == other.id
def to_dict(self):
d = {
"Attributes": self.attributes,
"AutoAssignElasticIps": self.auto_assign_elastic_ips,
"AutoAssignPublicIps": self.auto_assign_public_ips,
"CreatedAt": self.created_at.isoformat(),
"CustomRecipes": self.custom_recipes,
"CustomSecurityGroupIds": self.custom_security_group_ids,
"DefaultRecipes": {
"Configure": [],
"Setup": [],
"Shutdown": [],
"Undeploy": []
}, # May not be accurate
"DefaultSecurityGroupNames": ['AWS-OpsWorks-Custom-Server'],
"EnableAutoHealing": self.enable_autohealing,
"LayerId": self.id,
"LifecycleEventConfiguration": self.lifecycle_event_configuration,
"Name": self.name,
"Shortname": self.shortname,
"StackId": self.stack_id,
"Type": self.type,
"UseEbsOptimizedInstances": self.use_ebs_optimized_instances,
"VolumeConfigurations": self.volume_configurations,
}
if self.custom_json is not None:
d.update({"CustomJson": self.custom_json})
if self.custom_instance_profile_arn is not None:
d.update(
{"CustomInstanceProfileArn": self.custom_instance_profile_arn})
return d
class Stack(BaseModel):
def __init__(self, name, region, service_role_arn, default_instance_profile_arn,
vpcid="vpc-1f99bf7a",
attributes=None,
default_os='Ubuntu 12.04 LTS',
hostname_theme='Layer_Dependent',
default_availability_zone='us-east-1a',
default_subnet_id='subnet-73981004',
custom_json=None,
configuration_manager=None,
chef_configuration=None,
use_custom_cookbooks=False,
use_opsworks_security_groups=True,
custom_cookbooks_source=None,
default_ssh_keyname=None,
default_root_device_type='instance-store',
agent_version='LATEST'):
self.name = name
self.region = region
self.service_role_arn = service_role_arn
self.default_instance_profile_arn = default_instance_profile_arn
self.vpcid = vpcid
self.attributes = attributes
if attributes is None:
self.attributes = {'Color': None}
self.configuration_manager = configuration_manager
if configuration_manager is None:
self.configuration_manager = {'Name': 'Chef', 'Version': '11.4'}
self.chef_configuration = chef_configuration
if chef_configuration is None:
self.chef_configuration = {}
self.custom_cookbooks_source = custom_cookbooks_source
if custom_cookbooks_source is None:
self.custom_cookbooks_source = {}
self.custom_json = custom_json
self.default_ssh_keyname = default_ssh_keyname
self.default_os = default_os
self.hostname_theme = hostname_theme
self.default_availability_zone = default_availability_zone
self.default_subnet_id = default_subnet_id
self.use_custom_cookbooks = use_custom_cookbooks
self.use_opsworks_security_groups = use_opsworks_security_groups
self.default_root_device_type = default_root_device_type
self.agent_version = agent_version
self.id = "{0}".format(uuid.uuid4())
self.layers = []
self.apps = []
self.account_number = "123456789012"
self.created_at = datetime.datetime.utcnow()
def __eq__(self, other):
return self.id == other.id
def generate_hostname(self):
# this doesn't match amazon's implementation
return "{theme}-{rand}-(moto)".format(
theme=self.hostname_theme,
            rand=''.join(choice("abcdefghijhk") for _ in range(4)))
@property
def arn(self):
return "arn:aws:opsworks:{region}:{account_number}:stack/{id}".format(
region=self.region,
account_number=self.account_number,
id=self.id
)
def to_dict(self):
response = {
"AgentVersion": self.agent_version,
"Arn": self.arn,
"Attributes": self.attributes,
"ChefConfiguration": self.chef_configuration,
"ConfigurationManager": self.configuration_manager,
"CreatedAt": self.created_at.isoformat(),
"CustomCookbooksSource": self.custom_cookbooks_source,
"DefaultAvailabilityZone": self.default_availability_zone,
"DefaultInstanceProfileArn": self.default_instance_profile_arn,
"DefaultOs": self.default_os,
"DefaultRootDeviceType": self.default_root_device_type,
"DefaultSshKeyName": self.default_ssh_keyname,
"DefaultSubnetId": self.default_subnet_id,
"HostnameTheme": self.hostname_theme,
"Name": self.name,
"Region": self.region,
"ServiceRoleArn": self.service_role_arn,
"StackId": self.id,
"UseCustomCookbooks": self.use_custom_cookbooks,
"UseOpsworksSecurityGroups": self.use_opsworks_security_groups,
"VpcId": self.vpcid
}
if self.custom_json is not None:
response.update({"CustomJson": self.custom_json})
if self.default_ssh_keyname is not None:
response.update({"DefaultSshKeyName": self.default_ssh_keyname})
return response
class OpsWorksBackend(BaseBackend):
def __init__(self, ec2_backend):
self.stacks = {}
self.layers = {}
self.instances = {}
self.ec2_backend = ec2_backend
def reset(self):
ec2_backend = self.ec2_backend
self.__dict__ = {}
self.__init__(ec2_backend)
def create_stack(self, **kwargs):
stack = Stack(**kwargs)
self.stacks[stack.id] = stack
return stack
def create_layer(self, **kwargs):
name = kwargs['name']
shortname = kwargs['shortname']
stackid = kwargs['stack_id']
if stackid not in self.stacks:
raise ResourceNotFoundException(stackid)
if name in [l.name for l in self.stacks[stackid].layers]:
raise ValidationException(
'There is already a layer named "{0}" '
'for this stack'.format(name))
if shortname in [l.shortname for l in self.stacks[stackid].layers]:
raise ValidationException(
'There is already a layer with shortname "{0}" '
'for this stack'.format(shortname))
layer = Layer(**kwargs)
self.layers[layer.id] = layer
self.stacks[stackid].layers.append(layer)
return layer
def create_instance(self, **kwargs):
stack_id = kwargs['stack_id']
layer_ids = kwargs['layer_ids']
if stack_id not in self.stacks:
raise ResourceNotFoundException(
"Unable to find stack with ID {0}".format(stack_id))
unknown_layers = set(layer_ids) - set(self.layers.keys())
if unknown_layers:
raise ResourceNotFoundException(", ".join(unknown_layers))
layers = [self.layers[id] for id in layer_ids]
if len(set([layer.stack_id for layer in layers])) != 1 or \
any([layer.stack_id != stack_id for layer in layers]):
raise ValidationException(
"Please only provide layer IDs from the same stack")
stack = self.stacks[stack_id]
# pick the first to set default instance_profile_arn and
# security_group_ids on the instance.
layer = layers[0]
kwargs.setdefault("hostname", stack.generate_hostname())
kwargs.setdefault("ssh_keyname", stack.default_ssh_keyname)
kwargs.setdefault("availability_zone", stack.default_availability_zone)
kwargs.setdefault("subnet_id", stack.default_subnet_id)
kwargs.setdefault("root_device_type", stack.default_root_device_type)
if layer.custom_instance_profile_arn:
kwargs.setdefault("instance_profile_arn",
layer.custom_instance_profile_arn)
kwargs.setdefault("instance_profile_arn",
stack.default_instance_profile_arn)
kwargs.setdefault("security_group_ids",
layer.custom_security_group_ids)
kwargs.setdefault("associate_public_ip", layer.auto_assign_public_ips)
kwargs.setdefault("ebs_optimized", layer.use_ebs_optimized_instances)
kwargs.update({"ec2_backend": self.ec2_backend})
opsworks_instance = OpsworkInstance(**kwargs)
self.instances[opsworks_instance.id] = opsworks_instance
return opsworks_instance
def describe_stacks(self, stack_ids):
if stack_ids is None:
return [stack.to_dict() for stack in self.stacks.values()]
unknown_stacks = set(stack_ids) - set(self.stacks.keys())
if unknown_stacks:
raise ResourceNotFoundException(", ".join(unknown_stacks))
return [self.stacks[id].to_dict() for id in stack_ids]
def describe_layers(self, stack_id, layer_ids):
if stack_id is not None and layer_ids is not None:
raise ValidationException(
"Please provide one or more layer IDs or a stack ID"
)
if stack_id is not None:
if stack_id not in self.stacks:
raise ResourceNotFoundException(
"Unable to find stack with ID {0}".format(stack_id))
return [layer.to_dict() for layer in self.stacks[stack_id].layers]
unknown_layers = set(layer_ids) - set(self.layers.keys())
if unknown_layers:
raise ResourceNotFoundException(", ".join(unknown_layers))
return [self.layers[id].to_dict() for id in layer_ids]
def describe_instances(self, instance_ids, layer_id, stack_id):
if len(list(filter(None, (instance_ids, layer_id, stack_id)))) != 1:
raise ValidationException("Please provide either one or more "
"instance IDs or one stack ID or one "
"layer ID")
if instance_ids:
unknown_instances = set(instance_ids) - set(self.instances.keys())
if unknown_instances:
raise ResourceNotFoundException(", ".join(unknown_instances))
return [self.instances[id].to_dict() for id in instance_ids]
if layer_id:
if layer_id not in self.layers:
raise ResourceNotFoundException(
"Unable to find layer with ID {0}".format(layer_id))
instances = [i.to_dict() for i in self.instances.values()
if layer_id in i.layer_ids]
return instances
if stack_id:
if stack_id not in self.stacks:
raise ResourceNotFoundException(
"Unable to find stack with ID {0}".format(stack_id))
instances = [i.to_dict() for i in self.instances.values()
if stack_id == i.stack_id]
return instances
def start_instance(self, instance_id):
if instance_id not in self.instances:
raise ResourceNotFoundException(
"Unable to find instance with ID {0}".format(instance_id))
self.instances[instance_id].start()
opsworks_backends = {}
for region, ec2_backend in ec2_backends.items():
opsworks_backends[region] = OpsWorksBackend(ec2_backend)
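# A minimal usage sketch (illustrative only; the ARNs and names below are made
# up, and in practice these backends are exercised through moto's mocked API):
#
#   backend = opsworks_backends["us-east-1"]
#   stack = backend.create_stack(
#       name="test-stack", region="us-east-1",
#       service_role_arn="arn:aws:iam::123456789012:role/service-role",
#       default_instance_profile_arn="arn:aws:iam::123456789012:instance-profile/default")
#   layer = backend.create_layer(
#       stack_id=stack.id, type="custom", name="web", shortname="web")
#   instance = backend.create_instance(
#       stack_id=stack.id, layer_ids=[layer.id], instance_type="t2.micro")
#   backend.start_instance(instance.id)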
|
|
import graph
import probabilisticstate as pst
from scipy import stats
from random import random as rd
'''
Probabilistic version of a graph. It uses probabilistic states instead of
common states. This allows for methods that compute statistical tests of two
states' morphs, create randomly generated sequences and use statistical criteria
to complete the last level of the graph.
'''
class ProbabilisticGraph(graph.Graph):
def __init__(self, states=[], alphabet=[], path=''):
p_states = []
for s in states:
if s:
s = pst.ProbabilisticState(s.name, s.outedges)
p_states.append(s)
graph.Graph.__init__(self, p_states, alphabet, path)
'''
Name: compare_morphs
Input:
        *morphs 1 & 2: morphs of two different states contained in the current
graph;
*alpha: the precision parameter for the statistical test;
*test: the type of statistical test that will be used. Right now, chi-
        squared and Kolmogorov-Smirnov are implemented.
Output:
*[p >= alpha, p]: the first element returns True if the test passes for
the specified alpha and False otherwise. The second one gives the exact
        p-value returned by the test.
Description:
        Performs a statistical test between two states' morphs, which are prob
        distributions of their output edges. First it extracts just the probs
        from one of the morphs and then does the same for the second morph,
making sure they are in the same order (i.e. the probs for the same
symbol are in the same position). It performs the test and returns its
results.
'''
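    # Illustrative sketch (the morph values below are made up): each morph is a
    # list of (symbol, probability) pairs taken from a state's outgoing edges.
    #
    #   morph1 = [('0', 0.7), ('1', 0.3)]
    #   morph2 = [('0', 0.6), ('1', 0.4)]
    #   passed, p = ProbabilisticGraph.compare_morphs(morph1, morph2,
    #                                                 0.95, 'chi-squared')
    #   # passed is True when the returned p-value is >= alpha for the chosen test.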
@staticmethod
def compare_morphs(morph1, morph2, alpha, test):
probs1 = [float(x[1]) for x in morph1]
probs2 = []
symbols_2 = [x[0] for x in morph2]
        # Loop to guarantee the probability distributions are in the same order:
to_del = []
i = 0
for a in morph1:
if a[0] in symbols_2:
for b in morph2:
if b[0] == a[0]:
if float(b[1]) == float(a[1]) == 0.0:
to_del.append(i)
else:
probs2.append(float(b[1]))
else:
probs2.append(0.0)
i += 1
for d in to_del[::-1]:
del probs1[d]
if probs1 == probs2:
return [True, 1.0]
else:
if test == "chi-squared":
[X, p] = stats.chisquare(probs1, probs2)
elif test == "ks":
[KS, p] = stats.ks_2samp(probs1, probs2)
return [p >= alpha, p]
'''
Name: generate_sequence
Input:
*length: The desired length for the generated sequence;
*ini_state: initial state from which the sequence-generation walk will
begin.
Output:
*data: Sequence of user-specified length.
Description:
        Starts from an initial state and uses the random_step function from the
probabilistic state to retrieve a randomly generated symbol and a
destination state for the next iteration. Stores the names of visited
states, which might be useful in the future, to check if there are
        states that are not reached during a given sequence-generation walk.
'''
@staticmethod
def generate_sequence(length, ini_state):
data = ''
s = ini_state
visited_states = [s.name]
for i in range(0, length):
d, s = s.random_step()
if s:
if s.name not in visited_states:
visited_states.append(s.name)
data += d
return data, visited_states
'''
Name: expand_last_level
Input:
*L: The length of the labels in the level to be considered the last;
*method: which algorithm will be used to expand the last level;
*test: statistical test that is used in some of the methods (either chi-
squared or KS);
*alpha: statistical test precision parameter.
Output:
*A new probabilistic graph with the last level connected by the chosen
method.
Description:
Chooses from one of three methods of last level expansion to reconnect
the states at the level L. The methods are:
*D-Markov: ends the last level with D-Markov connections;
*Old: Tests the destination state with all its possible suffixes and
substitutes it by the one that returns the highest p-value;
        *New: Similar to the old method, but checks only whether the suffixes
        pass the statistical test, and returns the one with the longest label.
'''
def expand_last_level(self, l, method, alpha=0.95, test='chi-squared', synch_words=[]):
if method == "dmark":
h = self.dmark_expansion(l)
else:
h = self.expansion(l, alpha, test, method, synch_words)
return h
'''
Name: dmark_expansion
Input:
*L: The length of the labels in the level to be considered the last.
Output:
*A new probabilistic graph with the last level connected by the chosen
method.
Description:
Implements the D-Markov expansion. An example of a D-Markov expansion
        will suffice to understand how it works.
Suppose a prob graph over binary alphabet. The state 0101 would go to
state 01010 when the 0-labeled edge is taken. If the graph is to be
        ended at length 4, the state labeled with the last 4 symbols of
01010 is the new destination, that is, 1010.
If such a state does not exist, the algorithm will drop the first digit
of the label until it finds a state that exists. If none does, it will
connect to the root.
'''
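    # Worked example of the suffix fallback (binary alphabet, l = 4): following
    # edge '0' out of state 0101 yields the label 01010; the candidate
    # destinations are tried in order 1010, 010, 10, and the edge is connected
    # to the root if none of those states exist in the graph.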
def dmark_expansion(self, l):
last_level = [x for x in self.states if x.name_length() == l]
new_last_level = []
for x in last_level:
new_outedges = []
for e in x.outedges:
for i in range(1, l+1):
if i < l:
dest = x.name[i:] + e[0]
next_state = self.state_named(dest)
if next_state or (e[2] == 0.0):
new_outedges.append((e[0], next_state, e[2]))
break
else:
new_outedges.append((e[0], self.root(), e[2]))
x.outedges = new_outedges
new_last_level.append(x)
new_states = [x for x in self.states if x.name_length() < l]
new_states.extend(new_last_level)
return ProbabilisticGraph(new_states, self.alphabet)
'''
Name: expansion
Input:
*L: The length of the labels in the level to be considered the last;
*method: which algorithm will be used to expand the last level;
*test: statistical test that is used in some of the methods (either chi-
squared or KS);
*alpha: statistical test precision parameter.
Output:
*A new probabilistic graph with the last level connected by the chosen
method.
Description:
        The new and old methods are very similar. They perform the same
        statistical tests, but use different criteria to choose from the results.
This function applies the general method for both of them and calls
an appropriate function to choose between the old and new methods'
criteria.
'''
def expansion(self, l, alpha, test, method, s_words=[]):
last_level = [x for x in self.states if x.name_length() == l]
new_last_level = []
new_states = [x for x in self.states if x.name_length() < l]
for s in last_level:
new_outedges = []
for edge in s.outedges:
a = edge[0]
next_name = s.name + a
true_next = self.state_named(next_name)
if true_next:
lgth = len(next_name)
results = []
for i in range(1, lgth+1):
if i < lgth:
candidate = self.state_named(next_name[i:])
else:
candidate = self.root()
if candidate:
r = self.compare_morphs(true_next.morph(), candidate.morph(),
alpha, test)
else:
r = [False, 0.0]
results.append([r, candidate])
if method == 'old':
new_next = self.old_method(results)
elif method == 'new':
new_next = self.new_method(results, next_name[1:])
elif method == 'omega':
new_next = self.omega_method(true_next, results, new_states + new_last_level, next_name[1:],
alpha, test)
elif method == 'omega_inverted':
synch_n_ext = []
if s_words:
for word in new_states + last_level:
for syn in s_words:
s_len = len(syn)
prefix = word.name[:s_len]
if syn == prefix:
synch_n_ext.append(word)
break
new_next = self.omega_inverted_method(true_next, results, synch_n_ext, next_name[1:],
alpha, test)
new_outedge = (a, new_next, edge[2])
new_outedges.append(new_outedge)
else:
new_outedges.append((a, None, '0.0'))
s.outedges = new_outedges
new_last_level.append(s)
new_states.extend(new_last_level)
h = ProbabilisticGraph(new_states, self.alphabet)
return h
'''
Name: old_method
Input:
*results: A list of 2-tuples. The first element of the 2-tuple is the
result of a statistical test and the second is a state (which is a
suffix of the real expanded state) for which the test was taken.
Output:
*The state for which the test result was the highest.
Description:
Implements the old method criterion.
'''
@staticmethod
def old_method(results):
w = [r[0][1] for r in results]
arg = w.index(max(w))
return results[arg][1]
'''
Name: new_method
Input:
*results: A list of 2-tuples. The first element of the 2-tuple is the
result of a statistical test and the second is a state (which is a
suffix of the real expanded state) for which the test was taken.
Output:
*The state with longest label that passed the test.
Description:
Implements the new method criterion. If no state passes the test, it
simply applies the d-markov criterion for this specific state.
'''
@staticmethod
def new_method(results, default_name):
w = [c[1] for c in results if c[0][0]]
if w:
lens = [len(y.name) for y in w]
arg = lens.index(max(lens))
return w[arg]
else:
return [x[1] for x in results if x[1].name == default_name][0]
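    # omega_method: like new_method, prefer the longest suffix candidate that
    # passed the test; if none passed, fall back to the first state already
    # placed in the new last level whose morph passes the test, and finally to
    # the D-Markov default state.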
def omega_method(self, exp, results, new_last_level, default_name, alpha, test):
w = [c[1] for c in results if c[0][0]]
if w:
lens = [len(y.name) for y in w]
arg = lens.index(max(lens))
return w[arg]
else:
for s in new_last_level:
r = self.compare_morphs(s.morph(), exp.morph(), alpha, test)
if r[0]:
return s
return [x[1] for x in results if x[1].name == default_name][0]
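    # omega_inverted_method: first try the synchronization-word states; if none
    # of their morphs pass the test, fall back to the longest suffix candidate
    # that passed, and finally to the D-Markov default state.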
def omega_inverted_method(self, exp, results, synch_words, default_name, alpha, test):
for s in synch_words:
r = self.compare_morphs(s.morph(), exp.morph(), alpha, test)
if r[0]:
return s
w = [c[1] for c in results if c[0][0]]
if w:
lens = [len(y.name) for y in w]
arg = lens.index(max(lens))
return w[arg]
else:
return [x[1] for x in results if x[1].name == default_name][0]
'''
Name: open_graph_file
Input:
*path: file path where the graph file is saved.
Description:
Adapts super's open_graph_file in order to convert all states to prob
states.
'''
def open_graph_file(self, path):
aux = graph.Graph(path=path)
states = []
for s in aux.states:
states.append(pst.ProbabilisticState(s.name, s.outedges))
for ns in states:
newedges = []
for edge in ns.outedges:
if edge[1]:
destname = edge[1].name
newdest = [x for x in states if x.name == destname][0]
else:
newdest = None
newedge = (edge[0], newdest, edge[2])
newedges.append(newedge)
ns.outedges = newedges
self.states = states
self.alphabet = aux.alphabet
def irreducible(self, seq_len):
d, v = self.generate_sequence(seq_len, self.states[0])
irreducible_states = [s for s in self.states if s.name in v]
self.states = irreducible_states
def remove_unreachable_states(self):
g = graph.Graph(self.states, self.alphabet)
h = g.remove_unreachable_states()
return ProbabilisticGraph(h.states, h.alphabet)
|
|
import json
import logging
import os
import sys
from pprint import pprint
from google.cloud import storage
from google.api_core.exceptions import NotFound
from rdr_service.config import PUBSUB_NOTIFICATION_BUCKETS_PROD, PUBSUB_NOTIFICATION_BUCKETS_SANDBOX
from rdr_service.tools.tool_libs.tool_base import cli_run, ToolBase
tool_cmd = 'pubsub-manager'
tool_desc = 'Manage GCloud Pub/Sub Notifications'
_logger = logging.getLogger("rdr_logger")
CONFIG_ROOT = os.path.join(os.path.dirname(__file__), '../../config')
SUPPORTED_PROJECT_CONFIGS = {
'all-of-us-rdr-prod': 'pub_sub_config_prod.json',
'all-of-us-rdr-sandbox': 'pub_sub_config_sandbox.json'
}
class PubSubNotificationManager(ToolBase):
def run(self):
if self.args.command == "list":
return self.command_list()
if self.args.command == "create":
return self.command_create()
if self.args.command == "delete":
return self.command_delete()
def command_list(self):
"""
Lists all Pub/Sub notifications for all registered buckets.
        If --bucket is supplied, only lists Pub/Sub notifications for the
        given bucket.
"""
# Get buckets
project_bucket_mappings = {
'all-of-us-rdr-prod': PUBSUB_NOTIFICATION_BUCKETS_PROD,
'all-of-us-rdr-sandbox': PUBSUB_NOTIFICATION_BUCKETS_SANDBOX,
}
bucket_list = [self.args.bucket] if self.args.bucket else project_bucket_mappings[self.gcp_env.project]
notifications_dict = {"notifications": []}
for bucket_name in bucket_list:
# call storage api
client = storage.Client()
bucket = client.get_bucket(bucket_name)
notifications = bucket.list_notifications(client)
for notification in notifications:
                # Skip the default topic notification (which won't have an integer ID)
try:
id_int = int(notification.notification_id)
except ValueError:
continue
if self.args.id and self.args.id != id_int:
continue
output_dict = dict()
try:
output_dict['bucket'] = bucket_name
output_dict['id'] = notification.notification_id
output_dict['topic_name'] = notification.topic_name
output_dict['topic_project'] = notification.topic_project
output_dict['payload_format'] = notification.payload_format
output_dict['object_name_prefix'] = notification._properties['object_name_prefix']
output_dict['event_types'] = notification.event_types
except KeyError:
pass
notifications_dict['notifications'].append(output_dict)
pprint(notifications_dict)
return 0
def command_create(self):
"""
Create a new Pub/Sub notification based on the JSON
in the supplied --config-file for configurations
        where the 'id' key has a value of null.
"""
config_path = self.get_config_path()
if not os.path.exists(config_path):
_logger.error(f'File {config_path} was not found.')
return 1
with open(config_path) as f:
config_data = json.load(f)
new_notifications_list = filter(
lambda x: x['id'] is None,
config_data['notifications']
)
for new_notification in new_notifications_list:
notification = self.create_notification(new_notification)
new_notification['id'] = notification.notification_id
# Output created notification config data
_logger.info('Notification created:')
pprint(new_notification)
# replace existing json file with updated notification config
self.update_notification_config(config_data, config_path)
return 0
def command_delete(self):
"""
Delete the Pub/Sub notification with --id and --bucket
"""
if self.gcp_env.project not in SUPPORTED_PROJECT_CONFIGS.keys():
_logger.error(f'Project config not supported {self.gcp_env.project}')
return 1
if not self.args.bucket and not self.args.id:
_logger.error("--bucket and --id required for delete.")
return 1
# Get notification
client = storage.Client()
bucket = client.get_bucket(self.args.bucket)
target = bucket.get_notification(self.args.id, client)
if self.gcp_env.project != target.topic_project:
_logger.error("Notification project and specified project do not match.")
return 1
# Delete the notification
try:
target.delete(client=client)
except NotFound:
_logger.error(f"Notification ID {self.args.id} not found.")
return 1
_logger.info(f"Notification id {self.args.id} has been deleted.")
_logger.info("Removing notification from config...")
self.delete_notification_from_config()
return 0
def create_notification(self, new_notification: dict):
"""
Calls google api to create new pub/sub notification
:param new_notification: notification entry (dict) taken from the config file
:return: the created google.cloud.storage BucketNotification
"""
bucket_name = new_notification['bucket']
if self.gcp_env.project != new_notification['topic_project']:
_logger.error(f'Notification topic_project does not match {self.gcp_env.project}.')
sys.exit(1)
# create notification
client = storage.Client()
bucket = client.get_bucket(bucket_name)
notification = bucket.notification(
topic_name=new_notification['topic_name'],
topic_project=new_notification['topic_project'],
custom_attributes=None,
event_types=new_notification['event_types'],
blob_name_prefix=new_notification['object_name_prefix'],
payload_format=new_notification['payload_format'],
notification_id=None,
)
notification.create(client=client)
return notification
def get_config_path(self):
if self.args.config_file:
config_name = self.args.config_file
else:
config_name = SUPPORTED_PROJECT_CONFIGS[self.gcp_env.project]
return os.path.join(CONFIG_ROOT, config_name)
@staticmethod
def update_notification_config(data, config_path):
with open(config_path, "w") as f:
f.write(json.dumps(data, indent=2))
def delete_notification_from_config(self):
config_path = self.get_config_path()
with open(config_path, "r") as f:
config_data = json.load(f)
notification_iter = filter(
lambda x: x['id'] == str(self.args.id),
config_data['notifications']
)
for notification in notification_iter:
config_data['notifications'].remove(notification)
with open(config_path, "w") as f:
f.write(json.dumps(config_data, indent=2))
_logger.info("Notification removed from config.")
def add_additional_arguments(parser):
parser.add_argument("--command", default=None, required=True, choices=['list', 'create', 'delete'], type=str)
parser.add_argument("--bucket", default=None, required=False, help="GCS bucket to target", type=str)
parser.add_argument("--config-file", default=None, required=False,
help="path to json notification config file", type=str)
parser.add_argument("--id", default=None, required=False, help="notification ID to target", type=int)
def run():
return cli_run(tool_cmd, tool_desc, PubSubNotificationManager, add_additional_arguments)
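# Usage sketch (assumptions, not part of the tool itself): the config files in
# CONFIG_ROOT are expected to look roughly like the entry below, which mirrors
# the keys read by command_list() and create_notification(); the bucket and
# topic names are placeholders.
#
#   {
#     "notifications": [
#       {
#         "id": null,
#         "bucket": "example-bucket",
#         "topic_name": "example-topic",
#         "topic_project": "all-of-us-rdr-sandbox",
#         "payload_format": "JSON_API_V1",
#         "event_types": ["OBJECT_FINALIZE"],
#         "object_name_prefix": "example-prefix/"
#       }
#     ]
#   }
#
# With such a file, `--command create` fills in the null ids, `--command list`
# prints the live notifications, and `--command delete --bucket ... --id ...`
# removes one notification and drops it from the config.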
|
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from smach import StateMachine
import actionlib
import time
import threading
from smach_ros import SimpleActionState
from smach_ros import ActionServerWrapper
from std_msgs.msg import String, Float64, UInt8, Bool
from wm_interpreter.msg import *
from collections import Counter
TIMEOUT_LENGTH = 10
# define state WaitingQuestion
class WaitingQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['NotUnderstood', 'Question', 'Timeout'],
input_keys=[],
output_keys=['WQ_question_out'])
self.RecoString = []
self.state = "WaitingQuestion"
self.QUESTIONS = []
self.QUESTIONS.append(["What is your name",
"Do a little presentation",
"Who are the inventors of the C programming language",
"Who is the inventor of the Python programming language",
"Which robot was the star in the movie Wall-E",
"Where does the term computer bug come from",
"What is the name of the round robot in the new Star Wars movie",
"How many curry sausages are eaten in Germany each year",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy",
"Which robot is the love interest in Wall-E",
"Which company makes ASIMO",
"What company makes Big Dog",
"What is the funny clumsy character of the Star Wars prequels",
"How many people live in the Germany",
"What are the colours of the German flag",
"What city is the capital of the Germany",
"How many arms do you have",
"What is the heaviest element",
"what did Alan Turing create",
"Who is the helicopter pilot in the A-Team",
"What Apollo was the last to land on the moon",
"Who was the last man to step on the moon",
"In which county is the play of Hamlet set",
"What are names of Donald Duck nephews",
"How many metres are in a mile",
"Name a dragon in The Lord of the Rings",
"Who is the Chancellor of Germany",
"Who developed the first industrial robot",
"What's the difference between a cyborg and an android",
"Do you know any cyborg",
"In which city is this year's RoboCup hosted",
"Which city hosted last year's RoboCup",
"In which city will next year's RoboCup be hosted",
"Name the main rivers surrounding Leipzig",
"Where is the zoo of this city located",
"Where did the peaceful revolution of 1989 start",
"Where is the world's oldest trade fair hosted",
"Where is one of the world's largest dark music festivals hosted",
"Where is Europe's oldest continuous coffee shop hosted",
"Name one of the greatest German composers",
"Where is Johann Sebastian Bach buried",
"Do you have dreams",
"Hey what's up",
"There are seven days in a week. True or false",
"There are eleven days in a week. True or false",
"January has 31 days. True or false",
"January has 28 days. True or false",
"February has 28 days. True or false",
"February has 31 days. True or false",
"What city are you from",
"Who used first the word Robot",
"What origin has the word Robot"])
self.QUESTIONS.append([0] * len(self.QUESTIONS[0]))
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
self.face_cmd = rospy.Publisher('/face_mode', UInt8, queue_size=1, latch=True)
self.sub = rospy.Subscriber("/recognizer_1/output", String, self.callback, queue_size=1)
def execute(self, userdata):
rospy.loginfo('Executing state WaitingQuestion')
self.face_cmd.publish(3)
timeout = time.time() + TIMEOUT_LENGTH # 10 sec
while True:
if max(self.QUESTIONS[1]) > 70:
userdata.WQ_question_out = self.QUESTIONS[0][self.QUESTIONS[1].index(max(self.QUESTIONS[1]))]
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = 0
return 'Question'
# else:
#     if len(self.RecoString) < 2:
#         return 'NotUnderstood'
# if time.time() > timeout:
#     return 'Timeout'
rospy.sleep(0.1)  # yield briefly so the recogniser callback can update the scores
def callback(self, data):
self.RecoString = data.data.split()
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = 0
for RecoWord in self.RecoString:
for idx in range(len(self.QUESTIONS[1])):
if self.QUESTIONS[0][idx].lower().find(RecoWord) != -1:
self.QUESTIONS[1][idx] += 1
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = self.QUESTIONS[1][idx]*100/len(self.QUESTIONS[0][idx].split())
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AnswerQuestion
class AnswerQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'],
input_keys=['AQ_question_in'])
self.ANSWERS = {"What is your name":"Mon nom est Sara, ce qui signifie Systeme dassistance robotiser autonome",
"Do a little presentation":"Je suis un robot dassistance robotiser autonome. Jai eter concu par le club Walking Machine de ler-cole de technologie superieure specialement pour la comper-tition Robocup at Home.",
"Who are the inventors of the C programming language": "Les inventeur du language de programmation C sont Ken Thompson et Dennis Ritchie",
"Who is the inventor of the Python programming language": "Linventeur du language de programation python est Guido van Rossum",
"Which robot was the star in the movie Wall-E": "Le robot qui est lacteur principale dans le film Wall-E est Wall-E",
"Where does the term computer bug come from": "Le terme bogue informatique vient dun papillon de nuit coince dans un relais",
"What is the name of the round robot in the new Star Wars movie": "Le nom du petit robot rond dans le nouveau film de Star Wars est B B 8",
"How many curry sausages are eaten in Germany each year": "Environ 800 million currywurst par anner",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy": "Le president de la galaxie dans le film Le Guide du voyageur galactique est Zaphod Beeblebrox",
"Which robot is the love interest in Wall-E": "Le robot companion de Wall-E est Eve",
"Which company makes ASIMO": "La compagnie qui fabrique ASIMO est Honda",
"What company makes Big Dog": "La compagnie qui fabrique Big Dog est Boston Dynamics",
"What is the funny clumsy character of the Star Wars prequels": "Le personnage drole mais maladroit des prelude de Star Wars est Jar-Jar Binks",
"How many people live in the Germany": "Il y a 80 millions dhabitant en Allemagne ",
"What are the colours of the German flag": "Les couleurs du drapeau de lAllemagne sont rouge, noir et jaune",
"What city is the capital of the Germany": "La capital de lAllemagne est Berlin",
"How many arms do you have": "Jai seulement un bras pour le moment. Veuillez me le redemander lannnee prochain",
"What is the heaviest element": "Lelement le plus lourd est le plutonium lorsquil est mesure par la masse de lelement mais lOsmium est plus dense",
"What did Alan Turing create": "Alan Turing a cree plusieurs choses comme les machines de Turing et le test de Turing",
"Who is the helicopter pilot in the A-Team": "Le pilote dhelicoptere dans A-Team est le capitaine Howling Mad Murdock",
"What Apollo was the last to land on the moon": "Le dernier a avoir atteris sur la lune etait Apollo 17",
"Who was the last man to step on the moon": "Le dernier homme a avoir marcher sur la lune etait Gene Cernan",
"In which county is the play of Hamlet set": "Il etait au Danemark",
"What are names of Donald Duck nephews": "The nom des neveux de Donald Duck etaient Huey Dewey et Louie Duck",
"How many metres are in a mile": "Il y a environ 1609 metres dans un mile",
"Name a dragon in The Lord of the Rings": "Le nom du dragon dans le Seigneur des anneaux etait Smaug",
"Who is the Chancellor of Germany": "La chancelliere de lAllemagne est Angela Merkel",
"Who developed the first industrial robot": "Le premier a developper un robot industriel etait le physicien americain Joseph Engelberg. Il est aussi considere comme le pere de la robotique.",
"What's the difference between a cyborg and an android": "Les cyborgs sont des etres biologiques avec des ameliorations electromecaniques. Les androids sont des robots avec une apparence humaine.",
"Do you know any cyborg": "Le professeur Kevin Warwick. Il a implemente un circuit dans son avant-bras gauche.",
"In which city is this year's RoboCup hosted": "La Robocup 2016 etait a Leipzig en Allemagne",
"Which city hosted last year's RoboCup": "La robocup 2015 etait a Heifei en Chine.",
"In which city will next year's RoboCup be hosted": "Robocup 2017 sera a Nagoya au Japon.",
"Name the main rivers surrounding Leipzig": "La Parthe Pleisse et la White Elster",
"Where is the zoo of this city located": "Le zoo est situe pres de la gare centrale.",
"Where did the peaceful revolution of 1989 start": "La revolution tranquille commenca le 4 septembre 1989 a Leipzig a la leglise Saint Nicholas.",
"Where is the world's oldest trade fair hosted": "La Foire de Leipzig est la plus ancienne du monde",
"Where is one of the world's largest dark music festivals hosted": "La ville de Leipzig accueille lun des plus grand festival de musique gothique du monde",
"Where is Europe's oldest continuous coffee shop hosted": "Le plus ancien cafe deurope ce trouve a Leipzig",
"Name one of the greatest German composers": "Jean Sebastien Bach est le plus grand compositeur dAllemagne",
"Where is Johann Sebastian Bach buried": "La sepulture de Jean Sebastien Bach se trouve a leglise Saint Thomas a Leipzig",
"Do you have dreams": "Je reve de moutons electriques.",
"Hey what's up": "Comment le saurai-je?",
"There are seven days in a week. True or false": "Cest vrais, il y a bel et bien sept jours dans une semaine.",
"There are eleven days in a week. True or false": "Cest faux, il y a plutot sept jours dans une semaine.",
"January has 31 days. True or false": "Cest vrai, le mois de Janvier compte 31 jours.",
"January has 28 days. True or false": "Faux, Janvier contient 31 jours, pas 28",
"February has 28 days. True or false": "Vrai, sauf dans une annee bissextile qui en contient 29",
"February has 31 days. True or false": "Faux, Fevrier a soit 28 jours, ou 29 selon lannee.",
"What city are you from": "Je viens de Mont-rer al",
"Who used first the word Robot": "Le mot robot fut utilise pour la premiere fois par lecrivain tcheque Karel Capek",
"What origin has the word Robot": "Il provient du mot tcheque Robota qui signifie travail force ou esclavage"}
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state WaitingConfirmation --')
self.SayX(self.ANSWERS[userdata.AQ_question_in])
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AskToRepeat
class AskToRepeat(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'])
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state AskRepeat --')
self.SayX("Can you repeat the question please?")
rospy.sleep(5)
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# main
def main():
rospy.init_node('interpreter')
rospy.sleep(5)
tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
neck_pub = rospy.Publisher('neckHead_controller/command', Float64, queue_size=1, latch=True)
neck_cmd = Float64()
neck_cmd.data = 0
neck_pub.publish(neck_cmd)
tts_pub.publish("Bonjour, je suis maintenant prete a repondre a vos questions")
outcomes = ""
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['success', 'aborted', 'preempted'],
output_keys=[])
with sm:
# Add states to the container
smach.StateMachine.add('WaitingQuestion', WaitingQuestion(),
transitions={'Question': 'AnswerQuestion',
'NotUnderstood': 'AskToRepeat',
'Timeout': 'WaitingQuestion'},
remapping={'WQ_question_out': 'question'})
smach.StateMachine.add('AnswerQuestion', AnswerQuestion(),
transitions={'Done': 'WaitingQuestion'},
remapping={'AQ_question_in': 'question'})
smach.StateMachine.add('AskToRepeat', AskToRepeat(),
transitions={'Done': 'WaitingQuestion'},
)
'''sis = smach_ros.IntrospectionServer('server_name', asw.wrapped_container, '/ASW_ROOT')'''
# Execute SMACH plan
sm.execute()
rospy.spin()
# Request the container to preempt
sm.request_preempt()
if __name__ == '__main__':
main()
|
|
import logging
import itertools
import numpy as np
from copy import deepcopy
from pycqed.measurement.waveform_control import element
from pycqed.measurement.waveform_control import pulse
from pycqed.measurement.waveform_control import sequence
from pycqed.utilities.general import add_suffix_to_dict_keys
from pycqed.measurement.pulse_sequences.standard_elements import multi_pulse_elt
from pycqed.measurement.pulse_sequences.standard_elements import distort_and_compensate
from pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts import get_pulse_dict_from_pars
from importlib import reload
reload(pulse)
from ..waveform_control import pulse_library
reload(pulse_library)
station = None
reload(element)
kernel_dir = 'kernels/'
# You need to explicitly set this before running any functions from this module
# I guess there are cleaner solutions :)
cached_kernels = {}
def avoided_crossing_spec_seq(operation_dict, q0, q1, RO_target,
verbose=False,
upload=True):
seq_name = 'avoided_crossing_spec'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
sequencer_config = operation_dict['sequencer_config']
# N.B. Identities not needed in all cases
pulse_combinations = ['X180 '+q0, 'SpecPulse '+q1, 'RO '+RO_target]
pulses = []
for p in pulse_combinations:
pulses += [operation_dict[p]]
el = multi_pulse_elt(0, station, pulses, sequencer_config)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
if upload:
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
return seq, el_list
def two_qubit_off_on(q0_pulse_pars, q1_pulse_pars, RO_pars,
return_seq=False, verbose=False):
seq_name = '2_qubit_OffOn_sequence'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# Create a dict with the parameters for all the pulses
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars), ' q1')
RO_dict = {'RO': RO_pars}
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update(RO_dict)
# N.B. Identities not needed in all cases
pulse_combinations = [['I q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO']]
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
el = multi_pulse_elt(i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
def three_qubit_off_on(q0_pulse_pars, q1_pulse_pars, q2_pulse_pars, RO_pars,
return_seq=False, verbose=False):
seq_name = '3_qubit_OffOn_sequence'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# Create a dict with the parameters for all the pulses
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars), ' q1')
q2_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q2_pulse_pars), ' q2')
RO_dict = {'RO': RO_pars}
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update(q2_pulses)
pulse_dict.update(RO_dict)
# N.B. Identities not needed in all cases
pulse_combinations = [['I q0', 'I q1', 'I q2', 'RO'],
['X180 q0', 'I q1', 'I q2', 'RO'],
['I q0', 'X180 q1', 'I q2', 'RO'],
['X180 q0', 'X180 q1', 'I q2', 'RO'],
['I q0', 'I q1', 'X180 q2', 'RO'],
['X180 q0', 'I q1', 'X180 q2', 'RO'],
['I q0', 'X180 q1', 'X180 q2', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2', 'RO']]
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
el = multi_pulse_elt(i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
def four_qubit_off_on(q0_pulse_pars,
q1_pulse_pars,
q2_pulse_pars,
q3_pulse_pars,
RO_pars,
return_seq=False, verbose=False):
seq_name = '4_qubit_OffOn_sequence'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# Create a dict with the parameters for all the pulses
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars), ' q1')
q2_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q2_pulse_pars), ' q2')
q3_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q3_pulse_pars), ' q3')
RO_dict = {'RO': RO_pars}
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update(q2_pulses)
pulse_dict.update(q3_pulses)
pulse_dict.update(RO_dict)
# N.B. Identities not needed in all cases
pulse_combinations = [['I q0', 'I q1', 'I q2', 'I q3', 'RO'],
['X180 q0', 'I q1', 'I q2', 'I q3', 'RO'],
['I q0', 'X180 q1', 'I q2', 'I q3', 'RO'],
['X180 q0', 'X180 q1', 'I q2', 'I q3', 'RO'],
['I q0', 'I q1', 'X180 q2', 'I q3', 'RO'],
['X180 q0', 'I q1', 'X180 q2', 'I q3', 'RO'],
['I q0', 'X180 q1', 'X180 q2', 'I q3', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2', 'I q3', 'RO'],
['I q0', 'I q1', 'I q2', 'X180 q3', 'RO'],
['X180 q0', 'I q1', 'I q2', 'X180 q3', 'RO'],
['I q0', 'X180 q1', 'I q2', 'X180 q3', 'RO'],
['X180 q0', 'X180 q1', 'I q2', 'X180 q3', 'RO'],
['I q0', 'I q1', 'X180 q2', 'X180 q3', 'RO'],
['X180 q0', 'I q1', 'X180 q2', 'X180 q3', 'RO'],
['I q0', 'X180 q1', 'X180 q2', 'X180 q3', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2', 'X180 q3', 'RO']]
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
el = multi_pulse_elt(i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
def five_qubit_off_on(q0_pulse_pars,
q1_pulse_pars,
q2_pulse_pars,
q3_pulse_pars,
q4_pulse_pars,
RO_pars,
return_seq=False, verbose=False):
seq_name = '5_qubit_OffOn_sequence'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# Create a dict with the parameters for all the pulses
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars), ' q1')
q2_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q2_pulse_pars), ' q2')
q3_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q3_pulse_pars), ' q3')
q4_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q4_pulse_pars), ' q4')
RO_dict = {'RO': RO_pars}
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update(q2_pulses)
pulse_dict.update(q3_pulses)
pulse_dict.update(q4_pulses)
pulse_dict.update(RO_dict)
# N.B. Identities not needed in all cases
pulse_combinations = [['I q0', 'I q1', 'I q2', 'I q3', 'I q4', 'RO'],
['X180 q0', 'I q1', 'I q2', 'I q3', 'I q4', 'RO'],
['I q0', 'X180 q1', 'I q2', 'I q3', 'I q4', 'RO'],
['X180 q0', 'X180 q1', 'I q2', 'I q3', 'I q4', 'RO'],
['I q0', 'I q1', 'X180 q2', 'I q3', 'I q4', 'RO'],
['X180 q0', 'I q1', 'X180 q2', 'I q3', 'I q4', 'RO'],
['I q0', 'X180 q1', 'X180 q2',
'I q3', 'I q4', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2',
'I q3', 'I q4', 'RO'],
['I q0', 'I q1', 'I q2', 'X180 q3', 'I q4', 'RO'],
['X180 q0', 'I q1', 'I q2', 'X180 q3', 'I q4', 'RO'],
['I q0', 'X180 q1', 'I q2', 'X180 q3', 'I q4', 'RO'],
['X180 q0', 'X180 q1', 'I q2',
'X180 q3', 'I q4', 'RO'],
['I q0', 'I q1', 'X180 q2', 'X180 q3', 'I q4', 'RO'],
['X180 q0', 'I q1', 'X180 q2',
'X180 q3', 'I q4', 'RO'],
['I q0', 'X180 q1', 'X180 q2',
'X180 q3', 'I q4', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2',
'X180 q3', 'I q4', 'RO'],
['I q0', 'I q1', 'I q2', 'I q3', 'X180 q4', 'RO'],
['X180 q0', 'I q1', 'I q2', 'I q3', 'X180 q4', 'RO'],
['I q0', 'X180 q1', 'I q2', 'I q3', 'X180 q4', 'RO'],
['X180 q0', 'X180 q1', 'I q2',
'I q3', 'X180 q4', 'RO'],
['I q0', 'I q1', 'X180 q2', 'I q3', 'X180 q4', 'RO'],
['X180 q0', 'I q1', 'X180 q2',
'I q3', 'X180 q4', 'RO'],
['I q0', 'X180 q1', 'X180 q2',
'I q3', 'X180 q4', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2',
'I q3', 'X180 q4', 'RO'],
['I q0', 'I q1', 'I q2', 'X180 q3', 'X180 q4', 'RO'],
['X180 q0', 'I q1', 'I q2',
'X180 q3', 'X180 q4', 'RO'],
['I q0', 'X180 q1', 'I q2',
'X180 q3', 'X180 q4', 'RO'],
['X180 q0', 'X180 q1', 'I q2',
'X180 q3', 'X180 q4', 'RO'],
['I q0', 'I q1', 'X180 q2',
'X180 q3', 'X180 q4', 'RO'],
['X180 q0', 'I q1', 'X180 q2',
'X180 q3', 'X180 q4', 'RO'],
['I q0', 'X180 q1', 'X180 q2',
'X180 q3', 'X180 q4', 'RO'],
['X180 q0', 'X180 q1', 'X180 q2', 'X180 q3', 'X180 q4', 'RO']]
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
el = multi_pulse_elt(i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
def two_qubit_AllXY(operation_dict, q0='q0', q1='q1', RO_target='all',
sequence_type='simultaneous',
replace_q1_pulses_X180=False,
double_points=True,
verbose=False, upload=True,
return_seq=False):
"""
Performs an AllXY sequence on two qubits.
Has the option of replacing pulses on q1 with pi pulses
Args:
operation_dict (dict) : dictionary containing all pulse parameters
q0, q1 (str) : target qubits for the sequence
RO_target (str) : target for the RO, can be a qubit name or 'all'
sequence_type (str) : sequential | interleaved | simultaneous | sandwiched
q0|q0|q1|q1 q0|q1|q0|q1 q01|q01 q1|q0|q0|q1
describes the order of the AllXY pulses
replace_q1_pulses_X180 (bool) : if True replaces all pulses on q1 with
X180 pulses.
double_points (bool) : if True measures each point in the AllXY twice
verbose (bool) : verbose sequence generation
upload (bool) : if True, programs the AWGs with the generated sequence
"""
seq_name = 'two_qubit_AllXY_{}_{}'.format(q0, q1)
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
sequencer_config = operation_dict['sequencer_config']
AllXY_pulse_combinations = [['I ', 'I '], ['X180 ', 'X180 '], ['Y180 ', 'Y180 '],
['X180 ', 'Y180 '], ['Y180 ', 'X180 '],
['X90 ', 'I '], ['Y90 ', 'I '], [
'X90 ', 'Y90 '],
['Y90 ', 'X90 '], ['X90 ', 'Y180 '], [
'Y90 ', 'X180 '],
['X180 ', 'Y90 '], ['Y180 ', 'X90 '], [
'X90 ', 'X180 '],
['X180 ', 'X90 '], ['Y90 ', 'Y180 '], [
'Y180 ', 'Y90 '],
['X180 ', 'I '], ['Y180 ', 'I '], [
'X90 ', 'X90 '],
['Y90 ', 'Y90 ']]
if double_points:
AllXY_pulse_combinations = [val for val in AllXY_pulse_combinations
for _ in (0, 1)]
if sequence_type == 'simultaneous':
operation_dict = deepcopy(operation_dict) # prevents overwriting of dict
for key in operation_dict.keys():
if q1 in key:
operation_dict[key]['refpoint'] = 'start'
operation_dict[key]['pulse_delay'] = 0
pulse_list = []
if not replace_q1_pulses_X180:
for pulse_comb in AllXY_pulse_combinations:
if sequence_type == 'interleaved' or sequence_type == 'simultaneous':
pulse_list += [[pulse_comb[0] + q0] + [pulse_comb[0] + q1] +
[pulse_comb[1] + q0] + [pulse_comb[1] + q1] +
['RO ' + RO_target]]
elif sequence_type == 'sequential':
pulse_list += [[pulse_comb[0] + q0] + [pulse_comb[1] + q0] +
[pulse_comb[0] + q1] + [pulse_comb[1] + q1] +
['RO ' + RO_target]]
elif sequence_type == 'sandwiched':
pulse_list += [[pulse_comb[0] + q1] + [pulse_comb[0] + q0] +
[pulse_comb[1] + q0] + [pulse_comb[1] + q1] +
['RO ' + RO_target]]
else:
raise ValueError("sequence_type {} must be in".format(sequence_type) +
" ['interleaved', simultaneous', 'sequential', 'sandwiched']")
else:
for pulse_comb in AllXY_pulse_combinations:
if sequence_type == 'interleaved' or sequence_type == 'simultaneous':
pulse_list += [[pulse_comb[0] + q0] + ['X180 ' + q1] +
[pulse_comb[1] + q0] + ['X180 ' + q1] +
['RO ' + RO_target]]
elif sequence_type == 'sequential':
pulse_list += [[pulse_comb[0] + q0] + [pulse_comb[1] + q0] +
['X180 ' + q1] + ['X180 ' + q1] +
['RO ' + RO_target]]
elif sequence_type == 'sandwiched':
pulse_list += [['X180 ' + q1] + [pulse_comb[0] + q0] +
[pulse_comb[1] + q0] + ['X180 ' + q1] +
['RO ' + RO_target]]
else:
raise ValueError("sequence_type {} must be in".format(sequence_type) +
" ['interleaved', simultaneous', 'sequential', 'sandwiched']")
for i, pulse_comb in enumerate(pulse_list):
pulses = []
for p in pulse_comb:
pulses += [operation_dict[p]]
el = multi_pulse_elt(i, station, pulses, sequencer_config)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
if upload:
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
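# Ordering sketch for a single AllXY pair, e.g. ['X90 ', 'Y90 '] with q0='q0'
# and q1='q1' (placeholder labels):
#   sequential:   X90 q0, Y90 q0, X90 q1, Y90 q1, RO
#   interleaved:  X90 q0, X90 q1, Y90 q0, Y90 q1, RO
#   simultaneous: same pulse list as interleaved, but the q1 pulses are
#                 re-referenced (refpoint 'start', zero delay) so they play
#                 together with the corresponding q0 pulses
#   sandwiched:   X90 q1, X90 q0, Y90 q0, Y90 q1, RO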
def two_qubit_tomo_cardinal(cardinal,
q0_pulse_pars,
q1_pulse_pars,
RO_pars,
timings_dict,
verbose=False,
upload=True,
return_seq=False):
seq_name = '2_qubit_Card_%d_seq' % cardinal
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# Create a dict with the parameters for all the pulses
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars), ' q1')
RO_dict = {'RO': RO_pars}
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update(RO_dict)
# Timings
# FIXME: This dictionary should not be required? -MAR
# NOTE: required in the CPhase tomo as input but not used
QQ_buffer = timings_dict['QQ_buffer']
wait_time = timings_dict['wait_time']
msmt_buffer = timings_dict['msmt_buffer']
tomo_list_q0 = ['I q0', 'X180 q0', 'Y90 q0',
'mY90 q0', 'X90 q0', 'mX90 q0']
tomo_list_q1 = ['I q1', 'X180 q1', 'Y90 q1',
'mY90 q1', 'X90 q1', 'mX90 q1']
# inner loop on q0
prep_idx_q0 = int(cardinal % 6)
prep_idx_q1 = int(((cardinal - prep_idx_q0)/6) % 6)
prep_pulse_q0 = pulse_dict[tomo_list_q0[prep_idx_q0]]
prep_pulse_q1 = pulse_dict[tomo_list_q1[prep_idx_q1]]
prep_pulse_q1['pulse_delay'] = QQ_buffer + (prep_pulse_q0['sigma'] *
prep_pulse_q0['nr_sigma'])
RO_pars['pulse_delay'] += msmt_buffer - (prep_pulse_q1['sigma'] *
prep_pulse_q1['nr_sigma'])
# Calibration points
cal_points = [['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO']]
for i in range(36):
tomo_idx_q0 = int(i % 6)
tomo_idx_q1 = int(((i - tomo_idx_q0)/6) % 6)
# print(i,tomo_idx_q0,tomo_idx_q1)
tomo_pulse_q0 = pulse_dict[tomo_list_q0[tomo_idx_q0]]
tomo_pulse_q1 = pulse_dict[tomo_list_q1[tomo_idx_q1]]
tomo_pulse_q0['pulse_delay'] = wait_time + (prep_pulse_q1['sigma'] *
prep_pulse_q1['nr_sigma'])
tomo_pulse_q1['pulse_delay'] = QQ_buffer + (tomo_pulse_q0['sigma'] *
tomo_pulse_q0['nr_sigma'])
pulse_list = [prep_pulse_q0,
prep_pulse_q1,
tomo_pulse_q0,
tomo_pulse_q1,
RO_pars]
el = multi_pulse_elt(i, station, pulse_list)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
for i, pulse_comb in enumerate(cal_points):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
el = multi_pulse_elt(35+i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
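# Index sketch: the 36 tomography elements decode i in base 6, e.g. i = 13 gives
# tomo_idx_q0 = 13 % 6 = 1 ('X180 q0') and tomo_idx_q1 = ((13 - 1) / 6) % 6 = 2
# ('Y90 q1'), so every pair from the 6-pulse tomo basis is covered; `cardinal`
# selects the preparation pair the same way.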
def two_qubit_tomo_bell(bell_state,
operation_dict,
qS,
qCZ,
RO_target,
distortion_dict,
CZ_disabled=False,
cal_points_with_flux_pulses=True,
verbose=False,
upload=True):
'''
qS is swap qubit
qCZ is cphase qubit
'''
sequencer_config = operation_dict['sequencer_config']
seq_name = '2_qubit_Bell_Tomo_%d_seq' % bell_state
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
tomo_list_qS = []
tomo_list_qCZ = []
# Tomo pulses span a basis covering all the cardinal points
tomo_pulses = ['I ', 'X180 ', 'Y90 ', 'mY90 ', 'X90 ', 'mX90 ']
for tp in tomo_pulses:
tomo_list_qS += [tp+qS]
tomo_list_qCZ += [tp+qCZ]
###########################
# Defining sub sequences #
###########################
# This forms the base sequence, note that gate1, gate2 and after_pulse will
# be replaced to prepare the desired state and tomo1 and tomo2 will be
# replaced with tomography pulses
base_sequence = (
['gate1 ' + qS, 'gate2 ' + qCZ,
'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,
'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,
'after_pulse ' + qCZ, 'tomo1 '+qCZ, 'tomo2 '+qS, 'RO '+RO_target])
# Calibration points
# every calibration point is repeated 7 times to have 64 elts in total
cal_points = [['I '+qCZ, 'I '+qS, 'RO '+RO_target]]*7 +\
[['I '+qCZ, 'X180 '+qS, 'RO '+RO_target]]*7 +\
[['X180 '+qCZ, 'I '+qS, 'RO '+RO_target]]*7 +\
[['X180 '+qCZ, 'X180 '+qS, 'RO '+RO_target]]*7
if CZ_disabled:
operation_dict['CZ '+qCZ]['amplitude'] = 0
operation_dict['CZ '+qCZ]['phase_corr_pulse_amp'] = 0
################################################
# Creating additional pulses for this sequence #
################################################
# the recovery SWAP is identical to the regular SWAP operation, unless
# an rSWAP is explicitly contained in the operation dict
if ('rSWAP ' + qS) not in operation_dict.keys():
operation_dict['rSWAP ' + qS] = deepcopy(operation_dict['SWAP ' + qS])
operation_dict['CZ_corr ' + qCZ]['refpoint'] = 'simultaneous'
################
# Bell states #
################
if bell_state == 0: # |Phi_m>=|00>-|11>
gate1 = 'Y90 ' + qS
gate2 = 'Y90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 1: # |Phi_p>=|00>+|11>
gate1 = 'mY90 ' + qS
gate2 = 'Y90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 2: # |Psi_m>=|01> - |10>
gate1 = 'Y90 ' + qS
gate2 = 'mY90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 3: # |Psi_p>=|01> + |10>
gate1 = 'mY90 ' + qS
gate2 = 'mY90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
# Below are states with the initial pulse on the CP-qubit disabled
# these are not Bell states but are used for debugging
elif bell_state == 0+10: # |00>+|11>
gate1 = 'Y90 ' + qS
gate2 = 'I ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 1+10:
gate1 = 'mY90 ' + qS
gate2 = 'I ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 2+10: # |01> - |10>
gate1 = 'Y90 ' + qS
gate2 = 'I ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 3+10:
gate1 = 'mY90 ' + qS
gate2 = 'I ' + qCZ
after_pulse = 'mY90 ' + qCZ
# Below are states with the initial pulse on the SWAP-qubit disabled
# these are not Bell states but are used for debugging
elif bell_state == 0 + 20: # |00>+|11>
gate1 = 'I ' + qS
gate2 = 'Y90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 1 + 20: # |01> - |10>
gate1 = 'I ' + qS
gate2 = 'Y90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 2 + 20:
gate1 = 'I ' + qS
gate2 = 'mY90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
elif bell_state == 3 + 20:
gate1 = 'mY90 ' + qS
gate2 = 'mY90 ' + qCZ
after_pulse = 'mY90 ' + qCZ
print('Compensation qCP {:.3f}'.format(
operation_dict['CZ_corr ' + qCZ]['amplitude']))
print('Compensation qS {:.3f}'.format(
operation_dict['SWAP_corr ' + qS]['amplitude']))
########################################################
# Here the actual pulses of all elements get defined #
########################################################
# We start by replacing the state preparation pulses
base_sequence[0] = gate1
base_sequence[1] = gate2
base_sequence[7] = after_pulse
seq_pulse_list = []
for i in range(36):
tomo_idx_qS = int(i % 6)
tomo_idx_qCZ = int(((i - tomo_idx_qS)/6) % 6)
base_sequence[8] = tomo_list_qCZ[tomo_idx_qCZ]
base_sequence[9] = tomo_list_qS[tomo_idx_qS]
seq_pulse_list += [deepcopy(base_sequence)]
print(len(cal_points))
for cal_pulses in cal_points:
if cal_points_with_flux_pulses:
base_sequence[0] = 'I ' + qS
base_sequence[1] = 'I ' + qCZ
base_sequence[7] = 'I ' + qCZ
base_sequence[-3:] = cal_pulses
seq_pulse_list += [deepcopy(base_sequence)]
else:
seq_pulse_list += [cal_pulses]
for i, pulse_list in enumerate(seq_pulse_list):
pulses = []
for p in pulse_list:
pulses += [operation_dict[p]]
el = multi_pulse_elt(i, station, pulses, sequencer_config)
if distortion_dict is not None:
print('\rDistorting element {}/{} '.format(i+1,
len(seq_pulse_list)),
end='')
el = distort_and_compensate(
el, distortion_dict)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
return seq, el_list
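# Sketch for bell_state = 0 (|00>-|11>): gate1/gate2/after_pulse become
# 'Y90 qS' / 'Y90 qCZ' / 'mY90 qCZ', so each tomography element runs
# Y90 qS, Y90 qCZ, SWAP qS, CZ qCZ, rSWAP qS, SWAP_corr qS, CZ_corr qCZ,
# mY90 qCZ, <tomo pulse on qCZ>, <tomo pulse on qS>, RO <target>,
# where qS and qCZ stand for the actual qubit names passed in.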
def cphase_fringes(phases, q0_pulse_pars, q1_pulse_pars, RO_pars,
swap_pars_q0, cphase_pars_q1, timings_dict,
distortion_dict, verbose=False, upload=True, return_seq=False):
'''
'''
preloaded_kernels_vec = preload_kernels_func(distortion_dict)
original_delay = deepcopy(RO_pars)[0]['pulse_delay']
seq_name = 'CPhase'
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
# print(q0_pulse_pars)
q0_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q0_pulse_pars[0]), ' q0')
q1_pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(q1_pulse_pars[0]), ' q1')
pulse_dict = {}
pulse_dict.update(q0_pulses)
pulse_dict.update(q1_pulses)
pulse_dict.update({'RO': RO_pars[0]})
# print({'RO': RO_pars})
# Timings
buffer_mw_flux = timings_dict[0]['buffer_mw_flux']
buffer_flux_mw = timings_dict[0]['buffer_flux_mw']
msmt_buffer = timings_dict[0]['msmt_buffer']
dead_time = timings_dict[0]['dead_time']
# print(buffer_mw_flux,buffer_flux_mw,msmt_buffer,dead_time)
# defining main pulses
exc_pulse = deepcopy(pulse_dict['X180 q0'])
exc_pulse['pulse_delay'] += 0.01e-6
swap_pulse_1 = deepcopy(swap_pars_q0[0])
# print(swap_pulse_1)
swap_pulse_1['pulse_delay'] = buffer_mw_flux + \
exc_pulse['sigma']*exc_pulse['nr_sigma']
ramsey_1 = deepcopy(pulse_dict['Y90 q1'])
ramsey_1['pulse_delay'] = buffer_flux_mw + swap_pulse_1['length']
cphase_pulse = cphase_pars_q1[0]
cphase_amp = cphase_pulse['amplitude']
cphase_pulse['pulse_delay'] = buffer_mw_flux + \
ramsey_1['sigma']*ramsey_1['nr_sigma']
ramsey_2 = deepcopy(pulse_dict['X90 q1'])
ramsey_2['pulse_delay'] = buffer_flux_mw + cphase_pulse['length']
swap_pulse_2 = deepcopy(swap_pars_q0[0])
swap_pulse_2['pulse_delay'] = buffer_mw_flux + \
ramsey_2['sigma']*ramsey_2['nr_sigma']
RO_pars[0]['pulse_delay'] = msmt_buffer + swap_pulse_2['length']
# defining compensation pulses
swap_comp_1 = deepcopy(swap_pulse_1)
swap_pulse_1['pulse_delay'] = RO_pars[0]['length'] + dead_time
cphase_comp = deepcopy(cphase_pulse)
swap_comp_2 = deepcopy(swap_pulse_2)
dead_time_pulse = {'pulse_type': 'SquarePulse',
'pulse_delay': RO_pars[0]['pulse_delay'],
'channel': swap_pars_q0[0]['channel'],
'amplitude': 0,
'length': dead_time}
for i, ph2 in enumerate(phases[0]):
# print(ph2)
ramsey_2['phase'] = ph2
cphase_pulse['amplitude'] = cphase_amp
pulse_list = [exc_pulse,
swap_pulse_1,
ramsey_1,
cphase_pulse,
ramsey_2,
swap_pulse_2,
RO_pars[0],
swap_comp_1,
cphase_comp,
swap_comp_2,
dead_time_pulse]
el = multi_pulse_elt(2*i, station, pulse_list)
el_list.append(el)
cphase_pulse['amplitude'] = 0.
pulse_list = [exc_pulse,
swap_pulse_1,
ramsey_1,
cphase_pulse,
ramsey_2,
swap_pulse_2,
RO_pars[0],
swap_comp_1,
cphase_comp,
swap_comp_2,
dead_time_pulse]
el = multi_pulse_elt(2*i+1, station, pulse_list)
el_list.append(el)
# Compensations
for i, el in enumerate(el_list):
if distortion_dict is not None:
el = distort_and_compensate(
el, distortion_dict, preloaded_kernels_vec)
el_list[i] = el
seq.append_element(el, trigger_wait=True)
RO_pars[0]['pulse_delay'] = original_delay
# Calibration points
cal_points = [['I q0', 'I q1', 'RO'],
['I q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['X180 q0', 'I q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['I q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO'],
['X180 q0', 'X180 q1', 'RO']]
for i, pulse_comb in enumerate(cal_points):
pulses = []
for p in pulse_comb:
pulses += [pulse_dict[p]]
pulses[0]['pulse_delay'] += 0.01e-6
el = multi_pulse_elt(2*len(phases)+i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
# upload
if upload:
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq
def preload_kernels_func(distortion_dict):
output_dict = {ch: [] for ch in distortion_dict['ch_list']}
for ch in distortion_dict['ch_list']:
for kernel in distortion_dict[ch]:
if kernel != '':
if kernel in cached_kernels.keys():
print('Cached {}'.format(kernel_dir+kernel))
output_dict[ch].append(cached_kernels[kernel])
else:
print('Loading {}'.format(kernel_dir+kernel))
# print(os.path.isfile('kernels/'+kernel))
kernel_vec = np.loadtxt(kernel_dir+kernel)
output_dict[ch].append(kernel_vec)
cached_kernels.update({kernel: kernel_vec})
return output_dict
def two_qubit_tomo_cphase_cardinal(cardinal_state,
operation_dict,
qS,
qCZ,
RO_target,
distortion_dict,
CZ_disabled=False,
cal_points_with_flux_pulses=True,
verbose=False,
upload=True):
'''
qS is swap qubit
qCZ is cphase qubit
'''
sequencer_config = operation_dict['sequencer_config']
seq_name = '2_qubit_CPhase_Cardinal_Tomo_%d_seq' % cardinal_state
seq = sequence.Sequence(seq_name)
station.pulsar.update_channel_settings()
el_list = []
tomo_list_qS = []
tomo_list_qCZ = []
# Tomo pulses span a basis covering all the cardinal points
tomo_pulses = ['I ', 'X180 ', 'Y90 ', 'mY90 ', 'X90 ', 'mX90 ']
for tp in tomo_pulses:
tomo_list_qS += [tp+qS]
tomo_list_qCZ += [tp+qCZ]
###########################
# Defining sub sequences #
###########################
# This forms the base sequence, note that gate1, gate2 and after_pulse will
# be replaced to prepare the desired state and tomo1 and tomo2 will be
# replaced with tomography pulses
base_sequence = (
['gate1 ' + qS, 'gate2 ' + qCZ,
'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,
'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,
'after_pulse ' + qCZ, 'tomo1 '+qCZ, 'tomo2 '+qS, 'RO '+RO_target])
# Calibration points
# every calibration point is repeated 7 times to have 64 elts in total
cal_points = [['I '+qCZ, 'I '+qS, 'RO '+RO_target]]*7 +\
[['I '+qCZ, 'X180 '+qS, 'RO '+RO_target]]*7 +\
[['X180 '+qCZ, 'I '+qS, 'RO '+RO_target]]*7 +\
[['X180 '+qCZ, 'X180 '+qS, 'RO '+RO_target]]*7
if CZ_disabled:
operation_dict['CZ '+qCZ]['amplitude'] = 0
operation_dict['CZ '+qCZ]['phase_corr_pulse_amp'] = 0
################################################
# Creating additional pulses for this sequence #
################################################
# the recovery SWAP is identical to the regular SWAP operation, unless
# an rSWAP is explicitly contained in the operation dict
if ('rSWAP ' + qS) not in operation_dict.keys():
operation_dict['rSWAP ' + qS] = deepcopy(operation_dict['SWAP ' + qS])
operation_dict['CZ_corr ' + qCZ]['refpoint'] = 'simultaneous'
################
# cardinal states #
################
# here select the qubit gates (depending on cardinal_state)
prep_idx_qS = int(cardinal_state % 6)
prep_idx_qCZ = int(((cardinal_state - prep_idx_qS)/6) % 6)
print('Compensation qCP {:.3f}'.format(
operation_dict['CZ_corr ' + qCZ]['amplitude']))
print('Compensation qS {:.3f}'.format(
operation_dict['SWAP_corr ' + qS]['amplitude']))
########################################################
# Here the actual pulses of all elements get defined #
########################################################
# We start by replacing the state preparation pulses
base_sequence[0] = tomo_list_qS[prep_idx_qS]
base_sequence[1] = tomo_list_qCZ[prep_idx_qCZ]
base_sequence[7] = 'I ' + qCZ
seq_pulse_list = []
for i in range(36):
tomo_idx_qS = int(i % 6)
tomo_idx_qCZ = int(((i - tomo_idx_qS)/6) % 6)
base_sequence[8] = tomo_list_qCZ[tomo_idx_qCZ]
base_sequence[9] = tomo_list_qS[tomo_idx_qS]
seq_pulse_list += [deepcopy(base_sequence)]
print(len(cal_points))
for cal_pulses in cal_points:
if cal_points_with_flux_pulses:
base_sequence[0] = 'I ' + qS
base_sequence[1] = 'I ' + qCZ
base_sequence[7] = 'I ' + qCZ
base_sequence[-3:] = cal_pulses
seq_pulse_list += [deepcopy(base_sequence)]
else:
seq_pulse_list += [cal_pulses]
for i, pulse_list in enumerate(seq_pulse_list):
pulses = []
for p in pulse_list:
pulses += [operation_dict[p]]
el = multi_pulse_elt(i, station, pulses, sequencer_config)
if distortion_dict is not None:
print('\rDistorting element {}/{} '.format(i+1,
len(seq_pulse_list)),
end='')
el = distort_and_compensate(
el, distortion_dict)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
return seq, el_list
def n_qubit_off_on(pulse_pars_list, RO_pars, return_seq=False, verbose=False,
parallel_pulses=False, preselection=False,
RO_spacing=200e-9):
n = len(pulse_pars_list)
seq_name = '{}_qubit_OffOn_sequence'.format(n)
seq = sequence.Sequence(seq_name)
el_list = []
# Create a dict with the parameters for all the pulses
pulse_dict = {'RO': RO_pars}
for i, pulse_pars in enumerate(pulse_pars_list):
pars = pulse_pars.copy()
if i != 0 and parallel_pulses:
pars['refpoint'] = 'start'
pulses = add_suffix_to_dict_keys(
get_pulse_dict_from_pars(pars), ' {}'.format(i))
pulse_dict.update(pulses)
spacerpulse = {'pulse_type': 'SquarePulse',
'channel': RO_pars['acq_marker_channel'],
'amplitude': 0.0,
'length': RO_spacing,
'pulse_delay': 0}
pulse_dict.update({'spacer': spacerpulse})
# Create a list of required pulses
pulse_combinations = []
for pulse_list in itertools.product(*(n*[['I', 'X180']])):
pulse_comb = (n+1)*['']
for i, pulse in enumerate(pulse_list):
pulse_comb[i] = pulse + ' {}'.format(i)
pulse_comb[-1] = 'RO'
if preselection:
pulse_comb = ['RO', 'spacer'] + pulse_comb
pulse_combinations.append(pulse_comb)
for i, pulse_comb in enumerate(pulse_combinations):
pulses = []
for j, p in enumerate(pulse_comb):
pulses += [pulse_dict[p]]
el = multi_pulse_elt(i, station, pulses)
el_list.append(el)
seq.append_element(el, trigger_wait=True)
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)
if return_seq:
return seq, el_list
else:
return seq_name
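# Sketch: for n = 2 the generated pulse combinations are, in order,
#   ['I 0', 'I 1', 'RO'], ['I 0', 'X180 1', 'RO'],
#   ['X180 0', 'I 1', 'RO'], ['X180 0', 'X180 1', 'RO'],
# each prefixed with ['RO', 'spacer'] when preselection is True.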
|
|
# Copyright 2013 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import sys
from oslo_db import exception as db_exc
from oslo_log import log as logging
import pecan
from pecan import rest
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import plan
from solum.api.handlers import plan_handler
from solum.common import exception
from solum.common import policy
from solum.common import yamlutils
from solum import objects
LOG = logging.getLogger(__name__)
def init_plan_v1(yml_input_plan):
if not yml_input_plan.get('name'):
raise exception.BadRequest(reason="Name field is missing.")
try:
pp = plan.Plan(**yml_input_plan)
except ValueError as ve:
raise exception.BadRequest(reason=str(ve))
try:
name_regex = re.compile(r'^([a-zA-Z0-9-_]{1,100})$')
assert name_regex.match(pp.name), 'Plan name is invalid.'
except AssertionError as ae:
raise exception.BadRequest(reason=str(ae))
return pp
def init_plan_by_version(input_plan):
version = input_plan.get('version')
if version is None:
raise exception.BadRequest(
reason='Version attribute is missing in Plan')
mod = sys.modules[__name__]
if not hasattr(mod, 'init_plan_v%s' % version):
raise exception.BadRequest(reason='Plan version %s is invalid.'
% version)
return getattr(mod, 'init_plan_v%s' % version)(input_plan)
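# Minimal (hypothetical) plan document accepted by the dispatcher above; only
# 'version' and, for v1, a regex-checked 'name' are validated here, everything
# else is handed to plan.Plan:
#
#   version: 1
#   name: example-plan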
def init_yml_plan_by_version():
try:
yml_input_plan = yamlutils.load(pecan.request.body)
except ValueError as excp:
LOG.error("Invalid plan.")
raise exception.BadRequest(reason='Plan is invalid. '
+ str(excp))
return init_plan_by_version(yml_input_plan)
def init_json_plan_by_version():
try:
json_input_plan = json.loads(pecan.request.body)
except ValueError as excp:
raise exception.BadRequest(reason='Plan is invalid. '
+ str(excp))
return init_plan_by_version(json_input_plan)
def yaml_content(m):
ref_content = m.refined_content()
host_url = pecan.request.application_url.rstrip('/')
ref_content['uri'] = '%s/v1/plans/%s' % (host_url, m.uuid)
ref_content['trigger_uri'] = ('%s/v1/triggers/%s' %
(host_url, m.trigger_id))
return ref_content
class PlanController(rest.RestController):
"""Manages operations on a single plan."""
def __init__(self, plan_id):
super(PlanController, self).__init__()
self._id = plan_id
@exception.wrap_pecan_controller_exception
@pecan.expose()
def get(self):
"""Return this plan."""
policy.check('show_plan',
pecan.request.security_context)
handler = plan_handler.PlanHandler(pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
if pecan.request.accept is not None and 'yaml' in pecan.request.accept:
plan_serialized = yamlutils.dump(
yaml_content(handler.get(self._id)))
else:
plan_model = plan.Plan.from_db_model(
handler.get(self._id), host_url)
plan_serialized = wsme_json.encode_result(plan_model, plan.Plan)
pecan.response.status = 200
return plan_serialized
@exception.wrap_pecan_controller_exception
@pecan.expose()
def put(self):
"""Modify this plan."""
policy.check('update_plan',
pecan.request.security_context)
# make sure the plan exists before parsing the request
handler = plan_handler.PlanHandler(pecan.request.security_context)
handler.get(self._id)
host_url = pecan.request.application_url.rstrip('/')
if not pecan.request.body or len(pecan.request.body) < 1:
raise exception.BadRequest(reason="No data.")
if (pecan.request.content_type is not None and
'yaml' in pecan.request.content_type):
data = init_yml_plan_by_version()
updated_plan_yml = yamlutils.dump(yaml_content(handler.update(
self._id, data.as_dict(objects.registry.Plan))))
else:
data = init_json_plan_by_version()
plan_obj = handler.update(self._id,
data.as_dict(objects.registry.Plan))
updated_plan_yml = wsme_json.encode_result(plan.Plan.from_db_model(
plan_obj, host_url), plan.Plan)
pecan.response.status = 200
return updated_plan_yml
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(status_code=202)
def delete(self):
"""Delete this plan."""
policy.check('delete_plan',
pecan.request.security_context)
p_handler = plan_handler.PlanHandler(pecan.request.security_context)
try:
p_handler.delete(self._id)
except db_exc.DBError:
raise exception.PlanStillReferenced(name=self._id)
class PlansController(rest.RestController):
"""Manages operations on the plans collection."""
@pecan.expose()
def _lookup(self, plan_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return PlanController(plan_id), remainder
@exception.wrap_pecan_controller_exception
@pecan.expose()
def post(self):
"""Create a new plan."""
policy.check('create_plan',
pecan.request.security_context)
if not pecan.request.body or len(pecan.request.body) < 1:
raise exception.BadRequest(reason="No data.")
handler = plan_handler.PlanHandler(pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
if (pecan.request.content_type is not None and
'yaml' in pecan.request.content_type):
data = init_yml_plan_by_version()
created_plan = yamlutils.dump(yaml_content(handler.create(
data.as_dict(objects.registry.Plan))))
else:
data = init_json_plan_by_version()
plan_wsme = plan.Plan.from_db_model(handler.create(
data.as_dict(objects.registry.Plan)), host_url)
created_plan = wsme_json.encode_result(plan_wsme, plan.Plan)
pecan.response.status = 201
return created_plan
@exception.wrap_pecan_controller_exception
@pecan.expose()
def get_all(self):
"""Return all plans, based on the query provided."""
policy.check('get_plans',
pecan.request.security_context)
handler = plan_handler.PlanHandler(pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
if pecan.request.accept is not None and 'yaml' in pecan.request.accept:
plan_serialized = yamlutils.dump([yaml_content(obj)
for obj in handler.get_all()
if obj and obj.raw_content])
else:
plan_serialized = wsme_json.encode_result(
[plan.Plan.from_db_model(obj, host_url)
for obj in handler.get_all()],
wsme_types.ArrayType(plan.Plan))
pecan.response.status = 200
return plan_serialized
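# Routing sketch (assumed pecan REST conventions, matching the URIs built in
# yaml_content()):
#   GET  /v1/plans            -> PlansController.get_all()
#   POST /v1/plans            -> PlansController.post()
#   GET/PUT/DELETE /v1/plans/<plan_id> -> PlanController via _lookup()
# Output format follows the Accept header ('yaml' selects YAML, otherwise JSON),
# and request bodies are parsed per Content-Type the same way.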
|
|
from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
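# Informal examples (not part of the original source): '4.8.2', as printed by
# `gfortran -dumpversion`, maps to ('gfortran', '4.8.2'), while a g77-style
# 'GNU Fortran 3.3.3' string maps to ('g77', '3.3.3').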
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
try:
get_makefile_filename = sc.get_makefile_filename
except AttributeError:
pass # i.e. PyPy
else:
filename = get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
            # The following code is not needed when using MinGW (read: it
            # breaks there); it is only for the case where F77-compiled code
            # is linked with MSVC.
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
    fid, filename = tempfile.mkstemp(suffix=".f")
    os.close(fid)  # mkstemp returns an open descriptor; only the name is used
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
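    # Hedged extra demo (an addition, not part of the original module): show
    # how gnu_version_match() classifies two representative version strings.
    # The output depends only on the parsing logic above, not on any compiler
    # actually being installed.
    demo_strings = [
        "4.9.2",                    # typical gfortran `-dumpversion` output
        "GNU Fortran (GCC) 3.4.6",  # longer g77-era banner
    ]
    demo_compiler = GnuFCompiler()
    for s in demo_strings:
        print("%r -> %r" % (s, demo_compiler.gnu_version_match(s)))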
|
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import copy
import pytest
import requests
from generic_test_code.common import (
generic_correct_upstream_dest_test,
generic_correct_upstream_request_test,
generic_upstream_headers_verify_test,
generic_no_slash_redirect_test,
)
class TestExhibitorEndpoint:
def test_redirect_req_without_slash(self, master_ar_process):
generic_no_slash_redirect_test(master_ar_process, '/exhibitor')
def test_if_exhibitor_endpoint_handles_redirects_properly(
self, master_ar_process, mocker, valid_user_header):
location_sent = 'http://127.0.0.1/exhibitor/v1/ui/index.html'
location_expected = 'http://127.0.0.1/exhibitor/exhibitor/v1/ui/index.html'
mocker.send_command(endpoint_id='http://127.0.0.1:8181',
func_name='always_redirect',
aux_data=location_sent)
url = master_ar_process.make_url_from_path("/exhibitor/v1/ui/index.html")
r = requests.get(url, allow_redirects=False, headers=valid_user_header)
assert r.status_code == 307
assert r.headers['Location'] == location_expected
def test_if_request_is_sent_to_correct_upstream(self,
master_ar_process,
valid_user_header):
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
'/exhibitor/some/path',
'http://127.0.0.1:8181',
)
def test_if_upstream_request_is_correct(self,
master_ar_process,
valid_user_header):
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
'/exhibitor/some/path',
'/some/path',
)
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header):
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
'/exhibitor/some/path',
)
agent_prefix = '/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1'
class TestAgentEndpoint:
# FIXME: Figure out how we can test disable-request-response-buffering.conf
def test_if_request_is_sent_to_correct_upstream(self,
master_ar_process,
valid_user_header):
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
agent_prefix + "/foo/bar",
'http://127.0.0.2:15001',
)
@pytest.mark.parametrize("path_given,path_expected",
[("/foo/bar", "/foo/bar"),
("", "/"),
("/", "/"),
])
def test_if_upstream_request_is_correct(self,
master_ar_process,
valid_user_header,
path_given,
path_expected):
prefixed_pg = agent_prefix + path_given
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
prefixed_pg,
path_expected,
http_ver="HTTP/1.1",
)
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header):
path = '/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/logs/v1/foo/bar'
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
path,
)
class TestMetricsEndpoint:
def test_redirect_req_without_slash(self, master_ar_process):
generic_no_slash_redirect_test(master_ar_process, '/system/v1/metrics')
def test_if_request_is_sent_to_correct_upstream(self,
master_ar_process,
valid_user_header):
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
'/system/v1/metrics/foo/bar',
'http:///run/dcos/dcos-metrics-master.sock',
)
@pytest.mark.parametrize("path_given,path_expected",
[("/system/v1/metrics/foo/bar", "/foo/bar"),
("/system/v1/metrics/", "/"),
])
def test_if_upstream_request_is_correct(self,
master_ar_process,
valid_user_header,
path_given,
path_expected):
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
path_given,
path_expected,
)
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header):
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
'/system/v1/metrics/foo/bar',
)
class TestLogsEndpoint:
def test_redirect_req_without_slash(self, master_ar_process):
generic_no_slash_redirect_test(master_ar_process, '/system/v1/logs/v1')
def test_if_request_is_sent_to_correct_upstream(self,
master_ar_process,
valid_user_header):
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
'/system/v1/logs/v1/foo/bar',
'http:///run/dcos/dcos-log.sock',
)
@pytest.mark.parametrize("path_given,path_expected",
[("/system/v1/logs/v1/foo/bar", "/foo/bar"),
("/system/v1/logs/v1/", "/"),
])
def test_if_upstream_request_is_correct(self,
master_ar_process,
valid_user_header,
path_given,
path_expected):
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
path_given,
path_expected,
http_ver="HTTP/1.1"
)
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header):
accel_buff_header = {"X-Accel-Buffering": "TEST"}
req_headers = copy.deepcopy(valid_user_header)
req_headers.update(accel_buff_header)
generic_upstream_headers_verify_test(master_ar_process,
req_headers,
'/system/v1/logs/v1/foo/bar',
assert_headers=accel_buff_header,
)
class TestHealthEndpoint:
@pytest.mark.parametrize("path_given,path_expected",
[("/system/health/v1/foo/bar", "/system/health/v1/foo/bar"),
("/system/health/v1/", "/system/health/v1/"),
("/system/health/v1", "/system/health/v1"),
])
def test_if_upstream_request_is_correct(self,
master_ar_process,
valid_user_header,
path_given,
path_expected):
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
path_given,
path_expected,
)
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header):
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
'/system/health/v1/foo/bar',
)
class TestSystemAPIAgentProxing:
@pytest.mark.parametrize("prefix", [("/logs/v1"),
("/metrics/v0"),
("/logs/v1/foo/bar"),
("/metrics/v0/baz/baf"),
])
@pytest.mark.parametrize("agent,endpoint", [
("de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1", 'http://127.0.0.2:61001'),
("de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S0", 'http://127.0.0.3:61001'),
])
def test_if_request_is_sent_to_correct_upstream(self,
master_ar_process,
valid_user_header,
agent,
endpoint,
prefix):
        # FIXME: these are very simple tests for now; we need to figure out how
        # to test the streaming API better. At the moment we only check that
        # HTTP/1.1 is used for the streaming endpoints.
uri_path = '/system/v1/agent/{}{}'.format(agent, prefix)
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
uri_path,
endpoint,
)
@pytest.mark.parametrize("prefix", [("/logs/v1"),
("/metrics/v0"),
])
@pytest.mark.parametrize("sent,expected", [('/foo/bar?key=value&var=num',
'/foo/bar?key=value&var=num'),
('/foo/bar/baz',
'/foo/bar/baz'),
('/',
'/'),
('',
''),
])
def test_if_http_11_is_enabled(self,
master_ar_process,
valid_user_header,
sent,
expected,
prefix):
path_sent_fmt = '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1{}{}'
path_expected_fmt = '/system/v1{}{}'
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
path_sent_fmt.format(prefix, sent),
path_expected_fmt.format(prefix, expected),
'HTTP/1.1'
)
@pytest.mark.parametrize("prefix", [("/logs/v1"),
("/metrics/v0"),
])
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header,
prefix,
):
path_fmt = '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1{}/foo/bar'
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
path_fmt.format(prefix),
)
class TestSystemApiLeaderProxing:
def test_if_request_is_sent_to_the_current_mesos_leader(self,
master_ar_process,
valid_user_header):
        # FIXME: using the MesosDNS `leader.mesos` alias makes things hard to
        # test. Dropping it in favour of a cache + API call would also improve
        # reliability. So no "change the leader and verify the result" tests
        # for now.
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
'/system/v1/leader/mesos/foo/bar',
'http://127.0.0.2:80',
)
def test_if_request_is_sent_to_the_current_marathon_leader(
self, master_ar_process, valid_user_header):
generic_correct_upstream_dest_test(master_ar_process,
valid_user_header,
'/system/v1/leader/marathon/foo/bar',
'http://127.0.0.2:80',
)
# Changing leader is covered in cache tests
@pytest.mark.parametrize("endpoint_type", [("marathon"),
("mesos"),
])
@pytest.mark.parametrize("sent,expected", [('/foo/bar?key=value&var=num',
'/foo/bar?key=value&var=num'),
('/foo/bar/baz',
'/foo/bar/baz'),
('/',
'/'),
('',
''),
])
def test_if_http11_is_enabled(self,
master_ar_process,
valid_user_header,
sent,
expected,
endpoint_type):
path_sent = '/system/v1/leader/mesos' + sent
path_expected = '/system/v1' + expected
generic_correct_upstream_request_test(master_ar_process,
valid_user_header,
path_sent,
path_expected,
http_ver="HTTP/1.1"
)
@pytest.mark.parametrize("endpoint_type", [("marathon"),
("mesos"),
])
def test_if_upstream_headers_are_correct(self,
master_ar_process,
valid_user_header,
endpoint_type,
):
path_fmt = '/system/v1/leader/{}/foo/bar/bzzz'
generic_upstream_headers_verify_test(master_ar_process,
valid_user_header,
path_fmt.format(endpoint_type),
)
|
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet."""
import types
from typing import Mapping, Optional, Sequence, Union, Any
from haiku._src import basic
from haiku._src import batch_norm
from haiku._src import conv
from haiku._src import module
from haiku._src import pool
import jax
import jax.numpy as jnp
# If forking replace this block with `import haiku as hk`.
hk = types.ModuleType("haiku")
hk.Module = module.Module
hk.BatchNorm = batch_norm.BatchNorm
hk.Conv2D = conv.Conv2D
hk.Linear = basic.Linear
hk.max_pool = pool.max_pool
del basic, batch_norm, conv, module, pool
FloatStrOrBool = Union[str, float, bool]
class BlockV1(hk.Module):
"""ResNet V1 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, FloatStrOrBool],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
bn_config.setdefault("decay_rate", 0.999)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding="SAME",
name="shortcut_conv")
self.proj_batchnorm = hk.BatchNorm(name="shortcut_batchnorm", **bn_config)
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
with_bias=False,
padding="SAME",
name="conv_0")
bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
with_bias=False,
padding="SAME",
name="conv_1")
bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding="SAME",
name="conv_2")
bn_2 = hk.BatchNorm(name="batchnorm_2", scale_init=jnp.zeros, **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self, inputs, is_training, test_local_stats):
out = shortcut = inputs
if self.use_projection:
shortcut = self.proj_conv(shortcut)
shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)
for i, (conv_i, bn_i) in enumerate(self.layers):
out = conv_i(out)
out = bn_i(out, is_training, test_local_stats)
if i < len(self.layers) - 1: # Don't apply relu on last layer
out = jax.nn.relu(out)
return jax.nn.relu(out + shortcut)
class BlockV2(hk.Module):
"""ResNet V2 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, FloatStrOrBool],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding="SAME",
name="shortcut_conv")
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
with_bias=False,
padding="SAME",
name="conv_0")
bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
with_bias=False,
padding="SAME",
name="conv_1")
bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding="SAME",
name="conv_2")
# NOTE: Some implementations of ResNet50 v2 suggest initializing
# gamma/scale here to zeros.
bn_2 = hk.BatchNorm(name="batchnorm_2", **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self, inputs, is_training, test_local_stats):
x = shortcut = inputs
for i, (conv_i, bn_i) in enumerate(self.layers):
x = bn_i(x, is_training, test_local_stats)
x = jax.nn.relu(x)
if i == 0 and self.use_projection:
shortcut = self.proj_conv(x)
x = conv_i(x)
return x + shortcut
class BlockGroup(hk.Module):
"""Higher level block for ResNet implementation."""
def __init__(
self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
bn_config: Mapping[str, FloatStrOrBool],
resnet_v2: bool,
bottleneck: bool,
use_projection: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
block_cls = BlockV2 if resnet_v2 else BlockV1
self.blocks = []
for i in range(num_blocks):
self.blocks.append(
block_cls(channels=channels,
stride=(1 if i else stride),
use_projection=(i == 0 and use_projection),
bottleneck=bottleneck,
bn_config=bn_config,
name="block_%d" % (i)))
def __call__(self, inputs, is_training, test_local_stats):
out = inputs
for block in self.blocks:
out = block(out, is_training, test_local_stats)
return out
def check_length(length, value, name):
if len(value) != length:
raise ValueError(f"`{name}` must be of length 4 not {len(value)}")
class ResNet(hk.Module):
"""ResNet model."""
CONFIGS = {
18: {
"blocks_per_group": (2, 2, 2, 2),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
34: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
50: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
101: {
"blocks_per_group": (3, 4, 23, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
152: {
"blocks_per_group": (3, 8, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
200: {
"blocks_per_group": (3, 24, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
}
BlockGroup = BlockGroup # pylint: disable=invalid-name
BlockV1 = BlockV1 # pylint: disable=invalid-name
BlockV2 = BlockV2 # pylint: disable=invalid-name
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
bottleneck: Whether the block should bottleneck or not. Defaults to
``True``.
channels_per_group: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault("decay_rate", 0.9)
bn_config.setdefault("eps", 1e-5)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
logits_config = dict(logits_config or {})
logits_config.setdefault("w_init", jnp.zeros)
logits_config.setdefault("name", "logits")
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, "blocks_per_group")
check_length(4, channels_per_group, "channels_per_group")
check_length(4, strides, "strides")
initial_conv_config = dict(initial_conv_config or {})
initial_conv_config.setdefault("output_channels", 64)
initial_conv_config.setdefault("kernel_shape", 7)
initial_conv_config.setdefault("stride", 2)
initial_conv_config.setdefault("with_bias", False)
initial_conv_config.setdefault("padding", "SAME")
initial_conv_config.setdefault("name", "initial_conv")
self.initial_conv = hk.Conv2D(**initial_conv_config)
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm",
**bn_config)
self.block_groups = []
for i, stride in enumerate(strides):
self.block_groups.append(
BlockGroup(channels=channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=stride,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name="block_group_%d" % (i)))
if self.resnet_v2:
self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config)
self.logits = hk.Linear(num_classes, **logits_config)
def __call__(self, inputs, is_training, test_local_stats=False):
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding="SAME")
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=(1, 2))
return self.logits(out)
class ResNet18(ResNet):
"""ResNet18."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[18])
class ResNet34(ResNet):
"""ResNet34."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[34])
class ResNet50(ResNet):
"""ResNet50."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[50])
class ResNet101(ResNet):
"""ResNet101."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[101])
class ResNet152(ResNet):
"""ResNet152."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[152])
class ResNet200(ResNet):
"""ResNet200."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[200])
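# The sketch below (an addition, not part of the original module) shows one
# way to instantiate and run one of the ResNets defined above. It assumes the
# full `haiku` package is installed, because the stub `hk` namespace created
# at the top of this file only exposes the handful of symbols this module
# itself uses (in particular, it has no `transform_with_state`).
if __name__ == "__main__":
  import haiku as full_hk  # full package, assumed available

  def _forward(images, is_training):
    model = ResNet50(num_classes=10)
    return model(images, is_training=is_training)

  # BatchNorm keeps moving statistics, so the stateful transform is needed.
  forward = full_hk.transform_with_state(_forward)
  rng = jax.random.PRNGKey(42)
  images = jnp.zeros((2, 224, 224, 3))
  params, state = forward.init(rng, images, is_training=True)
  logits, state = forward.apply(params, state, rng, images, is_training=True)
  print(logits.shape)  # expected: (2, 10)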
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Visualization interfaces."""
from io import open # pylint: disable=W0622
from pathlib import Path
import numpy as np
from mriqc.viz.utils import plot_mosaic, plot_segmentation, plot_spikes
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
File,
SimpleInterface,
TraitedSpec,
isdefined,
traits,
)
class PlotContoursInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="File to be plotted")
in_contours = File(
exists=True, mandatory=True, desc="file to pick the contours from"
)
cut_coords = traits.Int(8, usedefault=True, desc="number of slices")
levels = traits.List(
[0.5], traits.Float, usedefault=True, desc="add a contour per level"
)
colors = traits.List(
["r"],
traits.Str,
usedefault=True,
desc="colors to be used for contours",
)
display_mode = traits.Enum(
"ortho",
"x",
"y",
"z",
"yx",
"xz",
"yz",
usedefault=True,
desc="visualization mode",
)
saturate = traits.Bool(False, usedefault=True, desc="saturate background")
out_file = traits.File(exists=False, desc="output file name")
vmin = traits.Float(desc="minimum intensity")
vmax = traits.Float(desc="maximum intensity")
class PlotContoursOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output svg file")
class PlotContours(SimpleInterface):
""" Plot contours """
input_spec = PlotContoursInputSpec
output_spec = PlotContoursOutputSpec
def _run_interface(self, runtime):
in_file_ref = Path(self.inputs.in_file)
if isdefined(self.inputs.out_file):
in_file_ref = Path(self.inputs.out_file)
fname = in_file_ref.name.rstrip("".join(in_file_ref.suffixes))
out_file = (Path(runtime.cwd) / ("plot_%s_contours.svg" % fname)).resolve()
self._results["out_file"] = str(out_file)
vmax = None if not isdefined(self.inputs.vmax) else self.inputs.vmax
vmin = None if not isdefined(self.inputs.vmin) else self.inputs.vmin
plot_segmentation(
self.inputs.in_file,
self.inputs.in_contours,
out_file=str(out_file),
cut_coords=self.inputs.cut_coords,
display_mode=self.inputs.display_mode,
levels=self.inputs.levels,
colors=self.inputs.colors,
saturate=self.inputs.saturate,
vmin=vmin,
vmax=vmax,
)
return runtime
class PlotBaseInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="File to be plotted")
title = traits.Str(desc="a title string for the plot")
annotate = traits.Bool(True, usedefault=True, desc="annotate left/right")
figsize = traits.Tuple(
(11.69, 8.27),
traits.Float,
traits.Float,
usedefault=True,
desc="Figure size",
)
dpi = traits.Int(300, usedefault=True, desc="Desired DPI of figure")
out_file = File("mosaic.svg", usedefault=True, desc="output file name")
cmap = traits.Str("Greys_r", usedefault=True)
class PlotMosaicInputSpec(PlotBaseInputSpec):
bbox_mask_file = File(exists=True, desc="brain mask")
only_noise = traits.Bool(False, desc="plot only noise")
class PlotMosaicOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output pdf file")
class PlotMosaic(SimpleInterface):
"""
Plots slices of a 3D volume into a pdf file
"""
input_spec = PlotMosaicInputSpec
output_spec = PlotMosaicOutputSpec
def _run_interface(self, runtime):
mask = None
if isdefined(self.inputs.bbox_mask_file):
mask = self.inputs.bbox_mask_file
title = None
if isdefined(self.inputs.title):
title = self.inputs.title
plot_mosaic(
self.inputs.in_file,
out_file=self.inputs.out_file,
title=title,
only_plot_noise=self.inputs.only_noise,
bbox_mask_file=mask,
cmap=self.inputs.cmap,
annotate=self.inputs.annotate,
)
self._results["out_file"] = str(
(Path(runtime.cwd) / self.inputs.out_file).resolve()
)
return runtime
class PlotSpikesInputSpec(PlotBaseInputSpec):
in_spikes = File(exists=True, mandatory=True, desc="tsv file of spikes")
in_fft = File(exists=True, mandatory=True, desc="nifti file with the 4D FFT")
class PlotSpikesOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output svg file")
class PlotSpikes(SimpleInterface):
"""
Plot slices of a dataset with spikes
"""
input_spec = PlotSpikesInputSpec
output_spec = PlotSpikesOutputSpec
def _run_interface(self, runtime):
out_file = str((Path(runtime.cwd) / self.inputs.out_file).resolve())
self._results["out_file"] = out_file
spikes_list = np.loadtxt(self.inputs.in_spikes, dtype=int).tolist()
# No spikes
if not spikes_list:
with open(out_file, "w") as f:
f.write("<p>No high-frequency spikes were found in this dataset</p>")
return runtime
spikes_list = [tuple(i) for i in np.atleast_2d(spikes_list).tolist()]
plot_spikes(
self.inputs.in_file,
self.inputs.in_fft,
spikes_list,
out_file=out_file,
)
return runtime
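# Hedged usage sketch (an addition, not part of the original interfaces):
# a PlotMosaic node can be driven directly, outside of a nipype workflow.
# The command-line argument below is a placeholder for a user-supplied
# NIfTI file; nothing here ships with mriqc.
if __name__ == "__main__":
    import sys

    mosaic = PlotMosaic()
    mosaic.inputs.in_file = sys.argv[1]  # path to a 3D/4D NIfTI file
    mosaic.inputs.title = "Example mosaic"
    result = mosaic.run()
    print(result.outputs.out_file)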
|
|
# -*- coding: utf-8 -*-
import json
import os
import socket
from django import forms
from django.conf import settings
from django.db.models import Q
from django.forms.models import modelformset_factory
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
import commonware
import happyforms
from tower import ugettext as _, ugettext_lazy as _lazy
from quieter_formset.formset import BaseModelFormSet
from access import acl
import amo
import addons.forms
import paypal
from addons.models import (Addon, AddonDependency, AddonUser,
Charity, Preview)
from amo.forms import AMOModelForm
from amo.urlresolvers import reverse
from applications.models import AppVersion
from files.models import File, FileUpload
from files.utils import parse_addon
from translations.widgets import TranslationTextarea, TranslationTextInput
from translations.fields import TransTextarea, TransField
from translations.models import delete_translation, Translation
from translations.forms import TranslationFormMixin
from versions.models import (ApplicationsVersions, License,
VALID_SOURCE_EXTENSIONS, Version)
from . import tasks, utils
paypal_log = commonware.log.getLogger('z.paypal')
class AuthorForm(happyforms.ModelForm):
class Meta:
model = AddonUser
exclude = ('addon',)
class BaseModelFormSet(BaseModelFormSet):
"""
Override the parent's is_valid to prevent deleting all forms.
"""
def is_valid(self):
# clean() won't get called in is_valid() if all the rows are getting
# deleted. We can't allow deleting everything.
rv = super(BaseModelFormSet, self).is_valid()
return rv and not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
# cleaned_data could be None if it's the empty extra form.
data = filter(None, [f.cleaned_data for f in self.forms
if not f.cleaned_data.get('DELETE', False)])
if not any(d['role'] == amo.AUTHOR_ROLE_OWNER for d in data):
raise forms.ValidationError(_('Must have at least one owner.'))
if not any(d['listed'] for d in data):
raise forms.ValidationError(
_('At least one author must be listed.'))
users = [d['user'] for d in data]
if sorted(users) != sorted(set(users)):
raise forms.ValidationError(
_('An author can only be listed once.'))
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
password = forms.CharField()
reason = forms.CharField(required=False)
def __init__(self, request):
self.user = request.amo_user
super(DeleteForm, self).__init__(request.POST)
def clean_password(self):
data = self.cleaned_data
if not self.user.check_password(data['password']):
raise forms.ValidationError(_('Password incorrect.'))
class AnnotateFileForm(happyforms.Form):
message = forms.CharField()
ignore_duplicates = forms.BooleanField(required=False)
def clean_message(self):
msg = self.cleaned_data['message']
try:
msg = json.loads(msg)
except ValueError:
raise forms.ValidationError(_('Invalid JSON object'))
key = utils.ValidationComparator.message_key(msg)
if key is None:
raise forms.ValidationError(
_('Message not eligible for annotation'))
return msg
class LicenseChoiceRadio(forms.widgets.RadioFieldRenderer):
def __iter__(self):
for i, choice in enumerate(self.choices):
yield LicenseRadioInput(self.name, self.value, self.attrs.copy(),
choice, i)
class LicenseRadioInput(forms.widgets.RadioInput):
def __init__(self, name, value, attrs, choice, index):
super(LicenseRadioInput, self).__init__(name, value, attrs, choice,
index)
license = choice[1] # Choice is a tuple (object.id, object).
link = u'<a class="xx extra" href="%s" target="_blank">%s</a>'
if hasattr(license, 'url'):
details = link % (license.url, _('Details'))
self.choice_label = mark_safe(self.choice_label + details)
class LicenseForm(AMOModelForm):
builtin = forms.TypedChoiceField(
choices=[], coerce=int,
widget=forms.RadioSelect(attrs={'class': 'license'},
renderer=LicenseChoiceRadio))
name = forms.CharField(widget=TranslationTextInput(),
label=_lazy(u"What is your license's name?"),
required=False, initial=_lazy('Custom License'))
text = forms.CharField(widget=TranslationTextarea(), required=False,
label=_lazy(u'Provide the text of your license.'))
def __init__(self, *args, **kw):
addon = kw.pop('addon', None)
self.version = None
if addon:
self.version = addon.latest_version
if self.version:
kw['instance'], kw['initial'] = self.version.license, None
# Clear out initial data if it's a builtin license.
if getattr(kw['instance'], 'builtin', None):
kw['initial'] = {'builtin': kw['instance'].builtin}
kw['instance'] = None
super(LicenseForm, self).__init__(*args, **kw)
cs = [(x.builtin, x)
for x in License.objects.builtins().filter(on_form=True)]
cs.append((License.OTHER, _('Other')))
self.fields['builtin'].choices = cs
if addon and not addon.is_listed:
self.fields['builtin'].required = False
class Meta:
model = License
fields = ('builtin', 'name', 'text')
def clean_name(self):
name = self.cleaned_data['name']
return name.strip() or _('Custom License')
def clean(self):
data = self.cleaned_data
if self.errors:
return data
elif data['builtin'] == License.OTHER and not data['text']:
raise forms.ValidationError(
_('License text is required when choosing Other.'))
return data
def get_context(self):
"""Returns a view context dict having keys license_urls, license_form,
and license_other_val.
"""
license_urls = dict(License.objects.builtins()
.values_list('builtin', 'url'))
return dict(license_urls=license_urls, version=self.version,
license_form=self.version and self,
license_other_val=License.OTHER)
def save(self, *args, **kw):
"""Save all form data.
This will only create a new license if it's not one of the builtin
ones.
Keyword arguments
**log=True**
Set to False if you do not want to log this action for display
on the developer dashboard.
"""
log = kw.pop('log', True)
changed = self.changed_data
builtin = self.cleaned_data['builtin']
if builtin == '': # No license chosen, it must be an unlisted add-on.
return
if builtin != License.OTHER:
license = License.objects.get(builtin=builtin)
else:
# Save the custom license:
license = super(LicenseForm, self).save(*args, **kw)
if self.version:
if changed or license != self.version.license:
self.version.update(license=license)
if log:
amo.log(amo.LOG.CHANGE_LICENSE, license,
self.version.addon)
return license
class PolicyForm(TranslationFormMixin, AMOModelForm):
"""Form for editing the add-ons EULA and privacy policy."""
has_eula = forms.BooleanField(
required=False,
label=_lazy(u'This add-on has an End-User License Agreement'))
eula = TransField(
widget=TransTextarea(), required=False,
label=_lazy(u"Please specify your add-on's "
"End-User License Agreement:"))
has_priv = forms.BooleanField(
required=False, label=_lazy(u"This add-on has a Privacy Policy"))
privacy_policy = TransField(
widget=TransTextarea(), required=False,
label=_lazy(u"Please specify your add-on's Privacy Policy:"))
def __init__(self, *args, **kw):
self.addon = kw.pop('addon', None)
if not self.addon:
raise ValueError('addon keyword arg cannot be None')
kw['instance'] = self.addon
kw['initial'] = dict(has_priv=self._has_field('privacy_policy'),
has_eula=self._has_field('eula'))
super(PolicyForm, self).__init__(*args, **kw)
def _has_field(self, name):
# If there's a eula in any language, this addon has a eula.
n = getattr(self.addon, u'%s_id' % name)
return any(map(bool, Translation.objects.filter(id=n)))
class Meta:
model = Addon
fields = ('eula', 'privacy_policy')
def save(self, commit=True):
ob = super(PolicyForm, self).save(commit)
for k, field in (('has_eula', 'eula'),
('has_priv', 'privacy_policy')):
if not self.cleaned_data[k]:
delete_translation(self.instance, field)
if 'privacy_policy' in self.changed_data:
amo.log(amo.LOG.CHANGE_POLICY, self.addon, self.instance)
return ob
def ProfileForm(*args, **kw):
# If the add-on takes contributions, then both fields are required.
addon = kw['instance']
fields_required = (kw.pop('required', False) or
bool(addon.takes_contributions))
the_reason_label = _('Why did you make this add-on?')
the_future_label = _("What's next for this add-on?")
class _Form(TranslationFormMixin, happyforms.ModelForm):
the_reason = TransField(widget=TransTextarea(),
required=fields_required,
label=the_reason_label)
the_future = TransField(widget=TransTextarea(),
required=fields_required,
label=the_future_label)
class Meta:
model = Addon
fields = ('the_reason', 'the_future')
return _Form(*args, **kw)
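# Hedged usage note (an addition, not part of the original module): because
# ProfileForm is a factory rather than a class, view code constructs it like a
# regular ModelForm but must pass the add-on via ``instance``, e.g.:
#
#     form = ProfileForm(request.POST or None, instance=addon)
#     if form.is_valid():
#         form.save()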
class CharityForm(happyforms.ModelForm):
url = Charity._meta.get_field('url').formfield()
class Meta:
model = Charity
fields = ('name', 'url', 'paypal')
def clean_paypal(self):
check_paypal_id(self.cleaned_data['paypal'])
return self.cleaned_data['paypal']
def save(self, commit=True):
        # We link to the charity row in contrib stats, so force every charity
        # change to create a new row; that way we never lose track of the old
        # charities.
if self.changed_data and self.instance.id:
self.instance.id = None
return super(CharityForm, self).save(commit)
class ContribForm(TranslationFormMixin, happyforms.ModelForm):
RECIPIENTS = (('dev', _lazy(u'The developers of this add-on')),
('moz', _lazy(u'The Mozilla Foundation')),
('org', _lazy(u'An organization of my choice')))
recipient = forms.ChoiceField(
choices=RECIPIENTS,
widget=forms.RadioSelect(attrs={'class': 'recipient'}))
thankyou_note = TransField(widget=TransTextarea(), required=False)
class Meta:
model = Addon
fields = ('paypal_id', 'suggested_amount', 'annoying',
'enable_thankyou', 'thankyou_note')
widgets = {
'annoying': forms.RadioSelect(),
'suggested_amount': forms.TextInput(attrs={'class': 'short'}),
'paypal_id': forms.TextInput(attrs={'size': '50'})
}
@staticmethod
def initial(addon):
if addon.charity:
recip = 'moz' if addon.charity_id == amo.FOUNDATION_ORG else 'org'
else:
recip = 'dev'
return {'recipient': recip,
'annoying': addon.annoying or amo.CONTRIB_PASSIVE}
def clean(self):
data = self.cleaned_data
try:
if not self.errors and data['recipient'] == 'dev':
check_paypal_id(data['paypal_id'])
except forms.ValidationError, e:
self.errors['paypal_id'] = self.error_class(e.messages)
# thankyou_note is a dict since it's a Translation.
if not (data.get('enable_thankyou') and
any(data.get('thankyou_note').values())):
data['thankyou_note'] = {}
data['enable_thankyou'] = False
return data
def clean_suggested_amount(self):
amount = self.cleaned_data['suggested_amount']
if amount is not None and amount <= 0:
msg = _(u'Please enter a suggested amount greater than 0.')
raise forms.ValidationError(msg)
if amount > settings.MAX_CONTRIBUTION:
msg = _(u'Please enter a suggested amount less than ${0}.').format(
settings.MAX_CONTRIBUTION)
raise forms.ValidationError(msg)
return amount
def check_paypal_id(paypal_id):
if not paypal_id:
raise forms.ValidationError(
_('PayPal ID required to accept contributions.'))
try:
valid, msg = paypal.check_paypal_id(paypal_id)
if not valid:
raise forms.ValidationError(msg)
except socket.error:
raise forms.ValidationError(_('Could not validate PayPal id.'))
class WithSourceMixin(object):
def clean_source(self):
source = self.cleaned_data.get('source')
if source and not source.name.endswith(VALID_SOURCE_EXTENSIONS):
raise forms.ValidationError(
_('Unsupported file type, please upload an archive file '
'{extensions}.'.format(
extensions=VALID_SOURCE_EXTENSIONS))
)
return source
class SourceFileInput(forms.widgets.ClearableFileInput):
"""
We need to customize the URL link.
    1. Remove %(initial)s from template_with_initial
2. Prepend the new link (with customized text)
"""
template_with_initial = '%(clear_template)s<br />%(input_text)s: %(input)s'
def render(self, name, value, attrs=None):
output = super(SourceFileInput, self).render(name, value, attrs)
if value and hasattr(value, 'instance'):
url = reverse('downloads.source', args=(value.instance.pk, ))
params = {'url': url, 'output': output, 'label': _('View current')}
output = '<a href="%(url)s">%(label)s</a> %(output)s' % params
return output
class VersionForm(WithSourceMixin, happyforms.ModelForm):
releasenotes = TransField(
widget=TransTextarea(), required=False)
approvalnotes = forms.CharField(
widget=TranslationTextarea(attrs={'rows': 4}), required=False)
source = forms.FileField(required=False, widget=SourceFileInput)
class Meta:
model = Version
fields = ('releasenotes', 'approvalnotes', 'source')
class AppVersionChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.version
class CompatForm(happyforms.ModelForm):
application = forms.TypedChoiceField(choices=amo.APPS_CHOICES,
coerce=int,
widget=forms.HiddenInput)
min = AppVersionChoiceField(AppVersion.objects.none())
max = AppVersionChoiceField(AppVersion.objects.none())
class Meta:
model = ApplicationsVersions
fields = ('application', 'min', 'max')
def __init__(self, *args, **kw):
super(CompatForm, self).__init__(*args, **kw)
if self.initial:
app = self.initial['application']
else:
app = self.data[self.add_prefix('application')]
self.app = amo.APPS_ALL[int(app)]
qs = AppVersion.objects.filter(application=app).order_by('version_int')
self.fields['min'].queryset = qs.filter(~Q(version__contains='*'))
self.fields['max'].queryset = qs.all()
def clean(self):
min = self.cleaned_data.get('min')
max = self.cleaned_data.get('max')
if not (min and max and min.version_int <= max.version_int):
raise forms.ValidationError(_('Invalid version range.'))
return self.cleaned_data
class BaseCompatFormSet(BaseModelFormSet):
def __init__(self, *args, **kw):
super(BaseCompatFormSet, self).__init__(*args, **kw)
# We always want a form for each app, so force extras for apps
# the add-on does not already have.
qs = kw['queryset'].values_list('application', flat=True)
apps = [a for a in amo.APP_USAGE if a.id not in qs]
self.initial = ([{} for _ in qs] +
[{'application': a.id} for a in apps])
self.extra = len(amo.APP_GUIDS) - len(self.forms)
# After these changes, the forms need to be rebuilt. `forms`
# is a cached property, so we delete the existing cache and
# ask for a new one to be built.
del self.forms
self.forms
def clean(self):
if any(self.errors):
return
apps = filter(None, [f.cleaned_data for f in self.forms
if not f.cleaned_data.get('DELETE', False)])
if not apps:
raise forms.ValidationError(
_('Need at least one compatible application.'))
CompatFormSet = modelformset_factory(
ApplicationsVersions, formset=BaseCompatFormSet,
form=CompatForm, can_delete=True, extra=0)
class AddonUploadForm(WithSourceMixin, happyforms.Form):
upload = forms.ModelChoiceField(
widget=forms.HiddenInput,
queryset=FileUpload.objects,
error_messages={
'invalid_choice': _lazy(u'There was an error with your '
u'upload. Please try again.')
}
)
admin_override_validation = forms.BooleanField(
required=False, label=_lazy(u'Override failed validation'))
source = forms.FileField(required=False)
is_manual_review = forms.BooleanField(
initial=False, required=False,
label=_lazy(u'Submit my add-on for manual review.'))
def __init__(self, *args, **kw):
self.request = kw.pop('request')
super(AddonUploadForm, self).__init__(*args, **kw)
def _clean_upload(self):
if not (self.cleaned_data['upload'].valid or
self.cleaned_data['upload'].validation_timeout or
self.cleaned_data['admin_override_validation'] and
acl.action_allowed(self.request, 'ReviewerAdminTools',
'View')):
raise forms.ValidationError(_(u'There was an error with your '
u'upload. Please try again.'))
class NewAddonForm(AddonUploadForm):
supported_platforms = forms.TypedMultipleChoiceField(
choices=amo.SUPPORTED_PLATFORMS_CHOICES,
widget=forms.CheckboxSelectMultiple(attrs={'class': 'platform'}),
initial=[amo.PLATFORM_ALL.id],
coerce=int,
error_messages={'required': 'Need at least one platform.'}
)
is_unlisted = forms.BooleanField(
initial=False,
required=False,
label=_lazy(u'Do not list my add-on on this site (beta)'),
help_text=_lazy(
u'Check this option if you intend to distribute your add-on on '
u'your own and only need it to be signed by Mozilla.'))
is_sideload = forms.BooleanField(
initial=False,
required=False,
label=_lazy(u'This add-on will be bundled with an application '
u'installer.'),
help_text=_lazy(u'Add-ons that are bundled with application '
u'installers will be code reviewed '
u'by Mozilla before they are signed and are held to a '
u'higher quality standard.'))
def clean(self):
if not self.errors:
self._clean_upload()
xpi = parse_addon(self.cleaned_data['upload'])
# We don't enforce name uniqueness for unlisted add-ons.
if not self.cleaned_data.get('is_unlisted', False):
addons.forms.clean_name(xpi['name'], addon_type=xpi['type'])
return self.cleaned_data
class NewVersionForm(NewAddonForm):
nomination_type = forms.TypedChoiceField(
choices=(
('', ''),
(amo.STATUS_NOMINATED, _lazy('Full Review')),
(amo.STATUS_UNREVIEWED, _lazy('Preliminary Review')),
),
coerce=int, empty_value=None, required=False,
error_messages={
'required': _lazy(u'Please choose a review nomination type')
})
beta = forms.BooleanField(
required=False,
help_text=_lazy(u'A file with a version ending with '
u'a|alpha|b|beta|pre|rc and an optional number is '
u'detected as beta.'))
def __init__(self, *args, **kw):
self.addon = kw.pop('addon')
super(NewVersionForm, self).__init__(*args, **kw)
if self.addon.status == amo.STATUS_NULL:
self.fields['nomination_type'].required = True
def clean(self):
if not self.errors:
self._clean_upload()
xpi = parse_addon(self.cleaned_data['upload'], self.addon)
# Make sure we don't already have the same non-rejected version.
if self.addon.versions.filter(version=xpi['version']).exclude(
files__status=amo.STATUS_DISABLED):
raise forms.ValidationError(
_(u'Version %s already exists') % xpi['version'])
return self.cleaned_data
class NewFileForm(AddonUploadForm):
platform = forms.TypedChoiceField(
choices=amo.SUPPORTED_PLATFORMS_CHOICES,
widget=forms.RadioSelect(attrs={'class': 'platform'}),
coerce=int,
# We don't want the id value of the field to be output to the user
# when choice is invalid. Make a generic error message instead.
error_messages={
'invalid_choice': _lazy(u'Select a valid choice. That choice is '
u'not one of the available choices.')
}
)
beta = forms.BooleanField(
required=False,
help_text=_lazy(u'A file with a version ending with a|alpha|b|beta and'
u' an optional number is detected as beta.'))
def __init__(self, *args, **kw):
self.addon = kw.pop('addon')
self.version = kw.pop('version')
super(NewFileForm, self).__init__(*args, **kw)
# Reset platform choices to just those compatible with target app.
field = self.fields['platform']
field.choices = sorted((p.id, p.name) for p in
self.version.compatible_platforms().values())
# Don't allow platforms we already have.
to_exclude = set(File.objects.filter(version=self.version)
.values_list('platform', flat=True))
# Don't allow platform=ALL if we already have platform files.
if len(to_exclude):
to_exclude.add(amo.PLATFORM_ALL.id)
field.choices = [p for p in field.choices if p[0] not in to_exclude]
def clean(self):
if not self.version.is_allowed_upload():
raise forms.ValidationError(
_('You cannot upload any more files for this version.'))
# Check for errors in the xpi.
if not self.errors:
xpi = parse_addon(self.cleaned_data['upload'], self.addon)
if xpi['version'] != self.version.version:
raise forms.ValidationError(_("Version doesn't match"))
return self.cleaned_data
class FileForm(happyforms.ModelForm):
platform = File._meta.get_field('platform').formfield()
class Meta:
model = File
fields = ('platform',)
def __init__(self, *args, **kw):
super(FileForm, self).__init__(*args, **kw)
if kw['instance'].version.addon.type == amo.ADDON_SEARCH:
del self.fields['platform']
else:
compat = kw['instance'].version.compatible_platforms()
pid = int(kw['instance'].platform)
plats = [(p.id, p.name) for p in compat.values()]
if pid not in compat:
plats.append([pid, amo.PLATFORMS[pid].name])
self.fields['platform'].choices = plats
def clean_DELETE(self):
if any(self.errors):
return
delete = self.cleaned_data['DELETE']
if (delete and not self.instance.version.is_all_unreviewed):
error = _('You cannot delete a file once the review process has '
'started. You must delete the whole version.')
raise forms.ValidationError(error)
return delete
class BaseFileFormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
files = [f.cleaned_data for f in self.forms
if not f.cleaned_data.get('DELETE', False)]
if self.forms and 'platform' in self.forms[0].fields:
platforms = [f['platform'] for f in files]
if amo.PLATFORM_ALL.id in platforms and len(files) > 1:
raise forms.ValidationError(
_('The platform All cannot be combined '
'with specific platforms.'))
if sorted(platforms) != sorted(set(platforms)):
raise forms.ValidationError(
_('A platform can only be chosen once.'))
FileFormSet = modelformset_factory(File, formset=BaseFileFormSet,
form=FileForm, can_delete=True, extra=0)
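# Minimal usage sketch (an assumption for illustration, not taken from this
# module; the reverse accessor name is assumed): the formset is bound to the
# files of a single version and validated like any other model formset.
#
#   formset = FileFormSet(request.POST or None,
#                         queryset=version.files.all())
#   if formset.is_valid():
#       formset.save()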
class ReviewTypeForm(forms.Form):
_choices = [(k, Addon.STATUS_CHOICES[k]) for k in
(amo.STATUS_UNREVIEWED, amo.STATUS_NOMINATED)]
review_type = forms.TypedChoiceField(
choices=_choices, widget=forms.HiddenInput,
coerce=int, empty_value=None,
error_messages={'required': _lazy(u'A review type must be selected.')})
class Step3Form(addons.forms.AddonFormBasic):
description = TransField(widget=TransTextarea, required=False)
class Meta:
model = Addon
fields = ('name', 'slug', 'summary', 'tags', 'description',
'homepage', 'support_email', 'support_url')
class PreviewForm(happyforms.ModelForm):
caption = TransField(widget=TransTextarea, required=False)
file_upload = forms.FileField(required=False)
upload_hash = forms.CharField(required=False)
def save(self, addon, commit=True):
if self.cleaned_data:
self.instance.addon = addon
if self.cleaned_data.get('DELETE'):
# Existing preview.
if self.instance.id:
self.instance.delete()
# User has no desire to save this preview.
return
super(PreviewForm, self).save(commit=commit)
if self.cleaned_data['upload_hash']:
upload_hash = self.cleaned_data['upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'preview',
upload_hash)
tasks.resize_preview.delay(upload_path, self.instance,
set_modified_on=[self.instance])
class Meta:
model = Preview
fields = ('caption', 'file_upload', 'upload_hash', 'id', 'position')
class BasePreviewFormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
form=PreviewForm, can_delete=True,
extra=1)
class AdminForm(happyforms.ModelForm):
_choices = [(k, v) for k, v in amo.ADDON_TYPE.items()
if k != amo.ADDON_ANY]
type = forms.ChoiceField(choices=_choices)
# Request is needed in other ajax forms so we're stuck here.
def __init__(self, request=None, *args, **kw):
super(AdminForm, self).__init__(*args, **kw)
class Meta:
model = Addon
fields = ('trusted', 'type', 'guid',
'target_locale', 'locale_disambiguation')
widgets = {
'guid': forms.TextInput(attrs={'size': '50'})
}
class InlineRadioRenderer(forms.widgets.RadioFieldRenderer):
def render(self):
return mark_safe(''.join(force_unicode(w) for w in self))
class CheckCompatibilityForm(happyforms.Form):
application = forms.ChoiceField(
label=_lazy(u'Application'),
choices=[(a.id, a.pretty) for a in amo.APP_USAGE])
app_version = forms.ChoiceField(
label=_lazy(u'Version'),
choices=[('', _lazy(u'Select an application first'))])
def __init__(self, *args, **kw):
super(CheckCompatibilityForm, self).__init__(*args, **kw)
w = self.fields['application'].widget
# Get the URL after the urlconf has loaded.
w.attrs['data-url'] = reverse('devhub.compat_application_versions')
def version_choices_for_app_id(self, app_id):
versions = AppVersion.objects.filter(application=app_id)
return [(v.id, v.version) for v in versions]
def clean_application(self):
app_id = int(self.cleaned_data['application'])
app = amo.APPS_IDS.get(app_id)
self.cleaned_data['application'] = app
choices = self.version_choices_for_app_id(app_id)
self.fields['app_version'].choices = choices
return self.cleaned_data['application']
def clean_app_version(self):
v = self.cleaned_data['app_version']
return AppVersion.objects.get(pk=int(v))
def DependencyFormSet(*args, **kw):
addon_parent = kw.pop('addon')
# Add-ons: Required add-ons cannot include apps nor personas.
# Apps: Required apps cannot include any add-ons.
qs = (Addon.objects.reviewed().exclude(id=addon_parent.id).
exclude(type__in=[amo.ADDON_PERSONA]))
class _Form(happyforms.ModelForm):
addon = forms.CharField(required=False, widget=forms.HiddenInput)
dependent_addon = forms.ModelChoiceField(qs, widget=forms.HiddenInput)
class Meta:
model = AddonDependency
fields = ('addon', 'dependent_addon')
def clean_addon(self):
return addon_parent
class _FormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
form_count = len([f for f in self.forms
if not f.cleaned_data.get('DELETE', False)])
if form_count > 3:
error = _('There cannot be more than 3 required add-ons.')
raise forms.ValidationError(error)
FormSet = modelformset_factory(AddonDependency, formset=_FormSet,
form=_Form, extra=0, can_delete=True)
return FormSet(*args, **kw)
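# Usage sketch (hedged; the calling view is not shown here): the factory is
# invoked like a normal formset class, with the parent add-on passed as a
# keyword argument so clean_addon() can pin every row to it.
#
#   formset = DependencyFormSet(
#       request.POST or None,
#       queryset=AddonDependency.objects.filter(addon=addon),
#       addon=addon)
#   if formset.is_valid():
#       formset.save()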
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime as dt
import json
import uuid
import mock
from oslo_utils import timeutils
import six
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.engine import api
from heat.engine import event
from heat.engine import parameters
from heat.engine import stack as parser
from heat.engine import template
from heat.rpc import api as rpc_api
from heat.tests import common
from heat.tests import utils
datetime = dt.datetime
class FormatTest(common.HeatTestCase):
def setUp(self):
super(FormatTest, self).setUp()
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'generic1': {'Type': 'GenericResourceType'},
'generic2': {
'Type': 'GenericResourceType',
'DependsOn': 'generic1'},
'generic3': {'Type': 'ResWithShowAttrType'},
'generic4': {'Type': 'StackResourceType'}
}
})
self.stack = parser.Stack(utils.dummy_context(), 'test_stack',
tmpl, stack_id=str(uuid.uuid4()))
def _dummy_event(self, event_id):
resource = self.stack['generic1']
return event.Event(utils.dummy_context(), self.stack, 'CREATE',
'COMPLETE', 'state changed',
'z3455xyc-9f88-404d-a85b-5315293e67de',
resource.properties, resource.name, resource.type(),
uuid='abc123yc-9f88-404d-a85b-531529456xyz',
id=event_id)
def test_format_stack_resource(self):
self.stack.created_time = datetime(2015, 8, 3, 17, 5, 1)
self.stack.updated_time = datetime(2015, 8, 3, 17, 6, 2)
res = self.stack['generic1']
resource_keys = set((
rpc_api.RES_CREATION_TIME,
rpc_api.RES_UPDATED_TIME,
rpc_api.RES_NAME,
rpc_api.RES_PHYSICAL_ID,
rpc_api.RES_ACTION,
rpc_api.RES_STATUS,
rpc_api.RES_STATUS_DATA,
rpc_api.RES_TYPE,
rpc_api.RES_ID,
rpc_api.RES_STACK_ID,
rpc_api.RES_STACK_NAME,
rpc_api.RES_REQUIRED_BY,
))
resource_details_keys = resource_keys.union(set((
rpc_api.RES_DESCRIPTION,
rpc_api.RES_METADATA,
rpc_api.RES_ATTRIBUTES,
)))
formatted = api.format_stack_resource(res, True)
self.assertEqual(resource_details_keys, set(six.iterkeys(formatted)))
formatted = api.format_stack_resource(res, False)
self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
self.assertEqual(self.stack.created_time.isoformat(),
formatted[rpc_api.RES_CREATION_TIME])
self.assertEqual(self.stack.updated_time.isoformat(),
formatted[rpc_api.RES_UPDATED_TIME])
self.assertEqual(res.INIT, formatted[rpc_api.RES_ACTION])
def test_format_stack_resource_no_attrs(self):
res = self.stack['generic1']
formatted = api.format_stack_resource(res, True, with_attr=False)
self.assertNotIn(rpc_api.RES_ATTRIBUTES, formatted)
self.assertIn(rpc_api.RES_METADATA, formatted)
def test_format_stack_resource_has_been_deleted(self):
        # Assume the stack and resource have been deleted; check that the
        # resource's action is inherited from the stack.
self.stack.state_set(self.stack.DELETE, self.stack.COMPLETE,
'test_delete')
res = self.stack['generic1']
formatted = api.format_stack_resource(res, False)
self.assertEqual(res.DELETE, formatted[rpc_api.RES_ACTION])
def test_format_stack_resource_has_been_rollback(self):
        # When a stack is rolled back, its resources may not have been
        # created yet, or may already have been deleted by the rollback.
        # Check that the resource's action is inherited from the stack.
self.stack.state_set(self.stack.ROLLBACK, self.stack.COMPLETE,
'test_rollback')
res = self.stack['generic1']
formatted = api.format_stack_resource(res, False)
self.assertEqual(res.ROLLBACK, formatted[rpc_api.RES_ACTION])
@mock.patch.object(api, 'format_resource_properties')
def test_format_stack_resource_with_props(self, mock_format_props):
mock_format_props.return_value = 'formatted_res_props'
res = self.stack['generic1']
formatted = api.format_stack_resource(res, True, with_props=True)
formatted_props = formatted[rpc_api.RES_PROPERTIES]
self.assertEqual('formatted_res_props', formatted_props)
@mock.patch.object(api, 'format_resource_attributes')
def test_format_stack_resource_with_attributes(self, mock_format_attrs):
mock_format_attrs.return_value = 'formatted_resource_attrs'
res = self.stack['generic1']
formatted = api.format_stack_resource(res, True, with_attr=['a', 'b'])
formatted_attrs = formatted[rpc_api.RES_ATTRIBUTES]
self.assertEqual('formatted_resource_attrs', formatted_attrs)
def test_format_resource_attributes(self):
res = self.stack['generic1']
        # the _resolve_attribute method of 'generic1' returns a map of all
        # attributes except 'show' (because 'show' is None in this test)
formatted_attributes = api.format_resource_attributes(res)
expected = {'foo': 'generic1', 'Foo': 'generic1'}
self.assertEqual(expected, formatted_attributes)
def test_format_resource_attributes_show_attribute(self):
res = self.stack['generic3']
res.resource_id = 'generic3_id'
formatted_attributes = api.format_resource_attributes(res)
self.assertEqual(3, len(formatted_attributes))
self.assertIn('foo', formatted_attributes)
self.assertIn('Foo', formatted_attributes)
self.assertIn('Another', formatted_attributes)
def test_format_resource_attributes_show_attribute_with_attr(self):
res = self.stack['generic3']
res.resource_id = 'generic3_id'
formatted_attributes = api.format_resource_attributes(
res, with_attr=['c'])
self.assertEqual(4, len(formatted_attributes))
self.assertIn('foo', formatted_attributes)
self.assertIn('Foo', formatted_attributes)
self.assertIn('Another', formatted_attributes)
self.assertIn('c', formatted_attributes)
def _get_formatted_resource_properties(self, res_name):
tmpl = template.Template(template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: ResWithComplexPropsAndAttrs
resource2:
type: ResWithComplexPropsAndAttrs
properties:
a_string: foobar
resource3:
type: ResWithComplexPropsAndAttrs
properties:
a_string: { get_attr: [ resource2, string] }
'''))
stack = parser.Stack(utils.dummy_context(), 'test_stack_for_preview',
tmpl, stack_id=str(uuid.uuid4()))
res = stack[res_name]
return api.format_resource_properties(res)
def test_format_resource_properties_empty(self):
props = self._get_formatted_resource_properties('resource1')
self.assertIsNone(props['a_string'])
self.assertIsNone(props['a_list'])
self.assertIsNone(props['a_map'])
def test_format_resource_properties_direct_props(self):
props = self._get_formatted_resource_properties('resource2')
self.assertEqual('foobar', props['a_string'])
def test_format_resource_properties_get_attr(self):
props = self._get_formatted_resource_properties('resource3')
self.assertEqual('', props['a_string'])
def test_format_stack_resource_with_nested_stack(self):
res = self.stack['generic4']
nested_id = {'foo': 'bar'}
res.nested = mock.Mock()
res.nested.return_value.identifier.return_value = nested_id
formatted = api.format_stack_resource(res, False)
self.assertEqual(nested_id, formatted[rpc_api.RES_NESTED_STACK_ID])
def test_format_stack_resource_with_nested_stack_none(self):
res = self.stack['generic4']
res.nested = mock.Mock()
res.nested.return_value = None
resource_keys = set((
rpc_api.RES_CREATION_TIME,
rpc_api.RES_UPDATED_TIME,
rpc_api.RES_NAME,
rpc_api.RES_PHYSICAL_ID,
rpc_api.RES_ACTION,
rpc_api.RES_STATUS,
rpc_api.RES_STATUS_DATA,
rpc_api.RES_TYPE,
rpc_api.RES_ID,
rpc_api.RES_STACK_ID,
rpc_api.RES_STACK_NAME,
rpc_api.RES_REQUIRED_BY))
formatted = api.format_stack_resource(res, False)
self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
def test_format_stack_resource_with_nested_stack_not_found(self):
res = self.stack['generic4']
self.patchobject(parser.Stack, 'load',
side_effect=exception.NotFound())
resource_keys = set((
rpc_api.RES_CREATION_TIME,
rpc_api.RES_UPDATED_TIME,
rpc_api.RES_NAME,
rpc_api.RES_PHYSICAL_ID,
rpc_api.RES_ACTION,
rpc_api.RES_STATUS,
rpc_api.RES_STATUS_DATA,
rpc_api.RES_TYPE,
rpc_api.RES_ID,
rpc_api.RES_STACK_ID,
rpc_api.RES_STACK_NAME,
rpc_api.RES_REQUIRED_BY))
formatted = api.format_stack_resource(res, False)
# 'nested_stack_id' is not in formatted
self.assertEqual(resource_keys, set(six.iterkeys(formatted)))
def test_format_stack_resource_with_nested_stack_empty(self):
res = self.stack['generic4']
nested_id = {'foo': 'bar'}
res.nested = mock.MagicMock()
res.nested.return_value.identifier.return_value = nested_id
res.nested.return_value.__len__.return_value = 0
formatted = api.format_stack_resource(res, False)
res.nested.return_value.identifier.assert_called_once_with()
self.assertEqual(nested_id, formatted[rpc_api.RES_NESTED_STACK_ID])
def test_format_stack_resource_required_by(self):
res1 = api.format_stack_resource(self.stack['generic1'])
res2 = api.format_stack_resource(self.stack['generic2'])
self.assertEqual(['generic2'], res1['required_by'])
self.assertEqual([], res2['required_by'])
def test_format_stack_resource_with_parent_stack(self):
res = self.stack['generic1']
res.stack.parent_resource_name = 'foobar'
formatted = api.format_stack_resource(res, False)
self.assertEqual('foobar', formatted[rpc_api.RES_PARENT_RESOURCE])
def test_format_event_identifier_uuid(self):
self._test_format_event('abc123yc-9f88-404d-a85b-531529456xyz')
def _test_format_event(self, event_id):
event = self._dummy_event(event_id)
event_keys = set((
rpc_api.EVENT_ID,
rpc_api.EVENT_STACK_ID,
rpc_api.EVENT_STACK_NAME,
rpc_api.EVENT_TIMESTAMP,
rpc_api.EVENT_RES_NAME,
rpc_api.EVENT_RES_PHYSICAL_ID,
rpc_api.EVENT_RES_ACTION,
rpc_api.EVENT_RES_STATUS,
rpc_api.EVENT_RES_STATUS_DATA,
rpc_api.EVENT_RES_TYPE,
rpc_api.EVENT_RES_PROPERTIES))
formatted = api.format_event(event)
self.assertEqual(event_keys, set(six.iterkeys(formatted)))
event_id_formatted = formatted[rpc_api.EVENT_ID]
event_identifier = identifier.EventIdentifier(
event_id_formatted['tenant'],
event_id_formatted['stack_name'],
event_id_formatted['stack_id'],
event_id_formatted['path'])
self.assertEqual(event_id, event_identifier.event_id)
@mock.patch.object(api, 'format_stack_resource')
def test_format_stack_preview(self, mock_fmt_resource):
def mock_format_resources(res, **kwargs):
return 'fmt%s' % res
mock_fmt_resource.side_effect = mock_format_resources
resources = [1, [2, [3]]]
self.stack.preview_resources = mock.Mock(return_value=resources)
stack = api.format_stack_preview(self.stack)
self.assertIsInstance(stack, dict)
self.assertIsNone(stack.get('status'))
self.assertIsNone(stack.get('action'))
self.assertIsNone(stack.get('status_reason'))
self.assertEqual('test_stack', stack['stack_name'])
self.assertIn('resources', stack)
resources = list(stack['resources'])
self.assertEqual('fmt1', resources[0])
resources = list(resources[1])
self.assertEqual('fmt2', resources[0])
resources = list(resources[1])
self.assertEqual('fmt3', resources[0])
kwargs = mock_fmt_resource.call_args[1]
self.assertTrue(kwargs['with_props'])
def test_format_stack(self):
self.stack.created_time = datetime(1970, 1, 1)
info = api.format_stack(self.stack)
aws_id = ('arn:openstack:heat::test_tenant_id:'
'stacks/test_stack/' + self.stack.id)
expected_stack_info = {
'capabilities': [],
'creation_time': '1970-01-01T00:00:00',
'description': 'No description',
'disable_rollback': True,
'notification_topics': [],
'stack_action': 'CREATE',
'stack_name': 'test_stack',
'stack_owner': 'test_username',
'stack_status': 'IN_PROGRESS',
'stack_status_reason': '',
'stack_user_project_id': None,
'outputs': [],
'template_description': 'No description',
'timeout_mins': None,
'tags': None,
'parameters': {
'AWS::Region': 'ap-southeast-1',
'AWS::StackId': aws_id,
'AWS::StackName': 'test_stack'},
'stack_identity': {
'path': '',
'stack_id': self.stack.id,
'stack_name': 'test_stack',
'tenant': 'test_tenant_id'},
'updated_time': None,
'parent': None}
self.assertEqual(expected_stack_info, info)
def test_format_stack_created_time(self):
self.stack.created_time = None
info = api.format_stack(self.stack)
self.assertIsNotNone(info['creation_time'])
def test_format_stack_updated_time(self):
self.stack.updated_time = None
info = api.format_stack(self.stack)
self.assertIsNone(info['updated_time'])
self.stack.updated_time = datetime(1970, 1, 1)
info = api.format_stack(self.stack)
self.assertEqual('1970-01-01T00:00:00', info['updated_time'])
@mock.patch.object(api, 'format_stack_outputs')
def test_format_stack_adds_outputs(self, mock_fmt_outputs):
mock_fmt_outputs.return_value = 'foobar'
self.stack.action = 'CREATE'
self.stack.status = 'COMPLETE'
info = api.format_stack(self.stack)
self.assertEqual('foobar', info[rpc_api.STACK_OUTPUTS])
@mock.patch.object(api, 'format_stack_outputs')
def test_format_stack_without_resolving_outputs(self, mock_fmt_outputs):
mock_fmt_outputs.return_value = 'foobar'
self.stack.action = 'CREATE'
self.stack.status = 'COMPLETE'
info = api.format_stack(self.stack, resolve_outputs=False)
self.assertIsNone(info.get(rpc_api.STACK_OUTPUTS))
def test_format_stack_outputs(self):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'generic': {'Type': 'GenericResourceType'}
},
'Outputs': {
'correct_output': {
'Description': 'Good output',
'Value': {'Fn::GetAtt': ['generic', 'Foo']}
},
'incorrect_output': {
'Value': {'Fn::GetAtt': ['generic', 'Bar']}
}
}
})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
tmpl, stack_id=str(uuid.uuid4()))
stack.action = 'CREATE'
stack.status = 'COMPLETE'
stack['generic'].action = 'CREATE'
stack['generic'].status = 'COMPLETE'
info = api.format_stack_outputs(stack, stack.outputs,
resolve_value=True)
expected = [{'description': 'No description given',
'output_error': 'The Referenced Attribute (generic Bar) '
'is incorrect.',
'output_key': 'incorrect_output',
'output_value': None},
{'description': 'Good output',
'output_key': 'correct_output',
'output_value': 'generic'}]
self.assertEqual(expected, sorted(info, key=lambda k: k['output_key'],
reverse=True))
def test_format_stack_outputs_unresolved(self):
tmpl = template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'generic': {'Type': 'GenericResourceType'}
},
'Outputs': {
'correct_output': {
'Description': 'Good output',
'Value': {'Fn::GetAtt': ['generic', 'Foo']}
},
'incorrect_output': {
'Value': {'Fn::GetAtt': ['generic', 'Bar']}
}
}
})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
tmpl, stack_id=str(uuid.uuid4()))
stack.action = 'CREATE'
stack.status = 'COMPLETE'
stack['generic'].action = 'CREATE'
stack['generic'].status = 'COMPLETE'
info = api.format_stack_outputs(stack, stack.outputs)
expected = [{'description': 'No description given',
'output_key': 'incorrect_output'},
{'description': 'Good output',
'output_key': 'correct_output'}]
self.assertEqual(expected, sorted(info, key=lambda k: k['output_key'],
reverse=True))
class FormatValidateParameterTest(common.HeatTestCase):
base_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test",
"Parameters" : {
%s
}
}
'''
base_template_hot = '''
{
"heat_template_version" : "2013-05-23",
"description" : "test",
"parameters" : {
%s
}
}
'''
scenarios = [
('simple',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('default',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"Default": "dummy"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'Default': 'dummy',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('max_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MaxLength": 10
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_max_length_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4,
"MaxLength": 10
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MinValue": 4
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('max_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MaxValue": 10
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('min_max_value_constraint',
dict(template=base_template,
param_name='MyNumber',
param='''
"MyNumber": {
"Type": "Number",
"Description": "A number",
"MinValue": 4,
"MaxValue": 10
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('allowed_values_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"AllowedValues": [ "foo", "bar", "blub" ]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedValues': ['foo', 'bar', 'blub'],
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('allowed_pattern_constraint',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"AllowedPattern": "[a-zA-Z0-9]+"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('multiple_constraints',
dict(template=base_template,
param_name='KeyName',
param='''
"KeyName": {
"Type": "String",
"Description": "Name of SSH key pair",
"MinLength": 4,
"MaxLength": 10,
"AllowedValues": [
"foo", "bar", "blub"
],
"AllowedPattern": "[a-zA-Z0-9]+"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'AllowedValues': ['foo', 'bar', 'blub'],
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('simple_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('default_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"default": "dummy"
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'Default': 'dummy',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('max_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "max": 10} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_max_length_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min":4, "max": 10} }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('min_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "min": 4} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('max_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "max": 10} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('min_max_value_constraint_hot',
dict(template=base_template_hot,
param_name='MyNumber',
param='''
"MyNumber": {
"type": "number",
"description": "A number",
"constraints": [
{ "range": { "min": 4, "max": 10} }
]
}
''',
expected={
'Type': 'Number',
'Description': 'A number',
'MinValue': 4,
'MaxValue': 10,
'NoEcho': 'false',
'Label': 'MyNumber'
})
),
('allowed_values_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "allowed_values": [
"foo", "bar", "blub"
]
}
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedValues': ['foo', 'bar', 'blub'],
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('allowed_pattern_constraint_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "allowed_pattern": "[a-zA-Z0-9]+" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('multiple_constraints_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4, "max": 10} },
{ "allowed_values": [
"foo", "bar", "blub"
]
},
{ "allowed_pattern": "[a-zA-Z0-9]+" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'MaxLength': 10,
'AllowedValues': ['foo', 'bar', 'blub'],
'AllowedPattern': "[a-zA-Z0-9]+",
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_description_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4},
"description": "Big enough" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'ConstraintDescription': 'Big enough',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_multiple_descriptions_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Name of SSH key pair",
"constraints": [
{ "length": { "min": 4},
"description": "Big enough." },
{ "allowed_pattern": "[a-zA-Z0-9]+",
"description": "Only letters." }
]
}
''',
expected={
'Type': 'String',
'Description': 'Name of SSH key pair',
'MinLength': 4,
'AllowedPattern': "[a-zA-Z0-9]+",
'ConstraintDescription': 'Big enough. Only letters.',
'NoEcho': 'false',
'Label': 'KeyName'
})
),
('constraint_custom_hot',
dict(template=base_template_hot,
param_name='KeyName',
param='''
"KeyName": {
"type": "string",
"description": "Public Network",
"constraints": [
{ "custom_constraint": "neutron.network" }
]
}
''',
expected={
'Type': 'String',
'Description': 'Public Network',
'NoEcho': 'false',
'Label': 'KeyName',
'CustomConstraint': 'neutron.network'
})
)
]
def test_format_validate_parameter(self):
"""Test format of a parameter."""
t = template_format.parse(self.template % self.param)
tmpl = template.Template(t)
tmpl_params = parameters.Parameters(None, tmpl)
tmpl_params.validate(validate_value=False)
param = tmpl_params.params[self.param_name]
        param_formatted = api.format_validate_parameter(param)
        self.assertEqual(self.expected, param_formatted)
class FormatSoftwareConfigDeploymentTest(common.HeatTestCase):
def _dummy_software_config(self):
config = mock.Mock()
self.now = timeutils.utcnow()
config.name = 'config_mysql'
config.group = 'Heat::Shell'
config.id = str(uuid.uuid4())
config.created_at = self.now
config.config = {
'inputs': [{'name': 'bar'}],
'outputs': [{'name': 'result'}],
'options': {},
'config': '#!/bin/bash\n'
}
return config
def _dummy_software_deployment(self):
config = self._dummy_software_config()
deployment = mock.Mock()
deployment.config = config
deployment.id = str(uuid.uuid4())
deployment.server_id = str(uuid.uuid4())
deployment.input_values = {'bar': 'baaaaa'}
deployment.output_values = {'result': '0'}
deployment.action = 'INIT'
deployment.status = 'COMPLETE'
deployment.status_reason = 'Because'
deployment.created_at = config.created_at
deployment.updated_at = config.created_at
return deployment
def test_format_software_config(self):
config = self._dummy_software_config()
result = api.format_software_config(config)
self.assertIsNotNone(result)
self.assertEqual([{'name': 'bar'}], result['inputs'])
self.assertEqual([{'name': 'result'}], result['outputs'])
self.assertEqual({}, result['options'])
self.assertEqual(self.now.isoformat(), result['creation_time'])
def test_format_software_config_none(self):
self.assertIsNone(api.format_software_config(None))
def test_format_software_deployment(self):
deployment = self._dummy_software_deployment()
result = api.format_software_deployment(deployment)
self.assertIsNotNone(result)
self.assertEqual(deployment.id, result['id'])
self.assertEqual(deployment.config.id, result['config_id'])
self.assertEqual(deployment.server_id, result['server_id'])
self.assertEqual(deployment.input_values, result['input_values'])
self.assertEqual(deployment.output_values, result['output_values'])
self.assertEqual(deployment.action, result['action'])
self.assertEqual(deployment.status, result['status'])
self.assertEqual(deployment.status_reason, result['status_reason'])
self.assertEqual(self.now.isoformat(), result['creation_time'])
self.assertEqual(self.now.isoformat(), result['updated_time'])
def test_format_software_deployment_none(self):
self.assertIsNone(api.format_software_deployment(None))
class TestExtractArgs(common.HeatTestCase):
def test_timeout_extract(self):
p = {'timeout_mins': '5'}
args = api.extract_args(p)
self.assertEqual(5, args['timeout_mins'])
def test_timeout_extract_zero(self):
p = {'timeout_mins': '0'}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_garbage(self):
p = {'timeout_mins': 'wibble'}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_none(self):
p = {'timeout_mins': None}
args = api.extract_args(p)
self.assertNotIn('timeout_mins', args)
def test_timeout_extract_negative(self):
p = {'timeout_mins': '-100'}
error = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid timeout value', six.text_type(error))
def test_timeout_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('timeout_mins', args)
def test_adopt_stack_data_extract_present(self):
p = {'adopt_stack_data': json.dumps({'Resources': {}})}
args = api.extract_args(p)
self.assertTrue(args.get('adopt_stack_data'))
def test_invalid_adopt_stack_data(self):
params = {'adopt_stack_data': json.dumps("foo")}
exc = self.assertRaises(ValueError, api.extract_args, params)
self.assertIn('Invalid adopt data', six.text_type(exc))
def test_adopt_stack_data_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('adopt_stack_data', args)
def test_disable_rollback_extract_true(self):
args = api.extract_args({'disable_rollback': True})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'True'})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'true'})
self.assertIn('disable_rollback', args)
self.assertTrue(args.get('disable_rollback'))
def test_disable_rollback_extract_false(self):
args = api.extract_args({'disable_rollback': False})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'False'})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
args = api.extract_args({'disable_rollback': 'false'})
self.assertIn('disable_rollback', args)
self.assertFalse(args.get('disable_rollback'))
def test_disable_rollback_extract_bad(self):
self.assertRaises(ValueError, api.extract_args,
{'disable_rollback': 'bad'})
def test_tags_extract(self):
p = {'tags': ["tag1", "tag2"]}
args = api.extract_args(p)
self.assertEqual(['tag1', 'tag2'], args['tags'])
def test_tags_extract_not_present(self):
args = api.extract_args({})
self.assertNotIn('tags', args)
def test_tags_extract_not_map(self):
p = {'tags': {"foo": "bar"}}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tags, not a list: ', six.text_type(exc))
def test_tags_extract_not_string(self):
p = {'tags': ["tag1", 2]}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "2" is not a string', six.text_type(exc))
def test_tags_extract_over_limit(self):
p = {'tags': ["tag1", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" is longer '
'than 80 characters', six.text_type(exc))
def test_tags_extract_comma(self):
p = {'tags': ["tag1", 'tag2,']}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "tag2," contains a comma',
six.text_type(exc))
class TranslateFilterTest(common.HeatTestCase):
scenarios = [
(
'single+single',
dict(inputs={'stack_status': 'COMPLETE', 'status': 'FAILED'},
expected={'status': ['COMPLETE', 'FAILED']})
), (
'none+single',
dict(inputs={'name': 'n1'},
expected={'name': 'n1'})
), (
'single+none',
dict(inputs={'stack_name': 'n1'},
expected={'name': 'n1'})
), (
'none+list',
dict(inputs={'action': ['a1', 'a2']},
expected={'action': ['a1', 'a2']})
), (
'list+none',
dict(inputs={'stack_action': ['a1', 'a2']},
expected={'action': ['a1', 'a2']})
), (
'single+list',
dict(inputs={'stack_owner': 'u1', 'username': ['u2', 'u3']},
expected={'username': ['u1', 'u2', 'u3']})
), (
'list+single',
dict(inputs={'parent': ['s1', 's2'], 'owner_id': 's3'},
expected={'owner_id': ['s1', 's2', 's3']})
), (
'list+list',
dict(inputs={'stack_name': ['n1', 'n2'], 'name': ['n3', 'n4']},
expected={'name': ['n1', 'n2', 'n3', 'n4']})
), (
'full_status_split',
dict(inputs={'stack_status': 'CREATE_COMPLETE'},
expected={'action': 'CREATE', 'status': 'COMPLETE'})
), (
'full_status_split_merge',
dict(inputs={'stack_status': 'CREATE_COMPLETE',
'status': 'CREATE_FAILED'},
expected={'action': 'CREATE',
'status': ['COMPLETE', 'FAILED']})
), (
'action_status_merge',
dict(inputs={'action': ['UPDATE', 'CREATE'],
'status': 'CREATE_FAILED'},
expected={'action': ['CREATE', 'UPDATE'],
'status': 'FAILED'})
)
]
def test_stack_filter_translate(self):
actual = api.translate_filters(self.inputs)
self.assertEqual(self.expected, actual)
class ParseStatusTest(common.HeatTestCase):
scenarios = [
(
'single_bogus',
dict(inputs='bogus status',
expected=(set(), set()))
), (
'list_bogus',
dict(inputs=['foo', 'bar'],
expected=(set(), set()))
), (
'single_partial',
dict(inputs='COMPLETE',
expected=(set(), set(['COMPLETE'])))
), (
'multi_partial',
dict(inputs=['FAILED', 'COMPLETE'],
expected=(set(), set(['FAILED', 'COMPLETE'])))
), (
'multi_partial_dup',
dict(inputs=['FAILED', 'FAILED'],
expected=(set(), set(['FAILED'])))
), (
'single_full',
dict(inputs=['DELETE_FAILED'],
expected=(set(['DELETE']), set(['FAILED'])))
), (
'multi_full',
dict(inputs=['DELETE_FAILED', 'CREATE_COMPLETE'],
expected=(set(['CREATE', 'DELETE']),
set(['COMPLETE', 'FAILED'])))
), (
'mix_bogus_partial',
dict(inputs=['delete_failed', 'COMPLETE'],
expected=(set(), set(['COMPLETE'])))
), (
'mix_bogus_full',
dict(inputs=['delete_failed', 'action_COMPLETE'],
expected=(set(['action']), set(['COMPLETE'])))
), (
'mix_bogus_full_incomplete',
dict(inputs=['delete_failed', '_COMPLETE'],
expected=(set(), set(['COMPLETE'])))
), (
'mix_partial_full',
dict(inputs=['FAILED', 'b_COMPLETE'],
expected=(set(['b']),
set(['COMPLETE', 'FAILED'])))
), (
'mix_full_dup',
dict(inputs=['a_FAILED', 'a_COMPLETE'],
expected=(set(['a']),
set(['COMPLETE', 'FAILED'])))
), (
'mix_full_dup_2',
dict(inputs=['a_FAILED', 'b_FAILED'],
expected=(set(['a', 'b']), set(['FAILED'])))
)
]
def test_stack_parse_status(self):
actual = api._parse_object_status(self.inputs)
self.assertEqual(self.expected, actual)
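# Illustration inferred from the scenarios above (not from the api module
# itself): translate_filters() maps the stack_* aliases onto their canonical
# keys, splits full "<ACTION>_<STATUS>" values, and merges duplicates:
#
#   api.translate_filters({'stack_status': 'CREATE_COMPLETE',
#                          'status': 'CREATE_FAILED'})
#   # -> {'action': 'CREATE', 'status': ['COMPLETE', 'FAILED']}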
|
|
import warnings
from collections import namedtuple, defaultdict
#Node = namedtuple('Node', ('id', 'data', 'edges', 'in_edges'))
#Edge = namedtuple('Edge', ('start', 'end', 'label', 'data', 'directed'))
class MiniGraphError(Exception): pass
class MiniGraphWarning(Warning): pass
# todo: consider functools.lru_cache for the retrieval methods
class MiniGraph(object):
__slots__ = ('_graph',)
def __init__(self, nodes=None, edges=None):
self._graph = {}
# nodes
if nodes is None:
nodes = {}
self.add_nodes(nodes)
# edges
if edges is None:
edges = {}
self.add_edges(edges)
@classmethod
def fast_init(cls, nodes=None, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges1(edges)
return mg
@classmethod
def fast_init2(cls, nodes, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
        Furthermore, all edges must only use nodes specified in the
nodes argument.
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges2(edges)
return mg
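    # A small illustration (not part of the original docstrings) of the edge
    # format the fast initializers expect -- full 5-tuples, with every node
    # already present in the nodes argument for fast_init2:
    #
    #   nodes = [0, 1, 2]
    #   edges = [(0, 1, None, {}, True),          # directed, unlabeled
    #            (1, 2, 'dep', {'w': 1}, False)]  # undirected, labeled
    #   mg = MiniGraph.fast_init2(nodes, edges)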
def __getitem__(self, idx):
"""
Fancy graph queries:
- if idx is an integer, return the node given by idx
- if idx is a slice, return the edges matching
start:end:label. Note that not specifying the label uses
the label of None, which is a valid label. If you want to
consider all labels, use Ellipsis: (g[0:1:...]). All edges
can be retrieved with g[::...].
"""
try:
start, end, label = idx.start, idx.stop, idx.step
if label is Ellipsis:
return self.find_edges(start, end)
else:
return self.find_edges(start, end, label=label)
except AttributeError:
            return (idx, self._graph[idx][1])
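    # Query examples (illustrative only, assuming the small graph sketched
    # above fast_init2):
    #
    #   mg[0]          -> (0, {})                 node id and its data dict
    #   mg[1:2:'dep']  -> edges from 1 to 2 labeled 'dep'
    #   mg[0:1]        -> edges from 0 to 1 with the label None
    #   mg[::...]      -> every edge, regardless of label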
def add_node(self, nodeid, data=None):
# if nodeid in self.nodes:
# raise MiniGraphError('Node already exists: {}'.format(nodeid))
#self.nodes[nodeid] = dict(data or [])
if data is None:
data = {}
if nodeid in self._graph:
self._graph[nodeid][1].update(data)
else:
self._graph[nodeid] = (nodeid, data, {}, {})
def add_nodes(self, nodes):
for node in nodes:
try:
node, data = node
except TypeError:
data = {}
self.add_node(node, data=data)
def remove_node(self, nodeid):
g = self._graph
if nodeid not in g:
raise KeyError(nodeid)
_prune_edges(g, nodeid)
del g[nodeid]
def node(self, nodeid):
return self._graph[nodeid]
def nodes(self):
return [(nid, n[1]) for nid, n in self._graph.items()]
def add_edge(self, start, end, label=None, data=None, directed=True):
self.add_edges([(start, end, label, data, directed)])
#@profile
def add_edges(self, edges):
g = self._graph
add_edge = _add_edge
for edge in edges:
edgelen = len(edge)
if edgelen == 5:
start, end, label, data, directed = edge
elif edgelen == 2:
start, end = edge; label = data = None; directed = True
elif edgelen == 4:
start, end, label, data = edge; directed = True
elif edgelen == 3:
start, end, label = edge; data = None; directed = True
else:
raise MiniGraphError('Invalid edge: {}'.format(edge))
if data is None: data = {}
if start not in g: g[start] = (start, {}, {}, {})
if end not in g: g[end] = (end, {}, {}, {})
e = (start, end, label, data, directed)
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
#add_edge(g[start][3], label, end, e)
d = g[start][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
def _fast_add_edges1(self, edges):
g = self._graph
add_edge = _add_edge
for e in edges:
start = e[0]
end = e[1]
label = e[2]
directed = e[4]
if start not in g:
g[start] = (start, {}, {}, {})
if end not in g:
g[end] = (end, {}, {}, {})
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
#add_edge(g[start][3], label, end, e)
d = g[start][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
def _fast_add_edges2(self, edges):
g = self._graph
add_edge = _add_edge
for e in edges:
start = e[0]
end = e[1]
label = e[2]
directed = e[4]
add_edge(g[start][2], label, end, e)
add_edge(g[end][3], label, start, e)
if directed is False:
add_edge(g[end][2], label, start, e)
add_edge(g[start][3], label, end, e)
def remove_edge(self, start, end, label=None, directed=None):
g = self._graph
if start not in g: raise KeyError(start)
edges = g[start][2]
if label not in edges: raise KeyError(label)
if end not in edges[label]: raise KeyError(end)
_dir = g[start][2][label][end][4]
if directed is not None:
assert _dir == directed
try:
in_edges = g[end][3]
del edges[label][end]
if len(edges[label]) == 0:
del edges[label]
del in_edges[label][start]
if len(in_edges[label]) == 0:
del in_edges[label]
# undirected links are listed twice (except simple loops)
if not _dir and start != end:
edges = g[end][2]
in_edges = g[start][3]
del edges[label][start]
if len(edges[label]) == 0:
del edges[label]
del in_edges[label][end]
if len(in_edges[label]) == 0:
del in_edges[label]
        except KeyError:
            # warn first (previously this line was unreachable), then let the
            # unexpected inconsistency propagate to the caller
            warnings.warn(
                'Unexpected KeyError while removing {} edge ({}, {}, {})'
                .format('directed' if directed else 'undirected',
                        start, end, label),
                MiniGraphWarning
            )
            raise
def edge(self, start, end, label=None, directed=None):
e = self._graph[start][2][label][end]
if directed is not None:
assert e[4] == directed
return e
def edges(self):
return [e
for nid, n in self._graph.items()
for ed in n[2].values()
for e in ed.values()
# only include undirected links from the source node (whatever
# the source node was when it was instantiated)
if e[4] or e[0] == nid
]
    def find_edges(self, start=None, end=None, **kwargs):
        if start is Ellipsis: start = None
        if end is Ellipsis: end = None
        g = self._graph
        # candidate (start, {label: {end: edge}}) pairs, taken from the
        # out-edge dicts of either every node or just the given start node
        if start is None:
            xs = [(nid, n[2]) for nid, n in g.items()]
        else:
            xs = [(start, g[start][2])] if start in g else []
        # filter by label, if specified (None is itself a valid label)
        try:
            lbl = kwargs['label']
            xs = ((s, lbl, sd[lbl]) for s, sd in xs if lbl in sd)
        except KeyError:
            xs = ((s, lbl, ld) for s, sd in xs for lbl, ld in sd.items())
        # filter by end, if specified; undirected edges are indexed from both
        # endpoints, so only take them from their source when no start given
        if end is None:
            es = (e for s, lbl, ld in xs for e in ld.values()
                  if start is not None or e[4] or e[0] == s)
        else:
            es = (ld[end] for s, lbl, ld in xs if end in ld)
        # filter by directedness, if specified
        if 'directed' in kwargs:
            directed = kwargs['directed']
            es = (e for e in es if e[4] == directed)
        # filter by data, if specified
        try:
            data = kwargs['data']
            es = (e for e in es
                  if all(e[3].get(k) == v for k, v in data.items()))
        except KeyError:
            pass
        return list(es)
def order(self):
return len(self._graph)
def size(self):
return len(self.edges())
def degree(self, nodeid):
n = self._graph[nodeid]
return (
sum(len(ed) for ed in n[2].values()) +
len([
e for ed in n[3].values() for e in ed.values()
# only count undirected edges here if they are simple loops
if e[4] or e[0] == e[1]
])
)
def out_degree(self, nodeid):
n = self._graph[nodeid]
return sum(len(ed) for ed in n[2].values())
# return (
# sum(len(ed) for ed in n[2].values()) +
# len([e for ed in n[3].values()
# for e in ed.values()
# if e[4] == False and e[0] != e[1]])
# )
def in_degree(self, nodeid):
n = self._graph[nodeid]
return sum(len(ed) for ed in n[3].values())
# return (
# sum(len(ed) for ed in n[3].values()) +
# len([e for ed in n[2].values()
# for e in ed.values()
# if e[4] == False and e[0] != e[1]])
# )
def subgraph(self, nodeids):
g = self._graph
nidset = set(nodeids)
return MiniGraph(
nodes=[(nid, g[nid][1]) for nid in nodeids],
edges=[e for start in nodeids
for label, ed in g[start][2].items()
for end, e in ed.items() if end in nidset]
)
# def connected(self):
# nodeset = set()
# remaining = set(self.nodes.keys())
# for start in self.nodes:
# if node not in nodeset:
# nodeset.add(node)
# def _degree(nodeid, edgedicts):
# ds = []
# for d in edgedicts:
# if nodeid in d:
# ds.append(d[nodeid])
# return sum(len(ld) for d in ds for ld in d.values())
def _prune_edges(graph, nodeid):
g = graph[nodeid]
# forward links; remove reverse links on ends
edict = defaultdict(list)
for ed in g[2].values():
for e in ed.values():
if e[1] != nodeid: # this will get removed anyway
edict[e[1]].append(e)
for end, es in edict.items():
ld = graph[end][3]
for e in es:
del ld[e[2]][e[0]]
if len(ld[e[2]]) == 0:
del ld[e[2]]
# backward links; remove forward links on starts
edict = defaultdict(list)
for ed in g[3].values():
for e in ed.values():
if e[0] != nodeid: # this will get removed anyway
edict[e[0]].append(e)
for start, es in edict.items():
ld = graph[start][2]
for e in es:
del ld[e[2]][e[1]]
if len(ld[e[2]]) == 0:
del ld[e[2]]
# for a bit more speed, this can be inlined directly
def _add_edge(d, label, idx, e):
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if idx not in innerdict:
innerdict[idx] = e
else:
if innerdict[idx][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[idx][3].update(e[3])
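# Example of the edge shorthands accepted by MiniGraph.add_edges() (a sketch
# mirroring the length checks in that method rather than documented API):
#
#   mg = MiniGraph()
#   mg.add_edges([
#       (0, 1),                          # start, end
#       (1, 2, 'dep'),                   # + label
#       (2, 3, 'dep', {'w': 2}),         # + data
#       (3, 4, None, None, False),       # full form, undirected
#   ])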
|
|
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkFont
import tkinter.filedialog as tkfd
import tkinter.messagebox as tkmb
import os
import string
import pkgutil
from sol.config import GlobalConfig
C = GlobalConfig()
import sol.themes
class SetupGui:
def __init__(self, rootwin=None, parent=None):
self.rootwin, self.parent = rootwin, parent
self.root_frame = tk.Toplevel(takefocus=True)
self.root_frame.title('setup')
self.root_frame.focus_force()
self.menubar = tk.Menu(self.root_frame)
self.menubar.add_command(
label="undo all changes", command=self.undo_all)
self.menubar.add_command(
label="load defaults", command=self.reset_default)
self.root_frame.config(menu=self.menubar)
if self.rootwin is not None:
self.rootwin.withdraw()
self.major_changes = []
def close_fun(*args):
self.close()
self.rootwin.destroy()
else:
self.parent.root.call('wm', 'attributes', '.', '-topmost', '0')
# baddies
self.major_changes = ['NO_LAYERS', 'NO_Q', 'NO_LP', 'MODEL_SELECT',
'OSC_PORT', 'MTGUI_ENABLED', 'MTGUI_IP_ADDR',
'XTERNAL_PLAYER_SELECT', 'SELECTED_THEME']
def close_fun(*args):
self.close()
self.parent.setup_gui = None
self.parent.toggle_on_top()
self.root_frame.protocol("WM_DELETE_WINDOW", close_fun)
# self.generate_font_measurements()
self.instruction_to_fun = {
'folder_select': self.add_folder_select,
'file_select': self.add_file_select,
'int_choice': self.add_int_choice,
'bool_choice': self.add_bool_choice,
'float_choice': self.add_float_choice,
'list_choice': self.add_list_choice,
'list_enter': self.add_list_enter,
'str_enter': self.add_str_enter,
}
self.name_to_var = {}
self.name_to_frame = {}
self.config_book = ttk.Notebook(self.root_frame)
self.config_book.pack(expand=True, fill=tk.BOTH)
# generate theme info
sol_theme_path = os.path.dirname(sol.themes.__file__)
self.theme_names = [name for _, name, _ in pkgutil.iter_modules([sol_theme_path])]
# tabs
self.reset()
self.generate_font_measurements()
def reset(self, to_default=False):
if to_default:
C.load()
cur_no_tabs = len(self.config_book.tabs())
reopen_tab = -1
if cur_no_tabs > 0:
cur_tab_id = self.config_book.select()
reopen_tab = self.config_book.index(cur_tab_id)
for _ in range(cur_no_tabs):
self.config_book.forget(0)
self.param_tab = None
self.video_tab = None
self.gui_tab = None
tab_padding = '5 5 5 5'
self.param_tab = ttk.Frame(self.root_frame, padding=tab_padding)
self.network_tab = ttk.Frame(self.root_frame, padding=tab_padding)
self.video_tab = ttk.Frame(self.root_frame, padding=tab_padding)
self.gui_tab = ttk.Frame(self.root_frame, padding=tab_padding)
for tab_name in [(self.param_tab, 'sol config'),
(self.network_tab, 'network config'),
(self.video_tab, 'video player'),
(self.gui_tab, 'gui config')]:
self.config_book.add(tab_name[0], text=tab_name[1])
# construction instructions
# instruction sets are (instruction, hint text, variable name, [any
# extra variables])
param_tab_instr = [
('label_frame', 'default folders', '', []),
('folder_select', 'savedata directory', 'SAVEDATA_DIR', []),
('folder_select', 'screenshot directory', 'SCROT_DIR', []),
('label_frame', 'sol parameters', '', []),
('int_choice', '# of layers', 'NO_LAYERS', []),
('int_choice', '# of cue points', 'NO_Q', []),
('int_choice', '# of loop ranges', 'NO_LP', []),
('list_enter', 'ignored directories', 'IGNORED_DIRS', []),
('bool_choice', 'print debug info', 'DEBUG', []),
('label_frame', 'midi options', '', []),
('bool_choice', 'midi enabled', 'MIDI_ENABLED', []),
('bool_choice', 'separate keys for cue/loop points', 'SEPARATE_QP_LP', []),
('bool_choice', 'separate keys for de/activation', 'SEPARATE_DELETE', []),
('float_choice', 'default control sensitivity', 'DEFAULT_SENSITIVITY', []),
]
network_tab_instr = [
('label_frame', 'server config', '', []),
('int_choice', 'osc port', 'OSC_PORT', []),
('label_frame', 'multitouch client', '', []),
('bool_choice', 'enabled', 'MTGUI_ENABLED', []),
('str_enter', 'receiving ip', 'MTGUI_IP_ADDR', [])
]
video_tab_instr = [
('label_frame', 'video software config', '', []),
('list_choice', 'vj software', 'MODEL_SELECT', C.MODEL_SELECT_OPTIONS),
('folder_select', 'composition directory', 'RESOLUME_SAVE_DIR', []),
('list_enter', 'supported filetypes', 'SUPPORTED_FILETYPES', []),
('label_frame', 'external player config', '', []),
('list_choice', 'external player', 'XTERNAL_PLAYER_SELECT',
C.EXTERNAL_PLAYER_SELECT_OPTIONS),
('file_select', 'mpv script', 'MEMEPV_SCRIPT_PATH', []),
('str_enter', 'external command', 'EXTERNAL_PLAYER_COMMAND', []),
('label_frame', 'ffmpeg options', '', []),
('folder_select', 'ffmpeg directory (leave blank if in path)',
'FFMPEG_PATH', []),
('int_choice', '# of thumbnails to generate', 'NO_FRAMES', []),
('int_choice', 'thumbnail width', 'THUMBNAIL_WIDTH', []),
]
gui_tab_instr = [
('label_frame', 'sol options', '', []),
('list_choice', 'theme', 'SELECTED_THEME', self.theme_names),
('bool_choice', 'always on top', 'ALWAYS_ON_TOP', []),
('label_frame', 'thumbnail options', '', []),
('int_choice', 'display width', 'THUMB_W', []),
('int_choice', 'hover refresh interval (ms)', 'REFRESH_INTERVAL', []),
]
self.compile_config_page(param_tab_instr, self.param_tab)
self.compile_config_page(network_tab_instr, self.network_tab)
self.compile_config_page(video_tab_instr, self.video_tab)
self.compile_config_page(gui_tab_instr, self.gui_tab)
self.name_to_var['NO_LP'][0].trace('w', self.loop_lim_check)
if reopen_tab >= 0:
self.config_book.select(self.config_book.tabs()[reopen_tab])
self.root_frame.update_idletasks()
self.root_frame.after_idle(
lambda: self.root_frame.minsize(max(500, self.root_frame.winfo_width()),
self.root_frame.winfo_height()))
def undo_all(self):
self.reset()
def reset_default(self):
self.reset(True)
def close(self):
type_to_fun = {
'int': int,
'bool': bool,
'str': str,
'float': float,
'list': lambda sl: [s.strip() for s in sl.split(',')]
}
any_major = False
for k, (v_var, v_type) in self.name_to_var.items():
try:
new_val = type_to_fun[v_type](v_var.get())
if k in self.major_changes:
if C.dict[k] != new_val:
any_major = True
C.dict[k] = new_val
            except Exception:
pass
C.save()
if any_major:
tkmb.showwarning(
'', 'you may have to restart sol\nfor your changes to take effect')
self.root_frame.destroy()
def generate_font_measurements(self):
font = tkFont.Font()
# height
C.dict['FONT_HEIGHT'] = font.metrics("linespace")
# measure font widths
char_widths = {}
for c in string.printable:
char_widths[c] = font.measure(c)
if 'FONT_WIDTHS' in C.dict:
for k, v in char_widths.items():
C.dict['FONT_WIDTHS'][k] = v
else:
C.dict['FONT_WIDTHS'] = char_widths
count, running_sum = 0, 0
for _, v in C.dict['FONT_WIDTHS'].items():
count += 1
running_sum += v
C.dict['FONT_AVG_WIDTH'] = running_sum / count
def hide_unhide(self, selection, var_names):
keys_we_want = []
for k in self.name_to_frame.keys():
if '_' in k:
if any([v in k for v in var_names]):
keys_we_want.append(k)
for k in keys_we_want:
if selection in k:
self.name_to_frame[k].pack(
side=tk.TOP, expand=False, fill=tk.X, anchor='n')
else:
self.name_to_frame[k].pack_forget()
def compile_config_page(self, instruction_set, parent_tab):
last_label_frame = None
starting_optionals = []
for instruction in instruction_set:
instr_type, instr_text, instr_varn, instr_extr = instruction
if instr_type == 'label_frame':
last_label_frame = self.add_label_frame(instr_text, parent_tab)
elif instr_type in self.instruction_to_fun:
starting_choice = None
if instr_varn in C.dict:
starting_choice = C.dict[instr_varn]
if last_label_frame is not None:
new_var, var_type, new_frame = self.instruction_to_fun[instr_type](
instr_text, last_label_frame, starting_choice, instr_extr)
self.name_to_var[instr_varn] = (new_var, var_type)
self.name_to_frame[instr_varn] = new_frame
if instr_type == 'list_choice':
starting_optionals.append(
(starting_choice, instr_extr))
for sop in starting_optionals:
# print(sop)
self.hide_unhide(*sop)
#######################
# COMPILER HELPER FUNS
def add_label_frame(self, frame_name, parent_tab):
new_label_frame = ttk.LabelFrame(
parent_tab, text=frame_name, padding='5 5 5 5')
new_label_frame.pack(side=tk.TOP, expand=False, fill=tk.X, anchor='n')
return new_label_frame
def add_choice_row(self, parent_frame, hint_text):
new_frame = ttk.Frame(parent_frame)
new_frame.pack(fill=tk.X)
desc_label = ttk.Label(new_frame, text='{} :'.format(
hint_text), anchor='w', padding='0 5 0 5')
desc_label.pack(side=tk.LEFT)
return new_frame
def add_folder_select(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
if starting_choice is not None:
new_var.set(str(starting_choice))
def change_folder():
ask_fun = tkfd.askdirectory
new_folder_path = ask_fun(
parent=parent_frame, title='select folder', mustexist=True)
if new_folder_path:
new_folder_path = os.sep.join(new_folder_path.split('/'))
new_var.set(new_folder_path)
dot_but = ttk.Button(new_frame, text='..', command=change_folder, width=1, takefocus=False)
dot_but.pack(side=tk.RIGHT, anchor='e')
current_path_label = ttk.Label(
new_frame, textvar=new_var, anchor='w', relief='sunken')
current_path_label.pack(side=tk.RIGHT, fill=tk.X,
anchor='e', expand=True)
return new_var, 'str', new_frame
def add_file_select(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
if starting_choice is not None:
new_var.set(str(starting_choice))
def change_file():
ask_fun = tkfd.askopenfilename
new_file_path = ask_fun(parent=parent_frame, title='select file')
if new_file_path:
new_file_path = os.sep.join(new_file_path.split('/'))
new_var.set(new_file_path)
dot_but = ttk.Button(new_frame, text='..', command=change_file, width=1, takefocus=False)
dot_but.pack(side=tk.RIGHT, anchor='e')
current_path_label = ttk.Label(
new_frame, textvar=new_var, anchor='w', relief='sunken')
current_path_label.pack(side=tk.RIGHT, fill=tk.X,
anchor='e', expand=True)
return new_var, 'str', new_frame
def add_int_choice(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
if starting_choice is not None:
new_var.set(str(starting_choice))
no_entry = tk.Spinbox(new_frame, from_=0, to=9999,
textvariable=new_var, justify='left', width=5)
no_entry.pack(side=tk.RIGHT, anchor='e')
return new_var, 'int', new_frame
def add_float_choice(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
if starting_choice is not None:
new_var.set(str(starting_choice))
no_entry = tk.Spinbox(new_frame, from_=0, to=2, increment=0.005,
textvariable=new_var, justify=tk.RIGHT, width=5)
no_entry.pack(side=tk.RIGHT, anchor='e')
return new_var, 'float', new_frame
def add_bool_choice(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.IntVar()
if starting_choice is not None:
new_var.set(int(starting_choice))
check_but = ttk.Checkbutton(new_frame, variable=new_var, takefocus=False)
check_but.pack(side=tk.RIGHT, anchor='e')
return new_var, 'bool', new_frame
def add_list_choice(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
pos_values = ' '.join(extra_args)
selector = ttk.Combobox(new_frame, textvariable=new_var, values=pos_values)
selector.config(state='readonly')
selector.pack(side=tk.RIGHT, anchor='e')
if starting_choice is not None:
new_var.set(str(starting_choice))
def gen_hide_callback():
dis_var = new_var
x_args = extra_args
def callback(*args):
self.hide_unhide(dis_var.get(), x_args)
return callback
hide_cb = gen_hide_callback()
new_var.trace("w", hide_cb)
return new_var, 'str', new_frame
def add_list_enter(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
list_entry = ttk.Entry(new_frame, textvariable=new_var, justify="left")
list_entry.pack(side=tk.RIGHT, fill=tk.X, anchor='e', expand=True)
if starting_choice is not None:
starting_text = ", ".join(starting_choice)
new_var.set(starting_text)
return new_var, 'list', new_frame
def add_str_enter(self, hint_text, parent_frame, starting_choice=None, extra_args=None):
new_frame = self.add_choice_row(parent_frame, hint_text)
new_var = tk.StringVar()
if starting_choice is not None:
new_var.set(str(starting_choice))
str_entry = ttk.Entry(new_frame, textvariable=new_var, justify="left")
str_entry.pack(side=tk.RIGHT, fill=tk.X, anchor='e', expand=True)
return new_var, 'str', new_frame
################
# EXTRA CHECKS
def loop_lim_check(self, *args):
try:
no_lp, no_qp = int(self.name_to_var['NO_LP'][0].get()), int(self.name_to_var['NO_Q'][0].get())
if no_lp > no_qp:
self.name_to_var['NO_LP'][0].set(no_qp)
        except Exception:
pass
if __name__ == '__main__':
root = tk.Tk()
root.title('sol')
SetupGui(root)
root.mainloop()
|
|
import json
import random
import uuid
import zipfile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
from django.utils.translation import ugettext
import olympia.core.logger
from olympia import amo
from olympia.lib.cache import memoize, memoize_key
from olympia.amo.utils import normalize_string
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.discovery.utils import call_recommendation_server
from olympia.translations.fields import LocaleList, LocaleValidationError
log = olympia.core.logger.getLogger('z.redis')
def generate_addon_guid():
return '{%s}' % str(uuid.uuid4())
def clear_get_featured_ids_cache(*args, **kwargs):
cache_key = memoize_key('addons:featured', *args, **kwargs)
cache.delete(cache_key)
@memoize('addons:featured', timeout=60 * 10)
def get_featured_ids(app=None, lang=None, type=None, types=None):
from olympia.addons.models import Addon
ids = []
is_featured = Q(collections__featuredcollection__isnull=False)
if app:
is_featured &= Q(collections__featuredcollection__application=app.id)
qs = Addon.objects.valid()
if type:
qs = qs.filter(type=type)
elif types:
qs = qs.filter(type__in=types)
if lang:
has_locale = qs.filter(
is_featured &
Q(collections__featuredcollection__locale__iexact=lang))
if has_locale.exists():
ids += list(has_locale.distinct().values_list('id', flat=True))
none_qs = qs.filter(
is_featured &
Q(collections__featuredcollection__locale__isnull=True))
blank_qs = qs.filter(is_featured &
Q(collections__featuredcollection__locale=''))
qs = none_qs | blank_qs
else:
qs = qs.filter(is_featured)
other_ids = list(qs.distinct().values_list('id', flat=True))
random.shuffle(ids)
random.shuffle(other_ids)
ids += other_ids
return map(int, ids)
@memoize('addons:creatured', timeout=60 * 10)
def get_creatured_ids(category, lang=None):
from olympia.addons.models import Addon
from olympia.bandwagon.models import FeaturedCollection
if lang:
lang = lang.lower()
per_locale = set()
if isinstance(category, int):
category = CATEGORIES_BY_ID[category]
app_id = category.application
others = (Addon.objects.public()
.filter(
Q(collections__featuredcollection__locale__isnull=True) |
Q(collections__featuredcollection__locale=''),
collections__featuredcollection__isnull=False,
collections__featuredcollection__application=app_id,
category=category.id)
.distinct()
.values_list('id', flat=True))
if lang is not None and lang != '':
possible_lang_match = FeaturedCollection.objects.filter(
locale__icontains=lang,
application=app_id,
collection__addons__category=category.id).distinct()
for fc in possible_lang_match:
if lang in fc.locale.lower().split(','):
per_locale.update(
fc.collection.addons
.filter(category=category.id)
.values_list('id', flat=True))
others = list(others)
per_locale = list(per_locale)
random.shuffle(others)
random.shuffle(per_locale)
return map(int, filter(None, per_locale + others))
def verify_mozilla_trademark(name, user):
skip_trademark_check = (
user and user.is_authenticated() and user.email and
user.email.endswith(amo.ALLOWED_TRADEMARK_SUBMITTING_EMAILS))
def _check(name):
name = normalize_string(name, strip_puncutation=True).lower()
for symbol in amo.MOZILLA_TRADEMARK_SYMBOLS:
violates_trademark = (
name.count(symbol) > 1 or (
name.count(symbol) >= 1 and not
name.endswith(' for {}'.format(symbol))))
if violates_trademark:
raise forms.ValidationError(ugettext(
u'Add-on names cannot contain the Mozilla or '
u'Firefox trademarks.'))
if not skip_trademark_check:
errors = LocaleList()
if not isinstance(name, dict):
_check(name)
else:
for locale, localized_name in name.items():
try:
_check(localized_name)
except forms.ValidationError as exc:
errors.extend(exc.messages, locale)
if errors:
raise LocaleValidationError(errors)
return name
TAAR_LITE_FALLBACKS = [
'enhancerforyoutube@maximerf.addons.mozilla.org', # /enhancer-for-youtube/
'{2e5ff8c8-32fe-46d0-9fc8-6b8986621f3c}', # /search_by_image/
'uBlock0@raymondhill.net', # /ublock-origin/
'newtaboverride@agenedia.com'] # /new-tab-override/
TAAR_LITE_OUTCOME_REAL_SUCCESS = 'recommended'
TAAR_LITE_OUTCOME_REAL_FAIL = 'recommended_fallback'
TAAR_LITE_OUTCOME_CURATED = 'curated'
TAAR_LITE_FALLBACK_REASON_TIMEOUT = 'timeout'
TAAR_LITE_FALLBACK_REASON_EMPTY = 'no_results'
TAAR_LITE_FALLBACK_REASON_INVALID = 'invalid_results'
def get_addon_recommendations(guid_param, taar_enable):
guids = None
fail_reason = None
if taar_enable:
guids = call_recommendation_server(
guid_param, {},
settings.TAAR_LITE_RECOMMENDATION_ENGINE_URL)
outcome = (TAAR_LITE_OUTCOME_REAL_SUCCESS if guids
else TAAR_LITE_OUTCOME_REAL_FAIL)
if not guids:
fail_reason = (TAAR_LITE_FALLBACK_REASON_EMPTY if guids == []
else TAAR_LITE_FALLBACK_REASON_TIMEOUT)
else:
outcome = TAAR_LITE_OUTCOME_CURATED
if not guids:
guids = TAAR_LITE_FALLBACKS
return guids, outcome, fail_reason
def is_outcome_recommended(outcome):
return outcome == TAAR_LITE_OUTCOME_REAL_SUCCESS
def get_addon_recommendations_invalid():
return (
TAAR_LITE_FALLBACKS, TAAR_LITE_OUTCOME_REAL_FAIL,
TAAR_LITE_FALLBACK_REASON_INVALID)
def build_static_theme_xpi_from_lwt(lwt, upload_zip):
# create manifest
accentcolor = (('#%s' % lwt.persona.accentcolor) if lwt.persona.accentcolor
else amo.THEME_ACCENTCOLOR_DEFAULT)
textcolor = '#%s' % (lwt.persona.textcolor or '000')
manifest = {
"manifest_version": 2,
"name": unicode(lwt.name or lwt.slug),
"version": '1.0',
"theme": {
"images": {
"headerURL": lwt.persona.header
},
"colors": {
"accentcolor": accentcolor,
"textcolor": textcolor
}
}
}
if lwt.description:
manifest['description'] = unicode(lwt.description)
# build zip with manifest and background file
with zipfile.ZipFile(upload_zip, 'w', zipfile.ZIP_DEFLATED) as dest:
dest.writestr('manifest.json', json.dumps(manifest))
dest.write(lwt.persona.header_path, arcname=lwt.persona.header)
|
|
from android.runnable import run_on_ui_thread
from jnius import autoclass
from jnius import java_method
from jnius import PythonJavaClass
from plyer.facades import STT
from plyer.platforms.android import activity
ArrayList = autoclass('java.util.ArrayList')
Bundle = autoclass('android.os.Bundle')
Context = autoclass('android.content.Context')
Intent = autoclass('android.content.Intent')
RecognizerIntent = autoclass('android.speech.RecognizerIntent')
RecognitionListener = autoclass('android.speech.RecognitionListener')
SpeechRecognizer = autoclass('android.speech.SpeechRecognizer')
SpeechResults = SpeechRecognizer.RESULTS_RECOGNITION
class SpeechListener(PythonJavaClass):
__javainterfaces__ = ['android/speech/RecognitionListener']
# class variables because PythonJavaClass class failed
# to see them later in getters and setters
_error_callback = None
_result_callback = None
_partial_result_callback = None
_volume_callback = None
def __init__(self):
super().__init__()
# overwrite class variables in the object
self._error_callback = None
self._result_callback = None
self._partial_result_callback = None
self._volume_callback = None
# error handling
@property
def error_callback(self):
return self._error_callback
@error_callback.setter
def error_callback(self, callback):
'''
        Set the error callback. It is called when an error occurs.
:param callback: function with one parameter for error message
'''
self._error_callback = callback
# result handling
@property
def result_callback(self):
return self._result_callback
@result_callback.setter
def result_callback(self, callback):
'''
Set result callback. It is called when results are received.
:param callback: function with one parameter for lists of strings
'''
self._result_callback = callback
@property
def partial_result_callback(self):
return self._partial_result_callback
@partial_result_callback.setter
def partial_result_callback(self, callback):
'''
Set partial result callback. It is called when partial results are
received while the listener is still in listening mode.
:param callback: function with one parameter for lists of strings
'''
self._partial_result_callback = callback
# voice changes handling
@property
def volume_callback(self):
return self._volume_callback
@volume_callback.setter
def volume_callback(self, callback):
'''
        Set the voice volume callback.
        It is called when the loudness of the voice changes.
:param callback: function with one parameter for volume RMS dB (float).
'''
self._volume_callback = callback
# Implementation Java Interfaces
@java_method('()V')
def onBeginningOfSpeech(self):
pass
@java_method('([B)V')
def onBufferReceived(self, buffer):
pass
@java_method('()V')
def onEndOfSpeech(self):
pass
@java_method('(I)V')
def onError(self, error):
msg = ''
if error == SpeechRecognizer.ERROR_AUDIO:
msg = 'audio'
if error == SpeechRecognizer.ERROR_CLIENT:
msg = 'client'
if error == SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
msg = 'insufficient_permissions'
if error == SpeechRecognizer.ERROR_NETWORK:
msg = 'network'
if error == SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
msg = 'network_timeout'
if error == SpeechRecognizer.ERROR_NO_MATCH:
msg = 'no_match'
if error == SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
msg = 'recognizer_busy'
if error == SpeechRecognizer.ERROR_SERVER:
msg = 'server'
if error == SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
msg = 'speech_timeout'
if msg and self.error_callback:
self.error_callback('error:' + msg)
@java_method('(ILandroid/os/Bundle;)V')
def onEvent(self, event_type, params):
pass
@java_method('(Landroid/os/Bundle;)V')
def onPartialResults(self, results):
texts = []
matches = results.getStringArrayList(SpeechResults)
for match in matches.toArray():
if isinstance(match, bytes):
match = match.decode('utf-8')
texts.append(match)
if texts and self.partial_result_callback:
self.partial_result_callback(texts)
@java_method('(Landroid/os/Bundle;)V')
def onReadyForSpeech(self, params):
pass
@java_method('(Landroid/os/Bundle;)V')
def onResults(self, results):
texts = []
matches = results.getStringArrayList(SpeechResults)
for match in matches.toArray():
if isinstance(match, bytes):
match = match.decode('utf-8')
texts.append(match)
if texts and self.result_callback:
self.result_callback(texts)
@java_method('(F)V')
def onRmsChanged(self, rmsdB):
if self.volume_callback:
self.volume_callback(rmsdB)
class AndroidSpeech(STT):
'''
Android Speech Implementation.
Android class `SpeechRecognizer`'s listening deactivates automatically.
    Class methods `_on_error()` and `_on_result()` act as listeners. You can find
documentation here:
https://developer.android.com/reference/android/speech/RecognitionListener
'''
def _on_error(self, msg):
self.errors.append(msg)
self.stop()
def _on_result(self, messages):
self.results.extend(messages)
self.stop()
def _on_partial(self, messages):
self.partial_results.extend(messages)
@run_on_ui_thread
def _start(self):
intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)
intent.putExtra(
RecognizerIntent.EXTRA_CALLING_PACKAGE,
activity.getPackageName()
)
# language preferences
intent.putExtra(
RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE, self.language
)
intent.putExtra(
RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH
)
# results settings
intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 1000)
intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, True)
if self.prefer_offline:
intent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, True)
# listener and callbacks
listener = SpeechListener()
listener.error_callback = self._on_error
listener.result_callback = self._on_result
listener.partial_result_callback = self._on_partial
# create recognizer and start
self.speech = SpeechRecognizer.createSpeechRecognizer(activity)
self.speech.setRecognitionListener(listener)
self.speech.startListening(intent)
@run_on_ui_thread
def _stop(self):
if not self.speech:
return
# stop listening
self.speech.stopListening()
# free object
self.speech.destroy()
self.speech = None
def _exist(self):
return bool(
SpeechRecognizer.isRecognitionAvailable(activity)
)
def instance():
return AndroidSpeech()
|
|
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from interceptor.openstack.common import gettextutils
from interceptor.openstack.common import importutils
gettextutils.install('interceptor')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (hostname, fqdn):
if 'host' in name:
return 'interceptor'
elif value.endswith(hostname):
return value.replace(hostname, 'interceptor')
elif value.endswith(fqdn):
return value.replace(fqdn, 'interceptor')
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
import json
from optparse import OptionParser
import os
import shlex
import shutil
import signal
import socket
from subprocess import Popen, CalledProcessError, PIPE
import sys
import time
import urllib
import MySQLdb
options = None
devnull = open('/dev/null', 'w')
vttop = os.environ['VTTOP']
vtroot = os.environ['VTROOT']
vtdataroot = os.environ.get('VTDATAROOT', '/vt')
hostname = socket.gethostname()
vtportstart = int(os.environ.get('VTPORTSTART', '6700'))
class TestError(Exception):
pass
class Break(Exception):
pass
# tmp files management: all under /vt/tmp
tmp_root = os.path.join(vtdataroot, 'tmp')
try:
os.makedirs(tmp_root)
except OSError:
# directory already exists
pass
def debug(msg):
if options.verbose:
print msg
sys.stdout.flush()
def get_args():
global options
parser = OptionParser(usage="usage: %prog [options] [test_names]")
parser.add_option('-v', '--verbose', action='store_true', help='show a lot of logs')
parser.add_option('-d', '--debug', action='store_true', help='utils.pause() statements will wait for user input')
parser.add_option('--no-build', action='store_true', help='skip the build commands')
parser.add_option('--skip-teardown', action='store_true', help='do not kill processes after the tests are done')
(options, args) = parser.parse_args()
if not args:
args = ['run_all']
return args
def test_case(fn):
def body():
debug("========== " + fn.__name__ + " ==========")
fn()
return body
def remove_tmp_files():
try:
shutil.rmtree(tmp_root)
except OSError as e:
if options.verbose:
print >> sys.stderr, e, tmp_root
def pause(prompt):
if options.debug:
raw_input(prompt)
# port management: reserve count consecutive ports, returns the first one
def reserve_ports(count):
global vtportstart
result = vtportstart
vtportstart += count
return result
# sub-process management
pid_map = {}
already_killed = []
def _add_proc(proc):
pid_map[proc.pid] = proc
with open(tmp_root+'/test-pids', 'a') as f:
print >> f, proc.pid, os.path.basename(proc.args[0])
def kill_sub_processes():
for proc in pid_map.values():
if proc.pid and proc.returncode is None:
proc.kill()
if not os.path.exists(tmp_root+'/test-pids'):
return
with open(tmp_root+'/test-pids') as f:
for line in f:
try:
parts = line.strip().split()
pid = int(parts[0])
proc = pid_map.get(pid)
if not proc or (proc and proc.pid and proc.returncode is None):
if pid not in already_killed:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if options.verbose:
print >> sys.stderr, e
def kill_sub_process(proc):
pid = proc.pid
proc.kill()
if pid and pid in pid_map:
del pid_map[pid]
already_killed.append(pid)
# run in foreground, possibly capturing output
def run(cmd, trap_output=False, raise_on_error=True, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
if trap_output:
kargs['stdout'] = PIPE
kargs['stderr'] = PIPE
if options.verbose:
print "run:", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems())
proc = Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode:
if raise_on_error:
raise TestError('cmd fail:', args, stdout, stderr)
else:
if options.verbose:
print 'cmd fail:', args, stdout, stderr
return stdout, stderr
# run sub-process, expects failure
def run_fail(cmd, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
kargs['stdout'] = PIPE
kargs['stderr'] = PIPE
if options.verbose:
print "run: (expect fail)", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems())
proc = Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode == 0:
debug("stdout:\n" + stdout + "stderr:\n" + stderr)
raise TestError('expected fail:', args, stdout, stderr)
return stdout, stderr
# run a daemon - kill when this script exits
def run_bg(cmd, **kargs):
if options.verbose:
print "run:", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems())
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
proc = Popen(args=args, **kargs)
proc.args = args
_add_proc(proc)
return proc
def wait_procs(proc_list, raise_on_error=True):
for proc in proc_list:
pid = proc.pid
if pid:
already_killed.append(pid)
for proc in proc_list:
proc.wait()
for proc in proc_list:
if proc.returncode:
if options.verbose and proc.returncode not in (-9,):
sys.stderr.write("proc failed: %s %s\n" % (proc.returncode, proc.args))
if raise_on_error:
raise CalledProcessError(proc.returncode, ' '.join(proc.args))
def run_procs(cmds, raise_on_error=True):
procs = []
for cmd in cmds:
procs.append(run_bg(cmd))
wait_procs(procs, raise_on_error=raise_on_error)
# compile command line programs
compiled_progs = []
def prog_compile(names):
for name in names:
if name in compiled_progs:
continue
compiled_progs.append(name)
if options.no_build:
debug('Skipping build of '+name)
else:
run('go build', cwd=vttop+'/go/cmd/'+name)
# background zk process
# (note the zkocc addresses will only work with an extra zkocc process)
zk_port_base = reserve_ports(3)
zkocc_port_base = reserve_ports(3)
def zk_setup():
global zk_port_base
global zkocc_port_base
zk_ports = ":".join([str(zk_port_base), str(zk_port_base+1), str(zk_port_base+2)])
prog_compile(['zkctl', 'zk'])
run(vtroot+'/bin/zkctl -zk.cfg 1@'+hostname+':'+zk_ports+' init')
config = tmp_root+'/test-zk-client-conf.json'
with open(config, 'w') as f:
zk_cell_mapping = {'test_nj': 'localhost:%u'%(zk_port_base+2),
'test_ny': 'localhost:%u'%(zk_port_base+2),
'test_ca': 'localhost:%u'%(zk_port_base+2),
'global': 'localhost:%u'%(zk_port_base+2),
'test_nj:_zkocc': 'localhost:%u,localhost:%u,localhost:%u'%(zkocc_port_base,zkocc_port_base+1,zkocc_port_base+2),
'test_ny:_zkocc': 'localhost:%u'%(zkocc_port_base),
'test_ca:_zkocc': 'localhost:%u'%(zkocc_port_base),
'global:_zkocc': 'localhost:%u'%(zkocc_port_base),}
json.dump(zk_cell_mapping, f)
os.putenv('ZK_CLIENT_CONFIG', config)
run(vtroot+'/bin/zk touch -p /zk/test_nj/vt')
run(vtroot+'/bin/zk touch -p /zk/test_ny/vt')
run(vtroot+'/bin/zk touch -p /zk/test_ca/vt')
def zk_teardown():
global zk_port_base
zk_ports = ":".join([str(zk_port_base), str(zk_port_base+1), str(zk_port_base+2)])
run(vtroot+'/bin/zkctl -zk.cfg 1@'+hostname+':'+zk_ports+' teardown', raise_on_error=False)
def zk_wipe():
# Work around safety check on recursive delete.
run(vtroot+'/bin/zk rm -rf /zk/test_nj/vt/*')
run(vtroot+'/bin/zk rm -rf /zk/test_ny/vt/*')
run(vtroot+'/bin/zk rm -rf /zk/global/vt/*')
run(vtroot+'/bin/zk rm -f /zk/test_nj/vt')
run(vtroot+'/bin/zk rm -f /zk/test_ny/vt')
run(vtroot+'/bin/zk rm -f /zk/global/vt')
def validate_topology(ping_tablets=False):
if ping_tablets:
run_vtctl('Validate -ping-tablets')
else:
run_vtctl('Validate')
def zk_ls(path):
out, err = run(vtroot+'/bin/zk ls '+path, trap_output=True)
return sorted(out.splitlines())
def zk_cat(path):
out, err = run(vtroot+'/bin/zk cat '+path, trap_output=True)
return out
# vars helpers
def get_vars(port):
"""
Returns the dict for vars, from a vtxxx process, or None
if we can't get them.
"""
try:
f = urllib.urlopen('http://localhost:%u/debug/vars' % port)
data = f.read()
f.close()
except:
return None
return json.loads(data)
# zkocc helpers
def zkocc_start(cells=['test_nj'], extra_params=[]):
global zkocc_port_base
prog_compile(['zkocc'])
logfile = tmp_root + '/zkocc_%u.log' % zkocc_port_base
args = [vtroot+'/bin/zkocc',
'-port', str(zkocc_port_base),
'-logfile', logfile,
'-log.level', 'INFO',
] + extra_params + cells
sp = run_bg(args)
# wait for vars
timeout = 5.0
while True:
v = get_vars(zkocc_port_base)
    if v is None:
debug(" zkocc not answering at /debug/vars, waiting...")
else:
break
debug("sleeping a bit while we wait")
time.sleep(0.1)
timeout -= 0.1
if timeout <= 0:
raise TestError("timeout waiting for zkocc")
return sp
def zkocc_kill(sp):
kill_sub_process(sp)
sp.wait()
# vtctl helpers
def run_vtctl(clargs, log_level='WARNING', auto_log=False, **kwargs):
if auto_log:
if options.verbose:
log_level='INFO'
else:
log_level='ERROR'
prog_compile(['vtctl'])
args = [vtroot+'/bin/vtctl',
'-log.level='+log_level,
'-logfile=/dev/null']
if isinstance(clargs, str):
cmd = " ".join(args) + ' ' + clargs
else:
cmd = args + clargs
return run(cmd, **kwargs)
# vtclient2 helpers
# driver is one of:
# - vttablet (default), vttablet-streaming
# - vtdb, vtdb-streaming
# - vtdb-zkocc, vtdb-streaming-zkocc
# path is either: keyspace/shard for vttablet* or zk path for vtdb*
def vtclient2(uid, path, query, bindvars=None, user=None, password=None, driver=None,
verbose=False, raise_on_error=True):
prog_compile(['vtclient2'])
if (user is None) != (password is None):
raise TypeError("you should provide either both or none of user and password")
# for ZK paths to not have // in the path, that confuses things
if path.startswith('/'):
path = path[1:]
server = "localhost:%u/%s" % (uid, path)
if user is not None:
server = "%s:%s@%s" % (user, password, server)
cmdline = [vtroot+'/bin/vtclient2', '-server', server]
if bindvars:
cmdline.extend(['-bindvars', bindvars])
if driver:
cmdline.extend(['-driver', driver])
if verbose:
cmdline.append('-verbose')
cmdline.append(query)
return run(cmdline, raise_on_error=raise_on_error, trap_output=True)
# mysql helpers
def mysql_query(uid, dbname, query):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
cursor.execute(query)
try:
return cursor.fetchall()
finally:
conn.close()
def mysql_write_query(uid, dbname, query):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
conn.begin()
cursor.execute(query)
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def check_db_var(uid, name, value):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (vtdataroot, uid))
cursor = conn.cursor()
cursor.execute("show variables like '%s'" % name)
row = cursor.fetchone()
if row != (name, value):
raise TestError('variable not set correctly', name, row)
conn.close()
def check_db_read_only(uid):
return check_db_var(uid, 'read_only', 'ON')
def check_db_read_write(uid):
return check_db_var(uid, 'read_only', 'OFF')
def wait_db_read_only(uid):
for x in xrange(3):
try:
check_db_read_only(uid)
return
except TestError as e:
print >> sys.stderr, 'WARNING: ', e
time.sleep(1.0)
raise e
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import alerts
import internal_alerts
import json
import random
import string
import unittest
import webtest
from google.appengine.api import memcache
from google.appengine.ext import testbed
class InternalAlertsTest(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_all_stubs()
self.testapp = webtest.TestApp(internal_alerts.app)
def tearDown(self):
self.testbed.deactivate()
def user_helper(self, email, uid):
self.testbed.setup_env(
USER_EMAIL=email,
USER_ID=uid,
USER_IS_ADMIN='0',
overwrite=True)
def check_json_headers(self, res):
self.user_helper('tester@google.com', '123')
self.assertEqual(res.content_type, 'application/json')
# This is necessary for cross-site tools to retrieve internal alerts.
self.assertEqual(res.headers['access-control-allow-origin'], '*')
def test_get_no_data_cached(self):
self.user_helper('tester@google.com', '123')
res = self.testapp.get('/internal-alerts')
self.check_json_headers(res)
self.assertEqual(res.body, '{}')
def test_happy_path(self):
self.user_helper('tester@google.com', '123')
# Set it.
params = {'content': '{"alerts": ["hello", "world"]}'}
self.testapp.post('/internal-alerts', params)
def happy_path():
# Get it.
res = self.testapp.get('/internal-alerts')
self.check_json_headers(res)
data = json.loads(res.body)
# The server should have stuck a 'date' on there.
self.assertTrue('date' in data)
self.assertEqual(type(data['date']), int)
self.assertEqual(data['alerts'], ['hello', 'world'])
happy_path()
memcache.Client().flush_all()
happy_path()
def test_post_invalid_data_not_reflected(self):
self.user_helper('tester@google.com', '123')
params = {'content': '[{"this is not valid JSON'}
self.testapp.post('/internal-alerts', params, status=400)
res = self.testapp.get('/internal-alerts')
self.assertEqual(res.body, '{}')
def test_post_invalid_data_does_not_overwrite_valid_data(self):
self.user_helper('tester@google.com', '123')
# Populate the cache with something valid
params = {'content': '{"alerts": "everything is OK"}'}
self.testapp.post('/internal-alerts', params)
self.testapp.post('/internal-alerts', {'content': 'woozlwuzl'},
status=400)
res = self.testapp.get('/internal-alerts')
self.check_json_headers(res)
data = json.loads(res.body)
self.assertEqual(data['alerts'], 'everything is OK')
def test_internal_alerts_stored_in_history_have_correct_type(self):
test_alerts1 = {'alerts': ['hello', 'world', '1']}
test_alerts2 = {'alerts': ['hello', 'world', '2']}
self.testapp.post('/internal-alerts',
{'content': json.dumps(test_alerts1)})
self.testapp.post('/internal-alerts',
{'content': json.dumps(test_alerts2)})
alerts_query = alerts.AlertsJSON.query().order(alerts.AlertsJSON.date)
stored_alerts = alerts_query.fetch(limit=3)
self.assertEqual(2, len(stored_alerts))
self.assertEqual(stored_alerts[0].type, 'internal-alerts')
self.assertEqual(stored_alerts[1].type, 'internal-alerts')
def test_internal_alerts_same_as_last_alerts_are_added_to_history(self):
test_alerts = {'alerts': ['hello', 'world']}
alerts.AlertsJSON(json=json.dumps(test_alerts), type='alerts').put()
self.testapp.post('/internal-alerts',
{'content': json.dumps(test_alerts)})
alerts_query = alerts.AlertsJSON.query()
self.assertEqual(2, alerts_query.count(limit=3))
def test_large_number_of_internal_alerts(self):
self.user_helper('tester@google.com', '123')
# This generates ~2.5MB of JSON that compresses to ~750K. Real
# data compresses about 6x better.
random.seed(0xf00f00)
put_internal_alerts = self.generate_fake_internal_alerts(4000)
params = {'content': json.dumps(put_internal_alerts)}
self.testapp.post('/internal-alerts', params)
res = self.testapp.get('/internal-alerts')
got_internal_alerts = json.loads(res.body)
self.assertEquals(got_internal_alerts['alerts'],
put_internal_alerts['alerts'])
def test_alerts_too_big_for_memcache(self):
random.seed(0xf00f00)
big_alerts = self.generate_fake_internal_alerts(10000)
content = json.dumps(big_alerts)
self.assertTrue(len(content) > alerts.AlertsHandler.MAX_JSON_SIZE)
params = {'content': content}
self.testapp.post('/internal-alerts', params)
res = self.testapp.get('/internal-alerts')
got_alerts = json.loads(res.body)
self.assertEquals(got_alerts['alerts'], big_alerts['alerts'])
alerts_type = internal_alerts.InternalAlertsHandler.ALERTS_TYPE
self.assertEquals(memcache.get(alerts_type), None)
def test_no_user(self):
# Get it.
res = self.testapp.get('/internal-alerts')
self.check_json_headers(res)
data = json.loads(res.body)
# The server should have stuck a 'date' on there.
self.assertTrue('date' in data)
self.assertEqual(type(data['date']), int)
self.assertTrue('redirect-url' in data)
self.assertEqual(type(data['redirect-url']), unicode)
def test_invalid_user(self):
self.user_helper('tester@chromium.org', '123')
# Get it.
self.testapp.get('/internal-alerts', status=403)
def generate_fake_internal_alerts(self, n):
self.user_helper('tester@google.com', '123')
return {'alerts': [self.generate_fake_alert() for _ in range(n)]}
@staticmethod
def generate_fake_alert():
# fake labels
labels = [['', 'last_', 'latest_', 'failing_', 'passing_'],
['build', 'builder', 'revision'],
['', 's', '_url', '_reason', '_name']]
def label():
return string.join(map(random.choice, labels), '')
# fake values
def time():
return random.randint(1407976107614, 1408076107614) / 101.0
def build():
return random.randint(2737, 2894)
def revision():
return random.randint(288849, 289415)
tests = [['Activity', 'Async', 'Browser', 'Content', 'Input'],
['Manager', 'Card', 'Sandbox', 'Container'],
['Test.'],
['', 'Basic', 'Empty', 'More'],
['Mouse', 'App', 'Selection', 'Network', 'Grab'],
['Input', 'Click', 'Failure', 'Capture']]
def test():
return string.join(map(random.choice, tests), '')
def literal_array():
generator = random.choice([time, build, revision])
return [generator() for _ in range(random.randint(0, 10))]
def literal_map():
generators = [build, revision, test, literal_array]
obj = {}
for _ in range(random.randint(3, 9)):
obj[label()] = random.choice(generators)()
return obj
def value():
generators = [time, build, revision, test, literal_array,
literal_map]
return random.choice(generators)()
alert = {}
for _ in range(random.randint(6, 9)):
alert[label()] = value()
return alert
|
|
"""Nearly exact trust-region optimization subproblem."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
cho_solve)
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = ['_minimize_trustregion_exact',
'estimate_smallest_singular_value',
'singular_leading_submatrix',
'IterativeSubproblem']
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
**trust_region_options):
"""
Minimization of scalar function of one or more variables using
a nearly exact trust-region algorithm.
Options
-------
initial_tr_radius : float
Initial trust-region radius.
max_tr_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than ``gtol`` before successful
termination.
"""
if jac is None:
raise ValueError('Jacobian is required for trust region '
'exact minimization.')
if hess is None:
raise ValueError('Hessian matrix is required for trust region '
'exact minimization.')
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
subproblem=IterativeSubproblem,
**trust_region_options)
def estimate_smallest_singular_value(U):
"""Given upper triangular matrix ``U`` estimate the smallest singular
    value and the corresponding right singular vector in O(n**2) operations.
Parameters
----------
U : ndarray
Square upper triangular matrix.
Returns
-------
s_min : float
Estimated smallest singular value of the provided matrix.
z_min : ndarray
        Estimated right singular vector.
Notes
-----
The procedure is based on [1]_ and is done in two steps. First it finds
a vector ``e`` with components selected from {+1, -1} such that the
solution ``w`` from the system ``U.T w = e`` is as large as possible.
    Next it solves ``U v = w``. The smallest singular value is close
to ``norm(w)/norm(v)`` and the right singular vector is close
to ``v/norm(v)``.
    The estimate improves as the matrix becomes more ill-conditioned.
References
----------
.. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
An estimate for the condition number of a matrix. 1979.
SIAM Journal on Numerical Analysis, 16(2), 368-375.
"""
U = np.atleast_2d(U)
m, n = U.shape
if m != n:
raise ValueError("A square triangular matrix should be provided.")
# A vector `e` with components selected from {+1, -1}
# is selected so that the solution `w` to the system
# `U.T w = e` is as large as possible. Implementation
# based on algorithm 3.5.1, p. 142, from reference [2]
# adapted for lower triangular matrix.
p = np.zeros(n)
w = np.empty(n)
# Implemented according to: Golub, G. H., Van Loan, C. F. (2013).
# "Matrix computations". Forth Edition. JHU press. pp. 140-142.
for k in range(n):
wp = (1-p[k]) / U.T[k, k]
wm = (-1-p[k]) / U.T[k, k]
pp = p[k+1:] + U.T[k+1:, k]*wp
pm = p[k+1:] + U.T[k+1:, k]*wm
if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1):
w[k] = wp
p[k+1:] = pp
else:
w[k] = wm
p[k+1:] = pm
# The system `U v = w` is solved using backward substitution.
v = solve_triangular(U, w)
v_norm = norm(v)
w_norm = norm(w)
# Smallest singular value
s_min = w_norm / v_norm
# Associated vector
z_min = v / v_norm
return s_min, z_min
def gershgorin_bounds(H):
"""
Given a square matrix ``H`` compute upper
    and lower bounds for its eigenvalues (Gershgorin bounds).
    Defined in ref. [1].
References
----------
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
Trust region methods. 2000. Siam. pp. 19.
"""
H_diag = np.diag(H)
H_diag_abs = np.abs(H_diag)
H_row_sums = np.sum(np.abs(H), axis=1)
lb = np.min(H_diag + H_diag_abs - H_row_sums)
ub = np.max(H_diag - H_diag_abs + H_row_sums)
return lb, ub
def singular_leading_submatrix(A, U, k):
"""
Compute term that makes the leading ``k`` by ``k``
submatrix from ``A`` singular.
Parameters
----------
A : ndarray
Symmetric matrix that is not positive definite.
U : ndarray
Upper triangular matrix resulting of an incomplete
Cholesky decomposition of matrix ``A``.
k : int
Positive integer such that the leading k by k submatrix from
`A` is the first non-positive definite leading submatrix.
Returns
-------
delta : float
Amount that should be added to the element (k, k) of the
leading k by k submatrix of ``A`` to make it singular.
v : ndarray
        A vector such that ``v.T B v = 0``, where B is the matrix A after
``delta`` is added to its element (k, k).
"""
# Compute delta
delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]
n = len(A)
    # Initialize v
v = np.zeros(n)
v[k-1] = 1
# Compute the remaining values of v by solving a triangular system.
if k != 1:
v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])
return delta, v
class IterativeSubproblem(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by nearly exact iterative method.
Notes
-----
This subproblem solver was based on [1]_, [2]_ and [3]_,
which implement similar algorithms. The algorithm is basically
that of [1]_ but ideas from [2]_ and [3]_ were also used.
References
----------
.. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
Siam, pp. 169-200, 2000.
.. [2] J. Nocedal and S. Wright, "Numerical optimization",
Springer Science & Business Media. pp. 83-91, 2006.
.. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
pp. 553-572, 1983.
"""
# UPDATE_COEFF appears in reference [1]_
# in formula 7.3.14 (p. 190) named as "theta".
    # As recommended there, its value is fixed at 0.01.
UPDATE_COEFF = 0.01
EPS = np.finfo(float).eps
def __init__(self, x, fun, jac, hess, hessp=None,
k_easy=0.1, k_hard=0.2):
super(IterativeSubproblem, self).__init__(x, fun, jac, hess)
# When the trust-region shrinks in two consecutive
# calculations (``tr_radius < previous_tr_radius``)
# the lower bound ``lambda_lb`` may be reused,
# facilitating the convergence. To indicate no
# previous value is known at first ``previous_tr_radius``
# is set to -1 and ``lambda_lb`` to None.
self.previous_tr_radius = -1
self.lambda_lb = None
self.niter = 0
# ``k_easy`` and ``k_hard`` are parameters used
# to determine the stop criteria to the iterative
# subproblem solver. Take a look at pp. 194-197
        # from reference [1]_ for a more detailed description.
self.k_easy = k_easy
self.k_hard = k_hard
# Get Lapack function for cholesky decomposition.
# The implemented Scipy wrapper does not return
# the incomplete factorization needed by the method.
self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
# Get info about Hessian
self.dimension = len(self.hess)
self.hess_gershgorin_lb,\
self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
self.hess_inf = norm(self.hess, np.Inf)
self.hess_fro = norm(self.hess, 'fro')
        # A constant such that for vectors smaller than that
        # backward substitution is not reliable. It was established
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Fourth Edition. JHU press, p.165.
self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
def _initial_values(self, tr_radius):
"""Given a trust radius, return a good initial guess for
the damping factor, the lower bound and the upper bound.
        The values were chosen according to the guidelines in
section 7.3.8 (p. 192) from [1]_.
"""
# Upper bound for the damping factor
lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
self.hess_fro,
self.hess_inf))
# Lower bound for the damping factor
lambda_lb = max(0, -min(self.hess.diagonal()),
self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
self.hess_fro,
self.hess_inf))
# Improve bounds with previous info
if tr_radius < self.previous_tr_radius:
lambda_lb = max(self.lambda_lb, lambda_lb)
# Initial guess for the damping factor
if lambda_lb == 0:
lambda_initial = 0
else:
lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
return lambda_initial, lambda_lb, lambda_ub
def solve(self, tr_radius):
"""Solve quadratic subproblem"""
lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
n = self.dimension
hits_boundary = True
already_factorized = False
self.niter = 0
while True:
# Compute Cholesky factorization
if already_factorized:
already_factorized = False
else:
H = self.hess+lambda_current*np.eye(n)
U, info = self.cholesky(H, lower=False,
overwrite_a=False,
clean=True)
self.niter += 1
# Check if factorization succeeded
if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
# Successful factorization
                # Solve `U.T U p = -jac`
p = cho_solve((U, False), -self.jac)
p_norm = norm(p)
# Check for interior convergence
if p_norm <= tr_radius and lambda_current == 0:
hits_boundary = False
break
# Solve `U.T w = p`
w = solve_triangular(U, p, trans='T')
w_norm = norm(w)
                # Compute the Newton step according to
# formula (4.44) p.87 from ref [2]_.
delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
lambda_new = lambda_current + delta_lambda
if p_norm < tr_radius: # Inside boundary
s_min, z_min = estimate_smallest_singular_value(U)
ta, tb = self.get_boundaries_intersections(p, z_min,
tr_radius)
# Choose `step_len` with the smallest magnitude.
# The reason for this choice is explained at
# ref [3]_, p. 6 (Immediately before the formula
# for `tau`).
step_len = min([ta, tb], key=abs)
# Compute the quadratic term (p.T*H*p)
quadratic_term = np.dot(p, np.dot(H, p))
# Check stop criteria
relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
if relative_error <= self.k_hard:
p += step_len * z_min
break
                    # Update uncertainty bounds
lambda_ub = lambda_current
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
# Compute Cholesky factorization
H = self.hess + lambda_new*np.eye(n)
c, info = self.cholesky(H, lower=False,
overwrite_a=False,
clean=True)
                    # Check if the factorization has succeeded
if info == 0: # Successful factorization
# Update damping factor
lambda_current = lambda_new
already_factorized = True
else: # Unsuccessful factorization
                        # Update uncertainty bounds
lambda_lb = max(lambda_lb, lambda_new)
# Update damping factor
lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
else: # Outside boundary
# Check stop criteria
relative_error = abs(p_norm - tr_radius) / tr_radius
if relative_error <= self.k_easy:
break
                    # Update uncertainty bounds
lambda_lb = lambda_current
# Update damping factor
lambda_current = lambda_new
elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
# jac_mag very close to zero
# Check for interior convergence
if lambda_current == 0:
p = np.zeros(n)
hits_boundary = False
break
s_min, z_min = estimate_smallest_singular_value(U)
step_len = tr_radius
# Check stop criteria
if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
p = step_len * z_min
break
                # Update uncertainty bounds
lambda_ub = lambda_current
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
# Update damping factor
lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
else: # Unsuccessful factorization
# Compute auxiliary terms
delta, v = singular_leading_submatrix(H, U, info)
v_norm = norm(v)
                # Update uncertainty interval
lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
# Update damping factor
lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
self.lambda_lb = lambda_lb
self.lambda_current = lambda_current
self.previous_tr_radius = tr_radius
return p, hits_boundary
|
|
#!/usr/bin/env python
"""Parser for reaction strings and files."""
import libsbml
import re
import sympy as sp
from sympy.abc import _clash
from .crncomplex import Complex
from .reaction import Reaction
__author__ = "Elisa Tonello"
__copyright__ = "Copyright (c) 2016, Elisa Tonello"
__license__ = "BSD"
__version__ = "0.0.1"
def parse_reaction(r):
"""Parse a (potentially reversible) reaction written as
reactionid: i1 R1 + ... + in Rn (k\_)<->(k) j1 P1 + ... + jm Pm
where (k\_)< is optional.
Return a two-element tuple with two reactions, if "<" is included
(the reaction is reversible),
or one reaction and None, if it is not reversible.
k, k\_ can be any string representing the rates. They are optional. If present,
they must be enclosed in parentheses.
No spaces are allowed between the parentheses and <->.
reactionid is optional. If the reaction is reversible, "_rev"
is added to the reactionid for the reverse reaction.
Everything after a # sign is ignored.
:type r: string
:param r: string of the form "reactionid: i1 R1 + ... + in Rn (k\_)<->(k) j1 P1 + ... + jm Pm".
:rtype: (Reaction, Reaction/None).
"""
# Everything after a # sign is ignored.
reaction = r.split("#")[0]
# Check for reaction id.
colon = reaction.split(":")
if len(colon) > 2:
raise ValueError("Unrecognised reaction. More then one colon in reaction definition.")
if len(colon) == 2:
reactionid = colon[0].strip()
reactionBody = colon[1]
else:
reactionid = None
reactionBody = colon[0]
if reactionBody != "":
pattern = re.compile("(.*?)(?:\((.*)\))?(<)?\->(?:\((.*)\))?(.*)")
m = pattern.match(reactionBody)
if m is None:
raise ValueError("Unrecognised reaction.")
else:
reacts, k_, inv, k, prods = m.groups()
reactants = parse_complex(reacts)
products = parse_complex(prods)
if inv == "<":
if reactionid != None: reactionidRev = reactionid + "_rev"
else: reactionidRev = None
return (Reaction(reactionid, reactants, products, parse_expr(k)), \
Reaction(reactionidRev, Complex(products), Complex(reactants), parse_expr(k_)))
else:
return (Reaction(reactionid, reactants, products, parse_expr(k)), None)
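# Example (illustrative only, not part of the original module): a reversible
# reaction string is split into a forward and a reverse Reaction, e.g.
#
#     fwd, rev = parse_reaction("r1: A + 2 B (k1r)<->(k1) C")
#
# yields a forward Reaction "r1" with rate expression k1 and a reverse
# Reaction "r1_rev" with rate expression k1r; for an irreversible string
# such as "A -> B" the second element of the returned tuple is None.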
def _valid_species(cexpr):
try:
sp.Symbol(cexpr)
except:
raise ValueError("Could not parse complex {}".format(cexpr))
def parse_complex(complex_string):
"""Parse a string representing a complex.
The string must be of the form "n1 s1 + n2 s2 + ...",
where the ni are the integer stoichiometric coefficients.
Stoichiometric coefficients equal to 1 can be omitted.
:type complex_string: string
:param complex_string: string of the form "n1 s1 + n2 s2 + ...".
:rtype: Complex.
"""
complex_string = complex_string.replace(" ", "").split("+")
pattern = re.compile("(\d*)(?:\*)?(.*)")
parsedComplex = {}
for c in complex_string:
m = pattern.match(c)
if m is None: raise ValueError("Unrecognised complex.")
m = m.groups()
if m[1] != '': _valid_species(m[1])
if m[0] == '' and m[1] != '':
parsedComplex[m[1]] = 1
else:
if m[0] != '':
parsedComplex[m[1]] = int(m[0])
return Complex(parsedComplex)
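# Example (illustrative only): parse_complex("2 A + B") returns
# Complex({'A': 2, 'B': 1}); a coefficient of 1 may be omitted, and the
# "2*A" spelling is accepted as well as "2 A".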
def param_to_rate(reaction):
"""Multiplies the rate by the reactant mass-action monomial.
:type reaction: Reaction
:rtype: Reaction.
"""
return Reaction(reaction.reactionid, \
reaction.reactant, \
reaction.product, \
reaction.rate * reaction.reactant.ma())
def _read_reactions(reacts):
"""Parse each reaction into forward
and backward reactions."""
rs = []
for row in reacts:
row = row.strip()
if row:
r = parse_reaction(row)
if r:
rs.append(r)
return rs
def parse_reaction_file(filename, rate = False):
"""Read a reaction file, and populate
reaction id and kineticParam if empty.
If rate is False, the expression enclosed in parentheses
is interpreted as rate / reactant mass-action monomial,
otherwise as the full rate.
:type filename: string
:type rate: boolean
:param filename: path to file.
:param rate: True if the expressions in parentheses are rates.
:rtype: list of Reactions.
"""
with open(filename) as f:
reactions = parse_reactions(f.readlines(), rate)
return reactions
def parse_reactions(rs, rate = False):
"""Parse a list of reaction strings.
If rate is False, the expression enclosed in parentheses
is interpreted as rate / reactant mass-action monomial,
otherwise as the full rate.
:type rs: list of strings
:param rs: strings representing the reactions.
:rtype: list of Reactions.
"""
if not isinstance(rs, list):
raise ValueError("Required list of strings")
if rate:
return list(map(add_kinetic_param, add_reaction_id(_read_reactions(rs))))
else:
reacts = _read_reactions(rs)
return list(map(param_to_rate, list(map(add_kinetic_param, add_reaction_id(reacts)))))
def add_reaction_id(reactions):
""" Add a reactionid, if missing, of the form
'ri', where i is the index of the reaction, in the sublist of reactions without reactionid.
If there is a reverse reaction, its id is set to ri_rev.
:type reactions: list of Reactions.
:rtype: list of Reactions.
"""
rids = [r[0].reactionid for r in reactions if r[0].reactionid]
if len(rids) > 0 and len(rids) != len(list(set(rids))):
raise ValueError("Non-unique reaction ids.")
get_id = ("r" + str(j) for j in range(len(reactions))
if "r" + str(j) not in rids)
newreactions = []
for i in range(len(reactions)):
reaction, rev_reaction = reactions[i]
if not reaction.reactionid:
reaction._reactionid = next(get_id)
newreactions.append(reaction)
if rev_reaction:
rev_reaction._reactionid = reaction.reactionid + "_rev"
newreactions.append(rev_reaction)
else:
newreactions.append(reaction)
if rev_reaction: newreactions.append(rev_reaction)
return newreactions
def add_kinetic_param(reaction):
"""Add a kinetic param of the form 'k_reactionid', if missing.
:type reaction: Reaction.
:rtype: Reaction.
"""
if not reaction._rate: reaction._rate = sp.Symbol("k_" + reaction.reactionid)
return reaction
def ast_to_sympy_expr(math, debug = False):
"""Create a sympy expression from a libsbml AST.
:type math: libsbml.ASTNode
:rtype: sympy expression
"""
t = math.getType()
if debug:
print("Node of type {}, {}".format(dtypes[t], math.getName()))
if t == libsbml.AST_NAME:
if debug:
print("Node name: {}".format(math.getName()))
return sp.Symbol(math.getName())
if t == libsbml.AST_NAME_TIME:
return sp.Symbol('time')
if t == libsbml.AST_CONSTANT_PI:
# using same symbol as in SBML formula, rather than sp.pi
return sp.Symbol("Pi")
if t == libsbml.AST_CONSTANT_E:
return sp.E
if math.isInteger():
return sp.Integer(math.getInteger())
if math.isRational():
return sp.Rational(math.getNumerator(), math.getDenominator())
if math.isReal():
return sp.Float(math.getReal())
nc = math.getNumChildren()
children = [ast_to_sympy_expr(math.getChild(c)) for c in range(nc)]
if t == libsbml.AST_PLUS:
return sp.Add(*children)
if t == libsbml.AST_TIMES:
return sp.Mul(*children)
if t == libsbml.AST_POWER or t == libsbml.AST_FUNCTION_POWER:
if len(children) != 2:
raise ValueError("Error parsing SBML kineticLaw: {}", t)
return children[0]**children[1]
if t == libsbml.AST_MINUS:
if len(children) != 1 and len(children) != 2:
raise ValueError("Error parsing SBML kineticLaw: {}", t)
if len(children) == 1:
return - children[0]
else:
return children[0] - children[1]
if t == libsbml.AST_DIVIDE:
if len(children) != 2:
raise ValueError("Error parsing SBML kineticLaw: {}", t)
return children[0] / children[1]
if t == libsbml.AST_FUNCTION_EXP:
if len(children) != 1:
raise ValueError("Error parsing SBML kineticLaw: {}", t)
return sp.E**children[0]
if t == libsbml.AST_FUNCTION_DELAY:
return sp.Function('delay')(*children)
if math.isFunction():
return sp.Function(math.getName())(*children)
raise NotImplementedError("Type {} of AST Node not supported.", t)
def parse_expr(s):
"""Try to convert a string to a sympy expression,
reading the string using the SBML formula parser first,
and converting the libSBML AST to a sympy expression.
"""
if not s:
return None
s = s.replace('**', '^')
ast_tree = libsbml.parseL3Formula(s)
if ast_tree:
return ast_to_sympy_expr(ast_tree)
else:
raise ValueError("Could not parse expression.")
def flux_value(math):
if math.getType() == libsbml.AST_NAME and \
math.getName()[-10:] == "FLUX_VALUE":
return math.getName()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests dense attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseDenseAttentionTest(test.TestCase):
def test_one_dim_with_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 1]
scores_mask = np.array([[[True]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_no_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 3]
scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected attention distribution = softmax(scores) with zeros in
# positions where scores_mask == False.
# => attention_distribution000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
# attention_distribution001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
# = 1.35795272077
expected = np.array([[[1.35795272077]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_no_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected attention distribution = softmax(scores).
# => attention_distribution000 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
# attention_distribution001 = exp(0)/(exp(1) + exp(0) + exp(1))
# = 0.15536240349
# attention_distribution002 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
# - 0.42231879825 * 0.8
# = 0.44660872104
expected = np.array([[[0.44660872104]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_batch_size_two(self):
# Scores tensor of shape [2, 1, 1]
scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Value tensor of shape [2, 1, 1]
v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
# Scores mask tensor of shape [2, 1, 1]
scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected tensor of shape [2, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
# expected100 = softmax(scores)[1, 0] * 2.6 = 2.6
expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_serialization(self):
# Test serialization with causal
layer = dense_attention.BaseDenseAttention(causal=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.causal, True)
config = layer.get_config()
new_layer = dense_attention.BaseDenseAttention.from_config(config)
self.assertEqual(new_layer.causal, True)
@test_util.run_all_in_graph_and_eager_modes
class AttentionTest(test.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 1.1*1.6 = 1.76
expected = np.array([[[1.76]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 2, 3].
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64
# expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24
# expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84
# expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24
# expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84
# expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44
expected = np.array(
[[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 1.1*1.6 = 1.76
# expected100 = 2.1*2.6 = 5.46
expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_with_scale(self):
"""Tests that scores are multiplied by scale."""
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
attention_layer.scale = -2.
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = -2*1.1*1.6 = -3.52
expected = np.array([[[-3.52]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
expected = np.array([[[1.3561791301]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3
# = 0.58127362329
expected = np.array([[[0.58127362329]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# Expected scores of shape [1, 2, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
# = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
# => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))
# = 0.38936076605
# attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))
# = 0.61063923394
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
# expected010 = 0
expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_scale_None(self):
"""Tests that scale is None by default."""
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertIsNone(attention_layer.scale)
def test_scale_init_eager(self):
"""Tests that scale initializes to 1 when use_scale=True."""
with context.eager_mode():
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertAllClose(1., attention_layer.scale.value())
@test_util.deprecated_graph_mode_only
def test_scale_init_graph(self):
"""Tests that scale initializes to 1 when use_scale=True."""
with self.cached_session() as sess:
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
sess.run(attention_layer.scale.initializer)
self.assertAllClose(1., attention_layer.scale.value())
def test_self_attention_causal(self):
# Query-value tensor of shape [1, 3, 1]
q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
attention_layer = dense_attention.Attention(causal=True)
actual = attention_layer([q, q])
# Expected scores of shape [1, 3, 3]
# scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
# Expected attention distribution = softmax(scores) lower triangular
# => attention_distribution00 = [1., 0., 0.]
# attention_distribution01
# = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
# = [0.44028635073, 0.55971364926, 0.]
# attention_distribution02
# = [exp(-0.15), exp(-0.24), exp(0.09)]
# / (exp(-0.15) + exp(-0.24) + exp(0.09))
# = [0.31395396638, 0.28693232061, 0.399113713]
#
# Expected tensor of shape [1, 3, 1].
# expected000 = 0.5
# expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8
# = 0.66791409477
# expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3
# = 0.26678872577
expected = np.array(
[[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_inputs_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError, 'Attention layer must be called on a list of inputs'):
attention_layer(q)
def test_inputs_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q])
def test_inputs_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q, q, q, q])
def test_mask_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list'):
attention_layer([q, q], mask=mask)
def test_mask_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask])
def test_mask_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask, mask, mask])
def test_override_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking()(np.array([[[1.1]]], dtype=np.float32))
mask = np.array([[False]], dtype=np.bool_)
actual = attention_layer([q, q], mask=[mask, mask])
self.assertAllClose([[[0]]], actual)
def test_implicit_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32))
v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32))
actual = attention_layer([q, v])
self.assertAllClose([[[0], [1]]], actual)
def test_serialization(self):
# Test serialization with use_scale
layer = dense_attention.Attention(use_scale=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.use_scale, True)
config = layer.get_config()
new_layer = dense_attention.Attention.from_config(config)
self.assertEqual(new_layer.use_scale, True)
@test_util.run_all_in_graph_and_eager_modes
class AdditiveAttentionTest(test.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
expected = np.array([[[0.49550372683]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
# Scale tensor of shape [4]
attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# pylint:disable=line-too-long
# expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
# expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
# expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
# expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
# expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
# expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
# pylint:enable=line-too-long
expected = np.array(
[[[2.58044532581, 2.59734317449, 2.59964024652],
[2.59734317449, 2.59964024652, 2.59995130916]]],
dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
# expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277
expected = np.array(
[[[0.49550372683]], [[0.49991728277]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_no_scale(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention(use_scale=False)
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v, k], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
# = 0.64834251342
# pylint:enable=line-too-long
expected = np.array([[[0.64834251342]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 2, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],
# [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622],
# [0.40024951088, 0.09868766011, -0.43086157965]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
# => attention_distribution010
# = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.57482427975
# attention_distribution011
# = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.42517572025
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# expected010 = 0
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_serialization(self):
# Test serialization with use_scale
layer = dense_attention.AdditiveAttention(use_scale=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.use_scale, True)
config = layer.get_config()
new_layer = dense_attention.AdditiveAttention.from_config(config)
self.assertEqual(new_layer.use_scale, True)
@test_util.run_all_in_graph_and_eager_modes
class LowerTriangularMaskTest(test.TestCase):
def test_square_shape(self):
actual = dense_attention._lower_triangular_mask([3, 3])
expected = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_orthogonal_shape(self):
actual = dense_attention._lower_triangular_mask([3, 2])
expected = np.array(
[[True, False], [True, True], [True, True]], dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_three_dim(self):
actual = dense_attention._lower_triangular_mask([1, 3, 3])
expected = np.array(
[[[True, False, False], [True, True, False], [True, True, True]]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
if __name__ == '__main__':
test.main()
|
|
"""iptables helper functions.
Unlike the `firewall` module, these functions know nothing about PaaSTA and
could effectively be a third-party library. They just make working with
iptables a little bit easier.
"""
import collections
import contextlib
import logging
import iptc
log = logging.getLogger(__name__)
RULE_TARGET_SORT_ORDER = {
# all else defaults to '0'
'LOG': 1,
'REJECT': 2,
}
_RuleBase = collections.namedtuple(
'Rule', (
'protocol',
'src',
'dst',
'target',
'matches',
'target_parameters',
),
)
class Rule(_RuleBase):
"""Rule representation.
Working with iptc's rule classes directly doesn't work well, since rules
represent actual existing iptables rules, and changes are applied
immediately. They're also difficult to compare.
"""
def __new__(cls, *args, **kwargs):
result = _RuleBase.__new__(cls, *args, **kwargs)
result.validate()
return result
def _replace(self, **kwargs):
result = super(Rule, self)._replace(**kwargs)
result.validate()
return result
def validate(self):
if self.target == 'REJECT':
assert any(
name == 'reject-with' for name, _ in self.target_parameters
), 'REJECT rules must specify reject-with'
assert tuple(sorted(self.matches)) == self.matches, 'matches should be sorted'
for match_name, params in self.matches:
for param_name, param_value in params:
assert '_' not in param_name, 'use dashes instead of underscores in {}'.format(param_name)
assert isinstance(param_value, tuple), 'value of {} should be tuple'.format(param_name)
assert tuple(sorted(self.target_parameters)) == self.target_parameters, 'target_parameters should be sorted'
for param_name, param_value in self.target_parameters:
assert '_' not in param_name, 'use dashes instead of underscores in {}'.format(param_name)
assert isinstance(param_value, tuple), 'value of {} should be tuple'.format(param_name)
@classmethod
def from_iptc(cls, rule):
fields = {
'protocol': rule.protocol,
'src': rule.src,
'dst': rule.dst,
'target': rule.target.name,
'matches': (),
'target_parameters': (),
}
for param_name, param_value in sorted(rule.target.get_all_parameters().items()):
fields['target_parameters'] += ((param_name, tuple(param_value)),)
matches = []
for match in rule.matches:
matches.append((
match.name,
tuple((param, tuple(value)) for param, value in sorted(match.get_all_parameters().items())),
))
# ensure that matches are sorted for consistency with matching
fields['matches'] = tuple(sorted(matches))
return cls(**fields)
def to_iptc(self):
rule = iptc.Rule()
rule.protocol = self.protocol
rule.src = self.src
rule.dst = self.dst
target = rule.create_target(self.target)
for param_name, param_value in self.target_parameters:
target.set_parameter(param_name, param_value)
for name, params in self.matches:
match = rule.create_match(name)
for param_name, param_value in params:
match.set_parameter(param_name, param_value)
return rule
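# Note (illustrative): Rule is intended to round-trip, i.e. for a rule built
# with this module, Rule.from_iptc(my_rule.to_iptc()) should compare equal
# to my_rule; that equality is what makes the set-based comparison in
# ensure_chain below possible.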
@contextlib.contextmanager
def iptables_txn(table):
"""Temporarily disable autocommit and commit at the end.
If an exception occurs, changes are rolled back.
By default, changes to iptables rules are applied immediately. In some
cases, we want to avoid that.
https://github.com/ldx/python-iptables#autocommit
"""
assert table.autocommit is True, table.autocommit
try:
table.autocommit = False
yield
table.commit()
finally:
table.refresh()
table.autocommit = True
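# Example (illustrative sketch; the 'INPUT' chain and my_rule are assumptions):
#
#     table = iptc.Table(iptc.Table.FILTER)
#     with iptables_txn(table):
#         chain = iptc.Chain(table, 'INPUT')
#         chain.insert_rule(my_rule.to_iptc())
#     # the change is committed when the block exits; on error it is rolled back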
class ChainDoesNotExist(Exception):
pass
def all_chains():
return {chain.name for chain in iptc.Table(iptc.Table.FILTER).chains}
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has an exact set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = set(list_chain(chain))
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
insert_rule(chain, rule)
extra_rules = current_rules - set(rules)
if extra_rules:
delete_rules(chain, extra_rules)
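# Example (illustrative only; the chain name and rule are hypothetical):
#
#     ensure_chain('PAASTA-EXAMPLE', {
#         Rule(protocol='tcp', src='0.0.0.0/0.0.0.0', dst='0.0.0.0/0.0.0.0',
#              target='ACCEPT', matches=(), target_parameters=()),
#     })
#
# creates the chain if it is missing, inserts any rules not already present,
# and deletes rules that are not in the given set.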
def _rule_sort_key(rule_tuple):
old_index, rule = rule_tuple
target_name = rule.target
return (RULE_TARGET_SORT_ORDER.get(target_name, 0), old_index)
def reorder_chain(chain_name):
"""Ensure that any REJECT rules are last, and any LOG rules are second-to-last
"""
table = iptc.Table(iptc.Table.FILTER)
with iptables_txn(table):
rules = list_chain(chain_name)
chain = iptc.Chain(table, chain_name)
# sort the rules by rule_key, which uses (RULE_TARGET_SORT_ORDER, idx)
sorted_rules_with_indices = sorted(enumerate(rules), key=_rule_sort_key)
for new_index, (old_index, rule) in enumerate(sorted_rules_with_indices):
if new_index == old_index:
continue
log.debug('reordering chain {} rule {} to #{}'.format(chain_name, rule, new_index))
chain.replace_rule(rule.to_iptc(), new_index)
def ensure_rule(chain, rule):
rules = list_chain(chain)
if rule not in rules:
insert_rule(chain, rule)
def insert_rule(chain_name, rule):
log.debug('adding rule to {}: {}'.format(chain_name, rule))
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), chain_name)
chain.insert_rule(rule.to_iptc())
def delete_rules(chain_name, rules):
log.debug('deleting rules from {}: {}'.format(chain_name, rules))
table = iptc.Table(iptc.Table.FILTER)
with iptables_txn(table):
chain = iptc.Chain(table, chain_name)
for potential_rule in chain.rules:
if Rule.from_iptc(potential_rule) in rules:
chain.delete_rule(potential_rule)
def create_chain(chain_name):
log.debug('creating chain: {}'.format(chain_name))
iptc.Table(iptc.Table.FILTER).create_chain(chain_name)
def delete_chain(chain_name):
log.debug('deleting chain: {}'.format(chain_name))
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), chain_name)
chain.flush()
chain.delete()
def list_chain(chain_name):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
table = iptc.Table(iptc.Table.FILTER)
chain = iptc.Chain(table, chain_name)
# TODO: is there any way to do this without listing all chains? (probably slow)
# If the chain doesn't exist, chain.rules will be an empty list, so we need
# to make sure the chain actually _does_ exist.
if chain in table.chains:
return tuple(Rule.from_iptc(rule) for rule in chain.rules)
else:
raise ChainDoesNotExist(chain_name)
|
|
# LIBTBX_SET_DISPATCHER_NAME kamo.resolve_indexing_ambiguity
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.auto.multi_merging.resolve_reindex import ReferenceBased, BrehmDiederichs, KabschSelectiveBreeding
from yamtbx.util import read_path_list
from libtbx.utils import multi_out
import iotbx.phil
import libtbx.phil
import sys
import os
master_params_str = """
lstin = None
.type = path
.help = list of XDS_ASCII.HKL
method = brehm_diederichs *selective_breeding reference
.type = choice(multi=False)
.help = Method to resolve ambiguity
logfile = "reindexing.log"
.type = path
.help = logfile name
nproc = 1
.type = int
.help = number of processors
dry_run = False
.type = bool
.help = If true, do not modify files
skip_bad_files = False
.type = bool
.help = "Set true if you want to ignore bad files (too few reflections)"
d_min = 3
.type = float
.help = high resolution cutoff used in the method
min_ios = None
.type = float
.help = minimum I/sigma(I) cutoff used in the method
max_delta = 5
.type = float
.help = maximum obliquity used in determining the lattice symmetry, using a modified Le-Page algorithm.
max_cycles = 100
.type = int(value_min=1)
.help = Maximum number of cycles for selective_breeding algorithm.
reference_file = None
.type = path
.help = Only needed when method=reference
reference_label = None
.type = str
.help = data label of reference_file
"""
def run(params):
log_out = multi_out()
log_out.register("log", open(params.logfile, "w"), atexit_send_to=None)
log_out.register("stdout", sys.stdout)
libtbx.phil.parse(master_params_str).format(params).show(out=log_out, prefix=" ")
xac_files = read_path_list(params.lstin, only_exists=True, err_out=log_out)
if len(xac_files) == 0:
print >>log_out, "No (existing) files in the list: %s" % params.lstin
return
if params.method == "brehm_diederichs":
rb = BrehmDiederichs(xac_files, max_delta=params.max_delta,
d_min=params.d_min, min_ios=params.min_ios,
nproc=params.nproc, log_out=log_out)
elif params.method == "selective_breeding":
rb = KabschSelectiveBreeding(xac_files, max_delta=params.max_delta,
d_min=params.d_min, min_ios=params.min_ios,
nproc=params.nproc, log_out=log_out)
elif params.method == "reference":
import iotbx.file_reader
ref_file = iotbx.file_reader.any_file(params.reference_file)
if ref_file.file_type == "hkl":
ref_arrays = ref_file.file_server.miller_arrays
if not ref_arrays:
raise "No arrays in reference file"
if params.reference_label is not None:
ref_arrays = filter(lambda x: params.reference_label in x.info().labels, ref_arrays)
if not ref_arrays: raise "No arrays matched to specified label (%s)" % params.reference_label
ref_array = ref_arrays[0].as_intensity_array()
else:
ref_array = None
for array in ref_arrays:
if array.is_xray_intensity_array():
ref_array = array
print >>log_out, "Using %s as reference data" % array.info().label_string()
break
elif array.is_xray_amplitude_array():
ref_array = array.f_as_f_sq()
print >>log_out, "Using %s as reference data" % array.info().label_string()
break
elif ref_file.file_type == "pdb":
import mmtbx.utils
import mmtbx.command_line.fmodel
xrs = ref_file.file_content.xray_structure_simple()
fmodel_params = mmtbx.command_line.fmodel.fmodel_from_xray_structure_master_params.extract()
fmodel_params.fmodel.k_sol = 0.35
fmodel_params.fmodel.b_sol = 50
fmodel_params.high_resolution = params.d_min
ref_array = mmtbx.utils.fmodel_from_xray_structure(xray_structure=xrs, params=fmodel_params).f_model.as_intensity_array()
else:
raise "input file type invalid"
if ref_array is None:
raise "suitable reference data not found"
rb = ReferenceBased(xac_files, ref_array, max_delta=params.max_delta,
d_min=params.d_min, min_ios=params.min_ios,
nproc=params.nproc, log_out=log_out)
else:
raise "Unknown method: %s" % params.method
if rb.bad_files:
print "%s: %d bad files are included:" % ("WARNING" if params.skip_bad_files else "ERROR", len(rb.bad_files))
for f in rb.bad_files: print " %s" % f
if not params.skip_bad_files:
print
print "You may want to change d_min= or min_ios= parameters to include these files."
print "Alternatively, specify skip_bad_files=true to ignore these files (they are not included in output files)"
return
if params.method == "selective_breeding":
rb.assign_operators(max_cycle=params.max_cycles)
else:
rb.assign_operators()
rb.show_assign_summary()
if params.dry_run:
print >>log_out, "This is dry-run. Exiting here."
else:
out_prefix = os.path.splitext(os.path.basename(params.lstin))[0]
ofs_cell = open(out_prefix+"_reindexed_cells.dat", "w")
new_files = rb.modify_xds_ascii_files(cells_dat_out=ofs_cell)
lstout = out_prefix + "_reindexed.lst"
ofs = open(lstout, "w")
ofs.write("\n".join(new_files)+"\n")
ofs.close()
print >>log_out, "Reindexing done. For merging, use %s instead!" % lstout
if params.method == "brehm_diederichs":
print >>log_out, """
CCTBX-implementation (by Richard Gildea) of the "algorithm 2" of the following paper was used.
For publication, please cite:
Brehm, W. and Diederichs, K. Breaking the indexing ambiguity in serial crystallography.
Acta Cryst. (2014). D70, 101-109
http://dx.doi.org/10.1107/S1399004713025431"""
elif params.method == "selective_breeding":
print >>log_out, """
"Selective breeding" algorithm was used. For publication, please cite:
Kabsch, W. Processing of X-ray snapshots from crystals in random orientations.
Acta Cryst. (2014). D70, 2204-2216
http://dx.doi.org/10.1107/S1399004714013534"""
# run()
def show_help():
print """
Use this command to resolve indexing ambiguity
Case 1) Reference-based (when you have isomorphous data)
kamo.resolve_indexing_ambiguity formerge.lst method=reference reference_file=yourdata.mtz [d_min=3]
Case 2) Using selective-breeding algorithm (when you don't have reference data)
kamo.resolve_indexing_ambiguity formerge.lst method=selective_breeding [d_min=3]
Case 3) Using Brehm & Diederichs algorithm (when you don't have reference data)
kamo.resolve_indexing_ambiguity formerge.lst method=brehm_diederichs [d_min=3]
You can also give min_ios= to cutoff data by I/sigma(I).
"""
iotbx.phil.parse(master_params_str).show(prefix=" ", attributes_level=1)
print
# show_help()
if __name__ == "__main__":
import sys
if "-h" in sys.argv or "--help" in sys.argv:
show_help()
quit()
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
for arg in args:
if os.path.isfile(arg) and params.lstin is None:
params.lstin = arg
if params.lstin is None:
show_help()
print "Error: Give .lst of XDS_ASCII files"
quit()
if params.method is None:
show_help()
print "Error: Give method="
quit()
if params.method == "reference" and params.reference_file is None:
show_help()
print "Error: Give reference_file= when you use params.method=reference"
quit()
if params.method == "brehm_diederichs" and params.reference_file is not None:
show_help()
print "Error: You can't give reference_file= when you use params.method=brehm_diederichs"
quit()
run(params)
|
|
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import poplib
import time
from mailbox import Mailbox, Message
import mailpile.mailboxes
from mailpile.conn_brokers import Master as ConnBroker
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import UnorderedPicklable
from mailpile.util import *
class UnsupportedProtocolError(Exception):
pass
class POP3Mailbox(Mailbox):
"""
Basic implementation of POP3 Mailbox.
"""
def __init__(self, host,
user=None, password=None, use_ssl=True, port=None,
debug=False, conn_cls=None):
"""Initialize a Mailbox instance."""
Mailbox.__init__(self, '/')
self.host = host
self.user = user
self.password = password
self.use_ssl = use_ssl
self.port = port
self.debug = debug
self.conn_cls = conn_cls
self._lock = MboxRLock()
self._pop3 = None
self._connect()
def _connect(self):
with self._lock:
if self._pop3:
try:
self._pop3.noop()
return
except poplib.error_proto:
self._pop3 = None
with ConnBroker.context(need=[ConnBroker.OUTGOING_POP3]):
if self.conn_cls:
self._pop3 = self.conn_cls(self.host, self.port or 110)
self.secure = self.use_ssl
elif self.use_ssl:
self._pop3 = poplib.POP3_SSL(self.host, self.port or 995)
self.secure = True
else:
self._pop3 = poplib.POP3(self.host, self.port or 110)
self.secure = False
if self.debug:
self._pop3.set_debuglevel(self.debug)
self._keys = None
try:
self._pop3.user(self.user)
self._pop3.pass_(self.password)
except poplib.error_proto:
raise AccessError()
def _refresh(self):
with self._lock:
self._keys = None
self.iterkeys()
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def _get(self, key):
with self._lock:
if key not in self.iterkeys():
raise KeyError('Invalid key: %s' % key)
self._connect()
ok, lines, octets = self._pop3.retr(self._km[key])
if not ok.startswith('+OK'):
raise KeyError('Invalid key: %s' % key)
# poplib is stupid in that it loses the linefeeds, so we need to
# do some guesswork to bring them back to what the server provided.
# If we don't do this jiggering, then sizes don't match up, which
# could cause allocation bugs down the line.
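# Four cases below: the server may or may not have counted a trailing
# newline, and may have used LF or CRLF line endings.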
have_octets = sum(len(l) for l in lines)
if octets == have_octets + len(lines):
lines.append('')
return '\n'.join(lines)
elif octets == have_octets + 2*len(lines):
lines.append('')
return '\r\n'.join(lines)
elif octets == have_octets + len(lines) - 1:
return '\n'.join(lines)
elif octets == have_octets + 2*len(lines) - 2:
return '\r\n'.join(lines)
else:
raise ValueError('Length mismatch in message %s' % key)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
return Message(self._get(key))
def get_bytes(self, key):
"""Return a byte string representation or raise a KeyError."""
return self._get(key)
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self._get(key))
def get_msg_size(self, key):
with self._lock:
self._connect()
if key not in self.iterkeys():
raise KeyError('Invalid key: %s' % key)
ok, info, octets = self._pop3.list(self._km[key]).split()
return int(octets)
def stat(self):
with self._lock:
self._connect()
return self._pop3.stat()
def iterkeys(self):
"""Return an iterator over keys."""
# Note: POP3 *without UIDL* is useless. We don't support it.
with self._lock:
if self._keys is None:
self._connect()
try:
stat, key_list, octets = self._pop3.uidl()
except poplib.error_proto:
raise UnsupportedProtocolError()
self._keys = [tuple(k.split(' ', 1)) for k in key_list]
self._km = dict([reversed(k) for k in self._keys])
return [k[1] for k in self._keys]
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
return key in self.iterkeys()
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(self.iterkeys())
def flush(self):
"""Write any pending changes to the disk."""
self.close()
def close(self):
"""Flush and close the mailbox."""
try:
if self._pop3:
self._pop3.quit()
finally:
self._pop3 = None
self._keys = None
class MailpileMailbox(UnorderedPicklable(POP3Mailbox)):
UNPICKLABLE = ['_pop3', '_debug']
@classmethod
def parse_path(cls, config, path, create=False):
path = path.split('/')
if path and path[0].lower() in ('pop:', 'pop3:',
'pop3_ssl:', 'pop3s:'):
proto = path[0][:-1].lower()
userpart, server = path[2].rsplit("@", 1)
user, password = userpart.rsplit(":", 1)
if ":" in server:
server, port = server.split(":", 1)
else:
port = 995 if ('s' in proto) else 110
# This is a hack for GMail
if 'recent' in path[3:]:
user = 'recent:' + user
if not config:
debug = False
elif 'pop3' in config.sys.debug:
debug = 99
elif 'rescan' in config.sys.debug:
debug = 1
else:
debug = False
# WARNING: Order must match POP3Mailbox.__init__(...)
return (server, user, password, 's' in proto, int(port), debug)
raise ValueError('Not a POP3 url: %s' % path)
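# Example (illustrative only): a path such as
#     pop3s://user:secret@pop.example.com:995
# yields proto='pop3s', user='user', password='secret',
# server='pop.example.com', port=995, with SSL enabled because the
# protocol name contains an 's'.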
def save(self, *args, **kwargs):
# Do not save state locally
pass
##[ Test code follows ]#######################################################
if __name__ == "__main__":
import doctest
import sys
class _MockPOP3(object):
"""
Base mock that pretends to be a poplib POP3 connection.
>>> pm = POP3Mailbox('localhost', user='bad', conn_cls=_MockPOP3)
Traceback (most recent call last):
...
AccessError
>>> pm = POP3Mailbox('localhost', user='a', password='b',
... conn_cls=_MockPOP3)
>>> pm.stat()
(2, 123456)
>>> pm.iterkeys()
['evil', 'good']
>>> 'evil' in pm, 'bogon' in pm
(True, False)
>>> [msg['subject'] for msg in pm]
['Msg 1', 'Msg 2']
>>> pm.get_msg_size('evil'), pm.get_msg_size('good')
(47, 51)
>>> pm.get_bytes('evil')
'From: test@mailpile.is\\nSubject: Msg 1\\n\\nOh, hi!\\n'
>>> pm['invalid-key']
Traceback (most recent call last):
...
KeyError: ...
"""
TEST_MSG = ('From: test@mailpile.is\r\n'
'Subject: Msg N\r\n'
'\r\n'
'Oh, hi!\r\n')
DEFAULT_RESULTS = {
'user': lambda s, u: '+OK' if (u == 'a') else '-ERR',
'pass_': lambda s, u: '+OK Logged in.' if (u == 'b') else '-ERR',
'stat': (2, 123456),
'noop': '+OK',
'list_': lambda s: ('+OK 2 messages:',
['1 %d' % len(s.TEST_MSG.replace('\r', '')),
'2 %d' % len(s.TEST_MSG)], 0),
'uidl': ('+OK', ['1 evil', '2 good'], 0),
'retr': lambda s, m: ('+OK',
s.TEST_MSG.replace('N', m).splitlines(),
len(s.TEST_MSG)
if m[0] == '2' else
len(s.TEST_MSG.replace('\r', ''))),
}
RESULTS = {}
def __init__(self, *args, **kwargs):
def mkcmd(rval):
def r(rv):
if isinstance(rv, (str, unicode)) and rv[0] != '+':
raise poplib.error_proto(rv)
return rv
def cmd(*args, **kwargs):
if isinstance(rval, (str, unicode, list, tuple, dict)):
return r(rval)
else:
return r(rval(self, *args, **kwargs))
return cmd
for cmd, rval in dict_merge(self.DEFAULT_RESULTS, self.RESULTS
).iteritems():
self.__setattr__(cmd, mkcmd(rval))
def list(self, which=None):
msgs = self.list_()
if which:
return '+OK ' + msgs[1][1-int(which)]
return msgs
def __getattr__(self, attr):
return self.__getattribute__(attr)
class _MockPOP3_Without_UIDL(_MockPOP3):
"""
Mock that lacks the UIDL command.
>>> pm = POP3Mailbox('localhost', user='a', password='b',
... conn_cls=_MockPOP3_Without_UIDL)
>>> pm.iterkeys()
Traceback (most recent call last):
...
UnsupportedProtocolError
"""
RESULTS = {'uidl': '-ERR'}
results = doctest.testmod(optionflags=doctest.ELLIPSIS,
extraglobs={})
print '%s' % (results, )
if results.failed:
sys.exit(1)
if len(sys.argv) > 1:
mbx = MailpileMailbox(*MailpileMailbox.parse_path(None, sys.argv[1]))
print 'Status is: %s' % (mbx.stat(), )
print 'Downloading mail and listing subjects, hit CTRL-C to quit'
for msg in mbx:
print msg['subject']
time.sleep(2)
else:
mailpile.mailboxes.register(10, MailpileMailbox)
|
|
"""Support for water heater devices."""
from datetime import timedelta
import logging
import functools as ft
import voluptuous as vol
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.util.temperature import convert as convert_temperature
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, SERVICE_TURN_ON, SERVICE_TURN_OFF,
STATE_ON, STATE_OFF, TEMP_CELSIUS, PRECISION_WHOLE,
PRECISION_TENTHS, TEMP_FAHRENHEIT)
DEFAULT_MIN_TEMP = 110
DEFAULT_MAX_TEMP = 140
DOMAIN = 'water_heater'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SCAN_INTERVAL = timedelta(seconds=60)
SERVICE_SET_AWAY_MODE = 'set_away_mode'
SERVICE_SET_TEMPERATURE = 'set_temperature'
SERVICE_SET_OPERATION_MODE = 'set_operation_mode'
STATE_ECO = 'eco'
STATE_ELECTRIC = 'electric'
STATE_PERFORMANCE = 'performance'
STATE_HIGH_DEMAND = 'high_demand'
STATE_HEAT_PUMP = 'heat_pump'
STATE_GAS = 'gas'
SUPPORT_TARGET_TEMPERATURE = 1
SUPPORT_OPERATION_MODE = 2
SUPPORT_AWAY_MODE = 4
ATTR_MAX_TEMP = 'max_temp'
ATTR_MIN_TEMP = 'min_temp'
ATTR_AWAY_MODE = 'away_mode'
ATTR_OPERATION_MODE = 'operation_mode'
ATTR_OPERATION_LIST = 'operation_list'
ATTR_TARGET_TEMP_HIGH = 'target_temp_high'
ATTR_TARGET_TEMP_LOW = 'target_temp_low'
ATTR_CURRENT_TEMPERATURE = 'current_temperature'
CONVERTIBLE_ATTRIBUTE = [
ATTR_TEMPERATURE,
]
_LOGGER = logging.getLogger(__name__)
ON_OFF_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})
SET_AWAY_MODE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Required(ATTR_AWAY_MODE): cv.boolean,
})
SET_TEMPERATURE_SCHEMA = vol.Schema(vol.All(
{
vol.Required(ATTR_TEMPERATURE, 'temperature'): vol.Coerce(float),
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_OPERATION_MODE): cv.string,
}
))
SET_OPERATION_MODE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Required(ATTR_OPERATION_MODE): cv.string,
})
async def async_setup(hass, config):
"""Set up water_heater devices."""
component = hass.data[DOMAIN] = \
EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_SET_AWAY_MODE, SET_AWAY_MODE_SCHEMA,
async_service_away_mode
)
component.async_register_entity_service(
SERVICE_SET_TEMPERATURE, SET_TEMPERATURE_SCHEMA,
async_service_temperature_set
)
component.async_register_entity_service(
SERVICE_SET_OPERATION_MODE, SET_OPERATION_MODE_SCHEMA,
'async_set_operation_mode'
)
component.async_register_entity_service(
SERVICE_TURN_OFF, ON_OFF_SERVICE_SCHEMA,
'async_turn_off'
)
component.async_register_entity_service(
SERVICE_TURN_ON, ON_OFF_SERVICE_SCHEMA,
'async_turn_on'
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class WaterHeaterDevice(Entity):
"""Representation of a water_heater device."""
@property
def state(self):
"""Return the current state."""
return self.current_operation
@property
def precision(self):
"""Return the precision of the system."""
if self.hass.config.units.temperature_unit == TEMP_CELSIUS:
return PRECISION_TENTHS
return PRECISION_WHOLE
@property
def state_attributes(self):
"""Return the optional state attributes."""
data = {
ATTR_CURRENT_TEMPERATURE: show_temp(
self.hass, self.current_temperature, self.temperature_unit,
self.precision),
ATTR_MIN_TEMP: show_temp(
self.hass, self.min_temp, self.temperature_unit,
self.precision),
ATTR_MAX_TEMP: show_temp(
self.hass, self.max_temp, self.temperature_unit,
self.precision),
ATTR_TEMPERATURE: show_temp(
self.hass, self.target_temperature, self.temperature_unit,
self.precision),
ATTR_TARGET_TEMP_HIGH: show_temp(
self.hass, self.target_temperature_high, self.temperature_unit,
self.precision),
ATTR_TARGET_TEMP_LOW: show_temp(
self.hass, self.target_temperature_low, self.temperature_unit,
self.precision),
}
supported_features = self.supported_features
if supported_features & SUPPORT_OPERATION_MODE:
data[ATTR_OPERATION_MODE] = self.current_operation
if self.operation_list:
data[ATTR_OPERATION_LIST] = self.operation_list
if supported_features & SUPPORT_AWAY_MODE:
is_away = self.is_away_mode_on
data[ATTR_AWAY_MODE] = STATE_ON if is_away else STATE_OFF
return data
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
raise NotImplementedError
@property
def current_operation(self):
"""Return current operation ie. eco, electric, performance, ..."""
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return None
@property
def current_temperature(self):
"""Return the current temperature."""
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return None
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
return None
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
return None
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return None
def set_temperature(self, **kwargs):
"""Set new target temperature."""
raise NotImplementedError()
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
await self.hass.async_add_executor_job(
ft.partial(self.set_temperature, **kwargs))
def set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
raise NotImplementedError()
async def async_set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
await self.hass.async_add_executor_job(self.set_operation_mode,
operation_mode)
def turn_away_mode_on(self):
"""Turn away mode on."""
raise NotImplementedError()
async def async_turn_away_mode_on(self):
"""Turn away mode on."""
await self.hass.async_add_executor_job(self.turn_away_mode_on)
def turn_away_mode_off(self):
"""Turn away mode off."""
raise NotImplementedError()
async def async_turn_away_mode_off(self):
"""Turn away mode off."""
await self.hass.async_add_executor_job(self.turn_away_mode_off)
@property
def supported_features(self):
"""Return the list of supported features."""
raise NotImplementedError()
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert_temperature(DEFAULT_MIN_TEMP, TEMP_FAHRENHEIT,
self.temperature_unit)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert_temperature(DEFAULT_MAX_TEMP, TEMP_FAHRENHEIT,
self.temperature_unit)
async def async_service_away_mode(entity, service):
"""Handle away mode service."""
if service.data[ATTR_AWAY_MODE]:
await entity.async_turn_away_mode_on()
else:
await entity.async_turn_away_mode_off()
async def async_service_temperature_set(entity, service):
"""Handle set temperature service."""
hass = entity.hass
kwargs = {}
for value, temp in service.data.items():
if value in CONVERTIBLE_ATTRIBUTE:
kwargs[value] = convert_temperature(
temp,
hass.config.units.temperature_unit,
entity.temperature_unit
)
else:
kwargs[value] = temp
await entity.async_set_temperature(**kwargs)
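# --- Illustrative sketch (not part of Home Assistant) -----------------------
# WaterHeaterDevice above only defines the contract; a platform subclasses it
# and implements the properties/setters for the features it advertises. The
# class below is a minimal, hypothetical example (the name DemoWaterHeater and
# all values are invented) for a heater that only supports target temperature.
class DemoWaterHeater(WaterHeaterDevice):
    """Hypothetical in-memory water heater, shown only as a usage sketch."""

    def __init__(self):
        self._target = DEFAULT_MIN_TEMP
        self._current = 115

    @property
    def supported_features(self):
        # Only target-temperature control is advertised.
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def temperature_unit(self):
        return TEMP_FAHRENHEIT

    @property
    def current_temperature(self):
        return self._current

    @property
    def target_temperature(self):
        return self._target

    def set_temperature(self, **kwargs):
        # async_service_temperature_set() has already converted the value to
        # this entity's unit before it reaches this method.
        if ATTR_TEMPERATURE in kwargs:
            self._target = kwargs[ATTR_TEMPERATURE]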
|
|
#!/usr/bin/python3
# vim: sw=4 et
# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import argparse
import glob
import os
import platform
import shutil
import sys
import tempfile
import time
import pymod2pkg
import sh
from sh import Command
def pymodule2pkg(spectemplate):
specname = os.path.splitext(spectemplate)[0]
modulename = os.path.splitext(os.path.basename(specname))[0]
pkgname = pymod2pkg.module2package(
modulename, platform.linux_distribution()[0] or 'suse')
if modulename == 'openstack-macros':
pkgname = modulename
return pkgname
def get_osc_user():
import osc.conf
osc.conf.get_config()
return osc.conf.get_apiurl_usr(osc.conf.config['apiurl'])
def upload_meta(project, build_repository, linkproject):
projectlink = ''
if linkproject:
projectlink = '<link project="%s"/>\n' % linkproject
description = ''
if linkproject:
if 'ZUUL_UUID' in os.environ:
description = """
This project tests the following Zuul Change IDs: %(ZUUL_CHANGE_IDS)s\n
Branch used: %(ZUUL_BRANCH)s\n
Project used: %(ZUUL_PROJECT)s
""" % (os.environ)
templ = """
<project name="%(project)s">
<title>Autogenerated CI project</title>
<description>
%(description)s
</description>
<url>
%(url)s
</url>
%(projectlink)s
<person userid="%(user)s" role="maintainer"/>
<publish>
<disable/>
</publish>
%(build_repository)s
</project>""" % ({'project': project,
'user': get_osc_user(),
'description': description,
'url': os.environ.get('BUILD_URL'),
'projectlink': projectlink,
'build_repository': build_repository})
with tempfile.NamedTemporaryFile() as meta:
meta.write(templ.encode('UTF-8'))
meta.flush()
print('Updating meta for ', project)
# work around build service bug that forgets the publish flag
# https://github.com/openSUSE/open-build-service/issues/7126
for success_counter in range(2):
# work around build service bug that triggers a database deadlock
for fail_counter in range(1, 5):
try:
sh.osc('api', '-T', meta.name,
'/source/%s/_meta' % project)
break
except sh.ErrorReturnCode_1:
# Sleep a bit and try again. This has not been
# scientifically proven to be the correct sleep factor,
# but it seems to work
time.sleep(2)
continue
# wait for the source service to catch up with creation
if success_counter == 0:
# Sleep a bit and try again. This has not been scientifically
# proven to be the correct sleep factor, but it seems to work
time.sleep(3)
def upload_meta_enable_repository(project, linkproject):
repository = """
<repository name="standard" %(repoflags)s>
<path project="%(linkproject)s" repository="standard"/>
<arch>x86_64</arch>
</repository>
""" % ({'linkproject': linkproject,
'repoflags': 'rebuild="direct" block="local" linkedbuild="localdep"'})
upload_meta(project, repository, linkproject)
def freeze_project(project):
"""Generate a _frozenlink file for the project"""
result = sh.osc('api', '-X', 'POST', '/source/%s?cmd=freezelink' % project)
if '<status code="ok" />' not in result:
print('WARNING: freezing the project failed: %s' % result)
def create_new_build_project(workdir, project, linkproject):
sh.mkdir('-p', workdir)
olddir = os.getcwd()
try:
os.chdir(workdir)
if linkproject:
upload_meta_enable_repository(project, linkproject)
freeze_project(project)
sh.osc('init', project)
finally:
os.chdir(olddir)
def generate_pkgspec(pkgoutdir, spectemplate, pkgname):
obsservicedir = '/usr/lib/obs/service/'
outdir = ('--outdir', pkgoutdir)
olddir = os.getcwd()
try:
os.chdir(pkgoutdir)
renderspec = Command(os.path.join(obsservicedir, 'renderspec'))
renderspec(
'--input-template', os.path.join(olddir, spectemplate),
'--output-name', pkgname + '.spec', *outdir)
format_spec_file = Command(
os.path.join(obsservicedir, 'format_spec_file'))
format_spec_file(*outdir)
# configure a download cache to avoid downloading the same files
download_env = os.environ.copy()
download_env["CACHEDIRECTORY"] = os.path.join(
os.path.expanduser("~"), ".cache", "download_files")
download_files = Command(os.path.join(obsservicedir, 'download_files'))
download_files(_env=download_env, *outdir)
finally:
os.chdir(olddir)
def osc_mkpac(workdir, packagename):
olddir = os.getcwd()
try:
os.chdir(workdir)
sh.osc('mkpac', packagename)
finally:
os.chdir(olddir)
def spec_is_modified(pkgoutdir, project, pkgname):
specname = pkgname + ".spec"
cached_spec = os.path.join(pkgoutdir, '.osc', specname)
cleanup = False
if not os.path.exists(cached_spec):
cleanup = True
sh.osc('api', '/source/%s/%s/%s.spec' % (
project, pkgname, pkgname), _out=cached_spec)
r = sh.cmp(
'-s', os.path.join(pkgoutdir, specname), cached_spec, _ok_code=[0, 1])
if cleanup:
os.remove(cached_spec)
return r.exit_code == 1
def osc_detachbranch(workdir, project, pkgname):
olddir = os.getcwd()
try:
os.chdir(os.path.join(workdir))
sh.osc('detachbranch', project, pkgname)
os.mkdir(pkgname + '.b')
for f in glob.glob(os.path.join(pkgname, '*')):
os.rename(f, os.path.join(pkgname + '.b', os.path.basename(f)))
sh.rm('-rf', pkgname)
sh.osc('co', pkgname)
for f in glob.glob(os.path.join(pkgname + '.b', '*')):
dst = os.path.basename(f)
try:
os.unlink(os.path.join(pkgname, dst))
except OSError:
pass
os.rename(f, os.path.join(pkgname, dst))
os.rmdir(pkgname + '.b')
finally:
os.chdir(olddir)
def osc_commit_all(workdir, packagename):
olddir = os.getcwd()
try:
os.chdir(os.path.join(workdir, packagename))
sh.osc('addremove')
for o in sh.osc('service', 'localrun', 'source_validator'):
if o.startswith('###ASK'):
sh.osc('rm', '--force', o.strip().split()[1])
sh.osc('commit', '--noservice', '-n')
finally:
os.chdir(olddir)
def copy_extra_sources(specdir, pkgoutdir):
for f in glob.glob(os.path.join(specdir, '*')):
if f.endswith(".j2"):
continue
shutil.copy2(f, pkgoutdir)
def create_project(worktree, project, linkproject):
workdir = os.path.join(os.getcwd(), 'out')
sh.rm('-rf', workdir)
create_new_build_project(workdir, project, linkproject)
try:
existing_pkgs = [x.strip() for x in
sh.osc('ls', '-e', project, _iter=True)]
except Exception:
existing_pkgs = []
alive_pkgs = set()
worktree_pattern = os.path.join(worktree, 'openstack', '*', '*.spec.j2')
for spectemplate in sorted(glob.glob(worktree_pattern)):
pkgname = pymodule2pkg(spectemplate)
alive_pkgs.add(pkgname)
print(pkgname)
sys.stdout.flush()
pkgoutdir = os.path.join(workdir, pkgname)
osc_mkpac(workdir, pkgname)
copy_extra_sources(os.path.dirname(spectemplate), pkgoutdir)
generate_pkgspec(
pkgoutdir,
spectemplate, pkgname)
if pkgname in existing_pkgs:
if spec_is_modified(pkgoutdir, project, pkgname):
osc_detachbranch(workdir, project, pkgname)
print("Committing update to %s" % pkgname)
osc_commit_all(workdir, pkgname)
else:
print("Adding new pkg %s" % pkgname)
osc_commit_all(workdir, pkgname)
# remove no longer alive pkgs
for i in existing_pkgs:
if not linkproject and i not in alive_pkgs:
print("Removing outdated ", i)
sh.osc('rdelete', '-m', 'x', project, i)
def main():
parser = argparse.ArgumentParser(
description='Build a testproject for a given rpm-packaging checkout')
parser.add_argument('worktree',
help='directory with a rpm-packaging checkout')
parser.add_argument('project',
help='name of the destination buildservice project')
parser.add_argument('--linkproject',
help='create project link to given project')
args = parser.parse_args()
sh.ErrorReturnCode.truncate_cap = 9000
create_project(args.worktree, args.project, args.linkproject)
if __name__ == '__main__':
main()
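# Example invocation (illustrative; the script name, checkout path and OBS
# project names below are made up):
#
#   python3 build_testproject.py ~/src/rpm-packaging \
#       home:jdoe:rpm-packaging-ci --linkproject Cloud:OpenStack:Master
#
# For every openstack/*/*.spec.j2 template found in the checkout this renders
# a spec with renderspec, creates or updates the matching package in the
# target OBS project, and (when no --linkproject is given) deletes packages
# whose templates have been removed.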
|
|
import asyncio
from collections import OrderedDict
from functools import wraps
from itertools import zip_longest
import hashlib
import logging
import pickle
import random
import socket
logger = logging.getLogger(__name__)
def remote(func):
'''
Indicates that this instance method defines a remote procedure call (RPC). All
RPCs must be instance methods on a DatagramRPCProtocol subclass, and must
include at least one positional argument to accept the connecting peer, a tuple
of (ip, port).
Applying this decorator converts the given instance method to a remote RPC
request, while storing the original implementation as the function to invoke
to reply to that call.
'''
@asyncio.coroutine
@wraps(func)
def inner(*args, **kwargs):
instance, peer, *args = args
answer = yield from instance.request(peer, inner.remote_name, *args, **kwargs)
return answer
inner.remote_name = func.__name__
inner.reply_function = func
return inner
class DatagramRPCProtocol(asyncio.DatagramProtocol):
'''Implements an RPC mechanism over UDP. Create a subclass of DatagramRPCProtocol, and
decorate some of its methods with @remote to designate them as part of the
RPC interface.'''
def __init__(self, reply_timeout=5):
'''Initializes a DatagramRPCProtocol, optionally specifying an acceptable
reply_timeout (in seconds) while waiting for a response from a remote
server.'''
self.outstanding_requests = {}
self.reply_functions = self.find_reply_functions()
self.reply_timeout = reply_timeout
super(DatagramRPCProtocol, self).__init__()
def find_reply_functions(self):
'''Locates the reply functions (decorated by @remote) for all RPC methods,
returning a dictionary mapping {RPC method name: reply function}.'''
return {func.remote_name: func.reply_function
for func in self.__class__.__dict__.values()
if hasattr(func, 'remote_name')}
def connection_made(self, transport):
'''A callback from asyncio.DatagramProtocol indicating that the system
has established a connection. The transport should be saved for later.'''
logger.info('connection_made: %r', transport)
self.transport = transport
def datagram_received(self, data, peer):
'''The callback from asyncio.DatagramProtocol upon receipt of a datagram
packet. The data are the bytes of the packet's payload, and the peer
is the IP and port of the peer who sent the packet.'''
logger.info('data_received: %r, %r', peer, data)
direction, message_identifier, *details = pickle.loads(data)
if direction == 'request':
procedure_name, args, kwargs = details
self.request_received(peer, message_identifier, procedure_name, args, kwargs)
elif direction == 'reply':
answer, = details
self.reply_received(peer, message_identifier, answer)
def request_received(self, peer, message_identifier, procedure_name, args, kwargs):
'''Handles replying to an incoming RPC. May be overridden to inspect/modify
the incoming arguments or procedure_name, or to implement authorization
checks.'''
logger.info('request from %r: %r(*%r, **%r) as message %r',
peer, procedure_name, args, kwargs, message_identifier)
reply_function = self.reply_functions[procedure_name]
answer = reply_function(self, peer, *args, **kwargs)
self.reply(peer, message_identifier, answer)
def reply_received(self, peer, message_identifier, answer):
'''Handles a reply to an RPC. May be overridden to pre-process a reply, or
otherwise verify its authenticity.'''
logger.info('reply to message %r, answer %r', message_identifier, answer)
if message_identifier in self.outstanding_requests:
reply = self.outstanding_requests.pop(message_identifier)
reply.set_result(answer)
def reply_timed_out(self, message_identifier):
'''Scheduled after each outbound request to enforce the wait timeout on RPCs.'''
if message_identifier in self.outstanding_requests:
reply = self.outstanding_requests.pop(message_identifier)
reply.set_exception(socket.timeout)
def request(self, peer, procedure_name, *args, **kwargs):
'''Issues an RPC to a remote peer, returning a future that may either yield
the reply to the RPC, or a socket.timeout if the peer does not reply.'''
message_identifier = get_random_identifier()
reply = asyncio.Future()
self.outstanding_requests[message_identifier] = reply
loop = asyncio.get_event_loop()
loop.call_later(self.reply_timeout, self.reply_timed_out, message_identifier)
message = pickle.dumps(('request', message_identifier, procedure_name, args, kwargs))
self.transport.sendto(message, peer)
return reply
def reply(self, peer, message_identifier, answer):
'''Sends a reply to an earlier RPC call.'''
message = pickle.dumps(('reply', message_identifier, answer))
self.transport.sendto(message, peer)
class KademliaNode(DatagramRPCProtocol):
'''Implements the Kademlia protocol with the four primitive RPCs (ping, store, find_node, find_value),
and the three iterative procedures (lookup_node, get, put).'''
def __init__(self, alpha=3, k=20, identifier=None):
'''Initializes a Kademlia node, with the optional configuration parameters alpha and k (see the
Kademlia paper for details on these constants).'''
if identifier is None:
identifier = get_random_identifier()
self.identifier = identifier
self.routing_table = RoutingTable(self.identifier, k=k)
self.k = k
self.alpha = alpha
self.storage = Storage()
super(KademliaNode, self).__init__()
def request_received(self, peer, message_identifier, procedure_name, args, kwargs):
'''Overridden to place all peers this node receives requests from in the routing_table.'''
peer_identifier = args[0]
self.routing_table.update_peer(peer_identifier, peer)
super(KademliaNode, self).request_received(peer, message_identifier, procedure_name, args, kwargs)
def reply_received(self, peer, message_identifier, answer):
'''Overridden to place all peers this node sends replies to in the routing_table.'''
peer_identifier, answer = answer
self.routing_table.update_peer(peer_identifier, peer)
super(KademliaNode, self).reply_received(peer, message_identifier, answer)
@remote
def ping(self, peer, peer_identifier):
'''The primitive PING RPC. Returns the node's identifier to the requesting node.'''
logger.info('ping(%r, %r)', peer, peer_identifier)
return (self.identifier, self.identifier)
@remote
def store(self, peer, peer_identifier, key, value):
'''The primitive STORE RPC. Stores the given value, returning True if it was successful.'''
logger.info('store(%r, %r, %r, %r)', peer, peer_identifier, key, value)
self.storage[key] = value
return (self.identifier, True)
@remote
def find_node(self, peer, peer_identifier, key):
'''The primitive FIND_NODE RPC. Returns the k-closest peers to a key that this node is aware of.'''
logger.info('find_node(%r, %r, %r)', peer, peer_identifier, key)
return (self.identifier, self.routing_table.find_closest_peers(key, excluding=peer_identifier))
@remote
def find_value(self, peer, peer_identifier, key):
'''The primitive FIND_VALUE RPC. Returns either the value of a key, or the k-closest peers to it.'''
logger.info('find_value(%r, %r, %r)', peer, peer_identifier, key)
if key in self.storage:
return (self.identifier, ('found', self.storage[key]))
return (self.identifier, ('notfound', self.routing_table.find_closest_peers(key, excluding=peer_identifier)))
@asyncio.coroutine
def lookup_node(self, hashed_key, find_value=False):
'''The iterative node lookup procedure to find either the nearest peers to or the value of a key.'''
distance = lambda peer: peer[0] ^ hashed_key
contacted, dead = set(), set()
peers = {(peer_identifier, peer)
for peer_identifier, peer in
self.routing_table.find_closest_peers(hashed_key)}
if not peers:
raise KeyError(hashed_key, 'No peers available.')
while True:
uncontacted = peers - contacted
if not uncontacted:
break
closest = sorted(uncontacted, key=distance)[:self.alpha]
for peer_identifier, peer in closest:
contacted.add((peer_identifier, peer))
try:
if find_value:
result, contacts = yield from self.find_value(peer, self.identifier, hashed_key)
if result == 'found':
return contacts
else:
contacts = yield from self.find_node(peer, self.identifier, hashed_key)
except socket.timeout:
self.routing_table.forget_peer(peer_identifier)
dead.add((peer_identifier, peer))
continue
for new_peer_identifier, new_peer in contacts:
if new_peer_identifier == self.identifier:
continue
peers.add((new_peer_identifier, new_peer))
if find_value:
raise KeyError(hashed_key, 'Not found among any available peers.')
else:
return sorted(peers - dead, key=distance)[:self.k]
@asyncio.coroutine
def put(self, raw_key, value):
'''Given a plain key (usually a unicode) and a value, store it on the Kademlia network and
return the number of nodes who successfully accepted the value.'''
hashed_key = get_identifier(raw_key)
peers = yield from self.lookup_node(hashed_key, find_value=False)
store_tasks = [self.store(peer, self.identifier, hashed_key, value) for _, peer in peers]
results = yield from asyncio.gather(*store_tasks, return_exceptions=True)
return len([r for r in results if r is True])
@asyncio.coroutine
def get(self, raw_key):
'''Given a plain key (usually a unicode), find the value from the Kademlia network.'''
hashed_key = get_identifier(raw_key)
if hashed_key in self.storage:
return self.storage[hashed_key]
answer = yield from self.lookup_node(hashed_key, find_value=True)
return answer
class RoutingTable(object):
'''Implements the routing table described in the Kademlia paper. Peers are organized
by their XOR distance from the given node, and the most recently contacted peers
are kept easily at hand.'''
def __init__(self, node_identifier, k=20):
'''Initializes a RoutingTable with the node_identifier of a node, and the desired
k value (defaults to 20, as indicated in the Kademlia paper).'''
self.node_identifier = node_identifier
self.k = k
self.buckets = [OrderedDict() for _ in range(160)]
self.replacement_caches = [OrderedDict() for _ in range(160)]
super(RoutingTable, self).__init__()
def distance(self, peer_identifier):
'''Computes the XOR distance of the given identifier from the node.'''
return self.node_identifier ^ peer_identifier
def bucket_index(self, peer_identifier):
'''Returns the index of the k-bucket covering the provided identifier.'''
if not (0 <= peer_identifier < 2**160):
raise ValueError('peer_identifier should be a number between 0 and 2**160 - 1.')
return 160 - self.distance(peer_identifier).bit_length()
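# Worked example (illustrative numbers): for an XOR distance of 0b1011 (= 11)
# the bit_length() is 4, so the peer lands in bucket 160 - 4 = 156; a distance
# of 1 lands in bucket 159, and the farthest possible peers (distances with
# 160 significant bits) land in bucket 0.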
def update_peer(self, peer_identifier, peer):
'''Adds or updates a peer that this node has recently communicated with.'''
if peer_identifier == self.node_identifier:
return
bucket_index = self.bucket_index(peer_identifier)
bucket = self.buckets[bucket_index]
if peer_identifier in bucket:
del bucket[peer_identifier]
bucket[peer_identifier] = peer
elif len(bucket) < self.k:
bucket[peer_identifier] = peer
else:
replacement_cache = self.replacement_caches[bucket_index]
if peer_identifier in replacement_cache:
del replacement_cache[peer_identifier]
replacement_cache[peer_identifier] = peer
def forget_peer(self, peer_identifier):
'''Removes a peer from the Routing Table, possibly rotating in a standby peer this
node has recently communicated with.'''
if peer_identifier == self.node_identifier:
return
bucket_index = self.bucket_index(peer_identifier)
bucket = self.buckets[bucket_index]
replacement_cache = self.replacement_caches[bucket_index]
if peer_identifier in bucket:
del bucket[peer_identifier]
if len(replacement_cache):
replacement_identifier, replacement_peer = replacement_cache.popitem()
bucket[replacement_identifier] = replacement_peer
def find_closest_peers(self, key, excluding=None, k=None):
'''Returns the k-closest peers this node is aware of, excluding the optional
identifier given as the excluding keyword argument. If k peers aren't known,
it will return all peers this node is aware of.'''
peers = []
k = k or self.k
farther = range(self.bucket_index(key), -1, -1)
closer = range(self.bucket_index(key) + 1, 160, 1)
for f, c in zip_longest(farther, closer):
for i in (f, c):
if i is None:
continue
bucket = self.buckets[i]
for peer_identifier in reversed(bucket):
if peer_identifier == excluding:
continue
peers.append((peer_identifier, bucket[peer_identifier]))
if len(peers) == k:
return peers
return peers
class Storage(dict):
'''The storage associated with a node.'''
pass
def get_identifier(key):
'''Given a unicode or bytes value, returns the 160-bit SHA1 hash as an integer.'''
if hasattr(key, 'encode'):
key = key.encode()
digest = hashlib.sha1(key).digest()
return int.from_bytes(digest, byteorder='big', signed=False)
def get_random_identifier():
'''Produces a new 160-bit identifier from a random distribution.'''
identifier = random.getrandbits(160)
return get_identifier(identifier.to_bytes(20, byteorder='big', signed=False))
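# --- Illustrative usage sketch (not part of the module proper) --------------
# A minimal two-node session on localhost using only the classes defined
# above. The port numbers, key and value are invented; a real deployment
# would also need bucket refreshes and republishing, which this module does
# not provide.
def _demo():  # pragma: no cover
    loop = asyncio.get_event_loop()

    # Each node is an asyncio DatagramProtocol, so bind it to a UDP endpoint.
    _, alice = loop.run_until_complete(loop.create_datagram_endpoint(
        KademliaNode, local_addr=('127.0.0.1', 9000)))
    _, bob = loop.run_until_complete(loop.create_datagram_endpoint(
        KademliaNode, local_addr=('127.0.0.1', 9001)))

    # Bootstrap: one ping is enough for both routing tables to learn about
    # each other (request_received/reply_received do the bookkeeping).
    loop.run_until_complete(alice.ping(('127.0.0.1', 9001), alice.identifier))

    # Store a value through alice and read it back through bob.
    loop.run_until_complete(alice.put('greeting', 'hello world'))
    print(loop.run_until_complete(bob.get('greeting')))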
|
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
import threading
import Queue
from pyglet import app
from pyglet import clock
from pyglet import event
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
class PlatformEventLoop(object):
''' Abstract class, implementation depends on platform.
:since: pyglet 1.2
'''
def __init__(self):
self._event_queue = Queue.Queue()
self._is_running = threading.Event()
self._is_running.clear()
def is_running(self):
'''Return True if the event loop is currently processing, or False
if it is blocked or not activated.
:rtype: bool
'''
return self._is_running.is_set()
def post_event(self, dispatcher, event, *args):
'''Post an event into the main application thread.
The event is queued internally until the `run` method's thread
is able to dispatch the event. This method can be safely called
from any thread.
If the method is called from the `run` method's thread (for example,
from within an event handler), the event may be dispatched within
the same runloop iteration or the next one; the choice is
nondeterministic.
:Parameters:
`dispatcher` : EventDispatcher
Dispatcher to process the event.
`event` : str
Event name.
`args` : sequence
Arguments to pass to the event handlers.
'''
self._event_queue.put((dispatcher, event, args))
self.notify()
def dispatch_posted_events(self):
'''Immediately dispatch all pending events.
Normally this is called automatically by the runloop iteration.
'''
while True:
try:
dispatcher, event, args = self._event_queue.get(False)
except Queue.Empty:
break
dispatcher.dispatch_event(event, *args)
def notify(self):
'''Notify the event loop that something needs processing.
If the event loop is blocked, it will unblock and perform an iteration
immediately. If the event loop is running, another iteration is
scheduled for immediate execution afterwards.
'''
raise NotImplementedError('abstract')
def start(self):
pass
def step(self, timeout=None):
''':TODO: in mac/linux: return True if didn't time out'''
raise NotImplementedError('abstract')
def set_timer(self, func, interval):
raise NotImplementedError('abstract')
def stop(self):
pass
class EventLoop(event.EventDispatcher):
'''The main run loop of the application.
Calling `run` begins the application event loop, which processes
operating system events, calls `pyglet.clock.tick` to call scheduled
functions and calls `pyglet.window.Window.on_draw` and
`pyglet.window.Window.flip` to update window contents.
Applications can subclass `EventLoop` and override certain methods
to integrate another framework's run loop, or to customise processing
in some other way. You should not in general override `run`, as
this method contains platform-specific code that ensures the application
remains responsive to the user while keeping CPU usage to a minimum.
'''
_has_exit_condition = None
_has_exit = False
def __init__(self):
self._has_exit_condition = threading.Condition()
self.clock = clock.get_default()
self.is_running = False
def run(self):
'''Begin processing events, scheduled functions and window updates.
This method returns when `has_exit` is set to True.
Developers are discouraged from overriding this method, as the
implementation is platform-specific.
'''
self.has_exit = False
self._legacy_setup()
platform_event_loop = app.platform_event_loop
platform_event_loop.start()
self.dispatch_event('on_enter')
self.is_running = True
if True: # TODO runtime option.
self._run_estimated()
else:
self._run()
self.is_running = False
self.dispatch_event('on_exit')
platform_event_loop.stop()
def _run(self):
'''The simplest standard run loop, using constant timeout. Suitable
for well-behaving platforms (Mac, Linux and some Windows).
'''
platform_event_loop = app.platform_event_loop
while not self.has_exit:
timeout = self.idle()
platform_event_loop.step(timeout)
def _run_estimated(self):
'''Run-loop that continually estimates function mapping requested
timeout to measured timeout using a least-squares linear regression.
Suitable for oddball platforms (Windows).
'''
platform_event_loop = app.platform_event_loop
predictor = self._least_squares()
gradient, offset = predictor.next()
time = self.clock.time
while not self.has_exit:
timeout = self.idle()
if timeout is None:
estimate = None
else:
estimate = max(gradient * timeout + offset, 0.0)
if False:
print 'Gradient = %f, Offset = %f' % (gradient, offset)
print 'Timeout = %f, Estimate = %f' % (timeout, estimate)
t = time()
if not platform_event_loop.step(estimate) and estimate != 0.0 and \
estimate is not None:
dt = time() - t
gradient, offset = predictor.send((dt, estimate))
@staticmethod
def _least_squares(gradient=1, offset=0):
X = 0
Y = 0
XX = 0
XY = 0
n = 0
x, y = yield gradient, offset
X += x
Y += y
XX += x * x
XY += x * y
n += 1
while True:
x, y = yield gradient, offset
X += x
Y += y
XX += x * x
XY += x * y
n += 1
try:
gradient = (n * XY - X * Y) / (n * XX - X * X)
offset = (Y - gradient * X) / n
except ZeroDivisionError:
# Can happen in pathological case; keep current
# gradient/offset for now.
pass
def _legacy_setup(self):
# Disable event queuing for dispatch_events
from pyglet.window import Window
Window._enable_event_queue = False
# Dispatch pending events
for window in app.windows:
window.switch_to()
window.dispatch_pending_events()
def enter_blocking(self):
'''Called by pyglet internal processes when the operating system
is about to block due to a user interaction. For example, this
is common when the user begins resizing or moving a window.
This method provides the event loop with an opportunity to set up
an OS timer on the platform event loop, which will continue to
be invoked during the blocking operation.
The default implementation ensures that `idle` continues to be called
as documented.
:since: pyglet 1.2
'''
timeout = self.idle()
app.platform_event_loop.set_timer(self._blocking_timer, timeout)
def exit_blocking(self):
'''Called by pyglet internal processes when the blocking operation
completes. See `enter_blocking`.
'''
app.platform_event_loop.set_timer(None, None)
def _blocking_timer(self):
timeout = self.idle()
app.platform_event_loop.set_timer(self._blocking_timer, timeout)
def idle(self):
'''Called during each iteration of the event loop.
The method is called immediately after any window events (i.e., after
any user input). The method can return a duration after which
the idle method will be called again. The method may be called
earlier if the user creates more input events. The method
can return `None` to only wait for user events.
For example, return ``1.0`` to have the idle method called every
second, or immediately after any user events.
The default implementation dispatches the
`pyglet.window.Window.on_draw` event for all windows and uses
`pyglet.clock.tick` and `pyglet.clock.get_sleep_time` on the default
clock to determine the return value.
This method should be overridden by advanced users only. To have
code execute at regular intervals, use the
`pyglet.clock.schedule` methods.
:rtype: float
:return: The number of seconds before the idle method should
be called again, or `None` to block for user input.
'''
dt = self.clock.update_time()
redraw_all = self.clock.call_scheduled_functions(dt)
# Redraw all windows
for window in app.windows:
if redraw_all or (window._legacy_invalid and window.invalid):
window.switch_to()
window.dispatch_event('on_draw')
window.flip()
window._legacy_invalid = False
# Update timeout
return self.clock.get_sleep_time(True)
def _get_has_exit(self):
self._has_exit_condition.acquire()
result = self._has_exit
self._has_exit_condition.release()
return result
def _set_has_exit(self, value):
self._has_exit_condition.acquire()
self._has_exit = value
self._has_exit_condition.notify()
self._has_exit_condition.release()
has_exit = property(_get_has_exit, _set_has_exit,
doc='''Flag indicating if the event loop will exit in
the next iteration. When set, all waiting threads are interrupted (see
`sleep`).
Thread-safe since pyglet 1.2.
:see: `exit`
:type: bool
''')
def exit(self):
'''Safely exit the event loop at the end of the current iteration.
This method is a thread-safe equivalent for setting `has_exit` to
``True``. All waiting threads will be interrupted (see
`sleep`).
'''
self._set_has_exit(True)
app.platform_event_loop.notify()
def sleep(self, timeout):
'''Wait for some amount of time, or until the `has_exit` flag is
set or `exit` is called.
This method is thread-safe.
:Parameters:
`timeout` : float
Time to wait, in seconds.
:since: pyglet 1.2
:rtype: bool
:return: ``True`` if the `has_exit` flag is now set, otherwise
``False``.
'''
self._has_exit_condition.acquire()
self._has_exit_condition.wait(timeout)
result = self._has_exit
self._has_exit_condition.release()
return result
def on_window_close(self, window):
'''Default window close handler.'''
if not app.windows:
self.exit()
if _is_epydoc:
def on_window_close(self, window):
'''A window was closed.
This event is dispatched when a window is closed. It is not
dispatched if the window's close button was pressed but the
window did not close.
The default handler calls `exit` if no more windows are open. You
can override this handler to base your application exit on some
other policy.
:event:
'''
def on_enter(self):
'''The event loop is about to begin.
This is dispatched when the event loop is prepared to enter
the main run loop, and represents the last chance for an
application to initialise itself.
:event:
'''
def on_exit(self):
'''The event loop is about to exit.
After dispatching this event, the `run` method returns (the
application may not actually exit if you have more code
following the `run` invocation).
:event:
'''
EventLoop.register_event_type('on_window_close')
EventLoop.register_event_type('on_enter')
EventLoop.register_event_type('on_exit')
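# --- Illustrative sketch of the timeout predictor (not part of pyglet) ------
# _run_estimated() feeds (measured, requested) timeout pairs into the
# _least_squares generator above, which fits a line
#     requested ~= gradient * measured + offset
# so that the loop can plug the timeout it *wants* to observe into the line
# and obtain the value it should actually request from the platform. The
# priming/update protocol is next() once, then send((measured, requested)).
# The numbers below are invented purely to show that protocol.
def _demo_least_squares():
    predictor = EventLoop._least_squares()
    gradient, offset = predictor.next()            # prime the generator
    for measured, requested in [(0.12, 0.10), (0.23, 0.20), (0.45, 0.40)]:
        gradient, offset = predictor.send((measured, requested))
    print 'gradient=%.3f offset=%.3f' % (gradient, offset)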
|
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict
from nltk.compat import Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
Print the available template sets in this demo, with a short description
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards the end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set([int(tid) for tid in tids])
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, tpl))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
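# --- Illustrative training sketch (not part of this module) -----------------
# The template sets above are only useful together with a rule trainer. A
# minimal, hedged example of the usual workflow, assuming NLTK's standard
# nltk.tag.brill_trainer.BrillTaggerTrainer and the bundled treebank sample;
# the corpus split sizes and max_rules value are arbitrary.
def _demo_train_brill_tagger():
    from nltk.corpus import treebank
    from nltk.tag import UnigramTagger
    from nltk.tag.brill_trainer import BrillTaggerTrainer

    train_sents = treebank.tagged_sents()[:200]
    test_sents = treebank.tagged_sents()[200:250]

    baseline = UnigramTagger(train_sents)              # initial tagger
    trainer = BrillTaggerTrainer(baseline, brill24(), trace=3)
    tagger = trainer.train(train_sents, max_rules=20)

    print(tagger.evaluate(test_sents))                 # accuracy on held-out data
    tagger.print_template_statistics(printunused=False)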
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
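# --- Illustrative sketch (not part of Tornado) -------------------------------
# The module docstring above notes that IOLoop.add_timeout is the non-blocking
# counterpart of time.sleep. A minimal, hedged example of that pattern; the
# callback and the 5-second delay are invented, and IOLoop is the class
# defined below.
def _demo_add_timeout():
    io_loop = IOLoop.current()

    def say_hello():
        print("hello after 5 seconds, without blocking the loop")
        io_loop.stop()

    # Schedule the callback instead of sleeping; other handlers keep running.
    io_loop.add_timeout(io_loop.time() + 5, say_hello)
    io_loop.start()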
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None and is_future(ret):
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
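# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the abstract API above with run_sync(),
# assuming Tornado 4.x where ``tornado.gen.moment`` is available.  The
# function below is hypothetical and exists only for illustration.
def _example_run_sync():
    from tornado import gen

    @gen.coroutine
    def main():
        # Yield to the IOLoop once before producing a result.
        yield gen.moment
        raise gen.Return(42)

    # run_sync() starts the loop, waits for the returned Future to resolve,
    # stops the loop, and returns the result (or re-raises the exception).
    return IOLoop.current().run_sync(main)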
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if thread.get_ident() != self._thread_ident:
# If we're not on the IOLoop's thread, we need to synchronize
# with other threads, or waking logic will induce a race.
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty:
# If we're not in the IOLoop's thread, and we added the
# first callback to an empty list, we may need to wake it
# up (it may wake up on its own, but an occasional extra
# wake is harmless). Waking up a polling IOLoop is
# relatively expensive, so we try to avoid it when we can.
self._waker.wake()
else:
if self._closing:
raise RuntimeError("IOLoop is closing")
# If we're on the IOLoop's thread, we don't need the lock,
# since we don't need to wake anyone, just add the callback.
# Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the thread is interrupting another
# thread holding the _callback_lock block in IOLoop.start,
# we may modify either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
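# --- Illustrative usage sketch (not part of the original module) ---
# The scheduling APIs above express the same deadline in different forms.
# ``do_work`` is a hypothetical zero-argument callable.
def _example_timeouts(io_loop, do_work):
    # Absolute deadline on the loop's own clock.
    h1 = io_loop.call_at(io_loop.time() + 5, do_work)
    # Relative delay in seconds (Tornado 4.0+).
    h2 = io_loop.call_later(5, do_work)
    # add_timeout() accepts either a number or a datetime.timedelta.
    h3 = io_loop.add_timeout(datetime.timedelta(seconds=5), do_work)
    # Handles may be cancelled; cancelling an already-run timeout is safe.
    for handle in (h1, h2, h3):
        io_loop.remove_timeout(handle)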
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) / callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
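# --- Illustrative usage sketch (not part of the original module) ---
# A PeriodicCallback fires every ``callback_time`` milliseconds until stop()
# is called.  The tick counter below is hypothetical.
def _example_periodic():
    io_loop = IOLoop.current()
    state = {"ticks": 0}

    def tick():
        state["ticks"] += 1
        if state["ticks"] >= 5:
            periodic.stop()
            io_loop.stop()

    periodic = PeriodicCallback(tick, 500)  # callback_time is in milliseconds
    periodic.start()
    io_loop.start()
    return state["ticks"]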
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from .build import build_server
from . import errors
from . import pilot
from . import db
from .context import Context
from . import tags
from .tags import cookie
from .logtools import LoggerFile
from .tools import timer, lazystr
from .logic import debug_lock, is_debugging
from .logic import notify
from .request import MoyaRequest, ReplaceRequest
from .response import MoyaResponse
from . import http
from .compat import text_type, itervalues, py2bytes
from . import namespaces
from .loggingconf import init_logging_fs
from .context.expression import Expression
from webob import Response
from fs.path import splitext
from fs.opener import open_fs
from fs.errors import FSError
import io
import gc
import random
from time import time, clock, sleep
from threading import RLock
import weakref
from collections import defaultdict
from textwrap import dedent
import os.path
import logging
log = logging.getLogger("moya")
request_log = logging.getLogger("moya.request")
runtime_log = logging.getLogger("moya.runtime")
startup_log = logging.getLogger("moya.startup")
preflight_log = logging.getLogger("moya.preflight")
try:
import watchdog
import watchdog.events
import watchdog.observers
except ImportError:
watchdog = None
try:
import objgraph
except:
objgraph = None
if watchdog:
class ReloadChangeWatcher(watchdog.events.FileSystemEventHandler):
def __init__(self, watch_fs, app):
self._app = weakref.ref(app)
self.watch_types = app.archive.cfg.get_list(
"autoreload", "extensions", ".xml\n.ini\n.py"
)
self.watching_fs = watch_fs
self.observer = None
try:
path = self.watching_fs.getsyspath("/")
except FSError:
startup_log.warning("auto reload not available on this filesystem")
else:
try:
observer = watchdog.observers.Observer()
observer.schedule(self, path, recursive=True)
observer.start()
self.observer = observer
except:
startup_log.exception(
'failed to watch "{}" for changes'.format(path)
)
else:
startup_log.debug('watching "{}" for changes'.format(path))
super(ReloadChangeWatcher, self).__init__()
def on_any_event(self, event):
ext = splitext(event.src_path)[1].lower()
if ext not in self.watch_types:
return
if not self.app.rebuild_required:
log.info(
"detected modification to project, rebuild will occur on next request"
)
self.app.rebuild_required = True
@property
def app(self):
return self._app()
def close(self):
if self.observer is not None:
try:
self.observer.stop()
# No need to join since we are exiting the process
# self.observer.join()
except:
pass
self.watching_fs.close()
else:
class ReloadChangeWatcher(object):
def __init__(self, watch_fs, app):
startup_log.warning(
"'watchdog' module could not be imported, autoreload is disabled"
)
startup_log.warning(
"you might be able to fix this with 'pip install watchdog'"
)
def close(self):
pass
def memory_tracker(f):
def deco(self, *args, **kwargs):
if self.debug_memory:
objgraph.show_growth(limit=1)
try:
return f(self, *args, **kwargs)
finally:
if self.debug_memory:
runtime_log.info("New objects:")
objgraph.show_growth(file=LoggerFile("moya.runtime"))
# roots = objgraph.get_leaking_objects()
# runtime_log.info('Unreachable ojects:')
# objgraph.show_most_common_types(objects=roots, file=LoggerFile('moya.runtime'))
return deco
class WSGIApplication(object):
def __init__(
self,
filesystem_url,
settings_path,
server="main",
logging=None,
disable_autoreload=False,
breakpoint=False,
breakpoint_startup=False,
validate_db=False,
simulate_slow_network=False,
debug_memory=False,
strict=False,
master_settings=None,
test_build=False,
develop=False,
load_expression_cache=True,
post_build_hook=None,
):
self.filesystem_url = filesystem_url
self.settings_path = settings_path
self.server_ref = server
self.logging = logging
self.breakpoint = breakpoint
self.validate_db = validate_db
self.watching_fs = None
self.rebuild_required = False
self._new_build_lock = RLock()
self.archive = None
self._self = weakref.ref(self, self.on_close)
self.simulate_slow_network = simulate_slow_network
self.debug_memory = debug_memory
self.master_settings = master_settings
self.test_build = test_build
self.develop = develop
self.load_expression_cache = load_expression_cache
self.post_build_hook = post_build_hook
if logging is not None:
with open_fs(self.filesystem_url) as logging_fs:
init_logging_fs(logging_fs, logging)
try:
self.build(breakpoint=breakpoint_startup, strict=strict)
except Exception as e:
startup_log.critical(text_type(e))
raise
if self.archive.debug_memory:
self.debug_memory = True
if self.debug_memory and objgraph is None:
self.debug_memory = False
runtime_log.error(
"memory debugging requires objgraph (https://pypi.python.org/pypi/objgraph)"
)
if self.debug_memory:
runtime_log.warning("memory debugging is on, this will effect performance")
self.watcher = None
if self.archive.auto_reload and not disable_autoreload:
try:
location = self.archive.project_fs.getsyspath("/")
except FSError:
log.warning("project filesystem has no syspath, disabling autoreload")
else:
watch_location = os.path.join(
location, self.archive.cfg.get("autoreload", "location", "")
)
self.watcher = ReloadChangeWatcher(open_fs(watch_location), self)
@classmethod
def on_close(cls, application_weakref):
# Called prior to Python finalizing the WSGIApplication, but before __del__
# Note, application_weakref will always return None. There is no way to use the original object at this point
pass
def close(self):
if self.watcher is not None:
self.watcher.close()
def __repr__(self):
return """<wsgiapplication {} {}>""".format(self.settings_path, self.server_ref)
def build(self, breakpoint=False, strict=False):
with timer("startup", output=startup_log.debug):
build_result = build_server(
self.filesystem_url,
self.settings_path,
server_element=self.server_ref,
validate_db=self.validate_db,
breakpoint=breakpoint,
strict=strict,
master_settings=self.master_settings,
test_build=self.test_build,
develop=self.develop,
)
if build_result is None:
msg = "Failed to build project"
raise errors.StartupFailedError(msg)
self.archive = build_result.archive
self.archive.finalize()
self.server = build_result.server
if self.load_expression_cache:
if self.archive.has_cache("parser"):
parser_cache = self.archive.get_cache("parser")
if Expression.load(parser_cache):
log.debug("expression cache loaded")
if self.post_build_hook is not None:
try:
self.post_build_hook(self)
except:
log.exception("post build hook failed")
raise
context = Context(
{
"console": self.archive.console,
"settings": self.archive.settings,
"debug": self.archive.debug,
"develop": self.develop or self.archive.develop,
"pilot": pilot,
}
)
self.populate_context(context)
self.archive.populate_context(context)
self.archive.fire(context, "sys.startup")
db.commit_sessions(context)
gc.collect()
def populate_context(self, context):
# Called by moya <command>
context.root.update(
_dbsessions=db.get_session_map(self.archive),
console=self.archive.console,
fs=self.archive.get_context_filesystems(),
)
def do_rebuild(self):
self.archive.console.div(
"Re-building project due to changes", bold=True, fg="blue"
)
error_text = None
try:
new_build = build_server(
self.filesystem_url,
self.settings_path,
server_element=self.server_ref,
strict=self.archive.strict,
validate_db=True,
)
except Exception as e:
error_text = text_type(e)
log.warning(e)
new_build = None
if new_build is None:
self.rebuild_required = False
notify(
"Rebuild Failed", error_text or "Unable to build project, see console"
)
return
with self._new_build_lock:
self.archive = new_build.archive
self.server = new_build.server
self.archive.finalize()
if self.post_build_hook is not None:
try:
self.post_build_hook(self)
except:
log.exception("post build hook failed")
raise
self.rebuild_required = False
self.archive.console.div(
"Modified project built successfully", bold=True, fg="green"
)
def preflight(self, report=True):
app_preflight = []
if self.archive.preflight:
for app in itervalues(self.archive.apps):
preflight = []
for element in app.lib.get_elements_by_type(
(namespaces.preflight, "check")
):
preflight_callable = self.archive.get_callable_from_element(
element, app=app
)
context = Context({"preflight": preflight})
self.archive.populate_context(context)
self.populate_context(context)
context[".app"] = app
if not element.check(context):
preflight.append((element, "skip", ""))
continue
try:
preflight_callable(context, app=app)
except Exception as e:
preflight.append((element, "error", text_type(e)))
app_preflight.append((app, preflight))
if report:
all_ok = True
for app, checks in app_preflight:
if not checks:
continue
totals = defaultdict(int)
for element, status, text in checks:
lines = dedent(text).splitlines()
totals[status] += 1
for line in lines:
if line:
if status == "warning":
preflight_log.warning("%s", line)
elif status == "fail":
preflight_log.error("%s", line)
elif status == "error":
preflight_log.critical("%s", line)
results = []
for status in ("warning", "fail", "error"):
if totals[status]:
results.append("{} {}(s)".format(totals[status], status))
if status != "skip":
all_ok = False
if results:
preflight_log.info("%s in %s", ", ".join(results), app)
if all_ok:
preflight_log.info("all passed")
else:
preflight_log.warning(
"preflight detected potential problems -- run 'moya preflight' for more information"
)
return app_preflight
def get_response(self, request, context):
"""Get a response object"""
fire = self.archive.fire
fire(context, "request.start", app=None, sender=None, data={"request": request})
with pilot.manage_request(request, context):
root = context.root
root.update(
settings=self.archive.settings,
debug=self.archive.debug,
request=request,
cookiejar=cookie.CookieJar(),
)
self.populate_context(context)
fire(context, "request.pre-dispatch", data={"request": request})
while 1:
try:
result = self.server.dispatch(
self.archive, context, request, breakpoint=self.breakpoint
)
except Exception:
log.exception("error in dispatch")
raise
if isinstance(result, ReplaceRequest):
context.root["request"] = request = result.request
continue
break
fire(
context,
"request.post-dispatch",
data={"request": request, "result": result},
)
response = None
if result is not None:
if isinstance(result, text_type):
response = MoyaResponse(
charset=py2bytes("utf8"), text=text_type(result)
)
elif isinstance(result, Response):
response = result
else:
response = context.root.get("response", None)
if response is None:
response = MoyaResponse(
status=http.StatusCode.not_found, text=py2bytes("404 - Not Found")
)
if "headers" in root:
for k, v in root["headers"].items():
response.headers[k.encode("utf-8")] = v.encode("utf-8")
fire(
context, "request.response", data={"request": request, "response": response}
)
return response
def slow_iter(self, response_iter):
"""A generator that yields data slowly."""
response_file = io.BytesIO(b"".join(response_iter))
while 1:
chunk = response_file.read(16384)
if not chunk:
break
sleep(0.1)
yield chunk
@memory_tracker
def __call__(self, environ, start_response):
"""Build the request."""
if self.rebuild_required and not is_debugging():
with debug_lock:
self.do_rebuild()
slow = self.simulate_slow_network
if slow:
sleep(random.uniform(0.2, 0.5))
start = time()
start_clock = clock()
context = Context(name="WSGIApplication.__call__")
request = MoyaRequest(environ)
response = self.get_response(request, context)
taken = time() - start
clock_taken = clock() - start_clock
start_response(response.status, response.headerlist)
log_fmt = '"%s %s %s" %i %s %s'
taken_ms = lazystr("{:.1f}ms {:.1f}ms".format, taken * 1000, clock_taken * 1000)
request_log.info(
log_fmt,
request.method,
request.path_qs,
request.http_version,
response.status_int,
response.content_length or 0,
taken_ms,
)
try:
if request.method == "HEAD":
return []
else:
if slow:
return self.slow_iter(response.app_iter)
else:
return response.app_iter
finally:
self.archive.fire(context, "request.end", data={"response": response})
context.root = {}
Application = WSGIApplication
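# --- Illustrative usage sketch (not part of the original module) ---
# Serving the WSGIApplication with the standard library's wsgiref server.
# The filesystem URL and settings path below are hypothetical placeholders.
def _example_serve():
    from wsgiref.simple_server import make_server

    application = WSGIApplication(
        "./myproject",       # filesystem_url (assumed project location)
        "settings.ini",      # settings_path (assumed settings file)
        server="main",
        disable_autoreload=True,
    )
    try:
        make_server("127.0.0.1", 8000, application).serve_forever()
    finally:
        application.close()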
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""XCode test log parser."""
import json
import logging
import os
import plistlib
import re
import shutil
import subprocess
import sys
import file_util
from test_result_util import ResultCollection, TestResult, TestStatus
import test_runner
import xcode_util
# Some system errors are reported as failed tests in Xcode test result log in
# Xcode 12, e.g. test app crash in xctest parallel testing. This is reported
# as 'BUILD_INTERRUPTED' if it's in final attempt. If not in final attempt, it
# will be ignored since future attempts will cover tests not ran.
SYSTEM_ERROR_TEST_NAME_SUFFIXES = ['encountered an error']
LOGGER = logging.getLogger(__name__)
_XCRESULT_SUFFIX = '.xcresult'
def _sanitize_str(line):
"""Encodes str when in python 2."""
if sys.version_info.major == 2:
if isinstance(line, unicode):
line = line.encode('utf-8')
return line
def _sanitize_str_list(lines):
"""Encodes any unicode in list when in python 2."""
sanitized_lines = []
for line in lines:
sanitized_lines.append(_sanitize_str(line))
return sanitized_lines
def get_parser():
"""Returns correct parser from version of Xcode installed."""
if xcode_util.using_xcode_11_or_higher():
return Xcode11LogParser()
return XcodeLogParser()
def parse_passed_failed_tests_for_interrupted_run(output):
"""Parses xcode runner output to get passed & failed tests.
Args:
output: [str] An output of test run.
Returns:
test_result_util.ResultCollection: Results of tests parsed.
"""
result = ResultCollection()
passed_tests = []
failed_tests = []
# Test has format:
# [09:04:42:INFO] Test case '-[Test_class test_method]' passed.
# [09:04:42:INFO] Test Case '-[Test_class test_method]' failed.
passed_test_regex = re.compile(r'Test [Cc]ase \'\-\[(.+?)\s(.+?)\]\' passed')
failed_test_regex = re.compile(r'Test [Cc]ase \'\-\[(.+?)\s(.+?)\]\' failed')
def _find_list_of_tests(tests, regex):
"""Adds test names matched by regex to result list."""
for test_line in output:
m_test = regex.search(test_line)
if m_test:
tests.append('%s/%s' % (m_test.group(1), m_test.group(2)))
_find_list_of_tests(passed_tests, passed_test_regex)
_find_list_of_tests(failed_tests, failed_test_regex)
result.add_test_names_status(passed_tests, TestStatus.PASS)
result.add_test_names_status(
failed_tests,
TestStatus.FAIL,
test_log='Test failed in interrupted(timedout) run.')
LOGGER.info('%d passed tests for interrupted build.' % len(passed_tests))
LOGGER.info('%d failed tests for interrupted build.' % len(failed_tests))
return result
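# --- Illustrative usage sketch (not part of the original module) ---
# Feeding captured runner output to the parser above; the log lines are
# hypothetical but follow the format documented in the function's comments.
def _example_parse_interrupted_run():
    output = [
        "[09:04:42:INFO] Test case '-[SmokeTest testOpen]' passed.",
        "[09:04:43:INFO] Test Case '-[SmokeTest testCrash]' failed.",
    ]
    return parse_passed_failed_tests_for_interrupted_run(output)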
def format_test_case(test_case):
"""Format test case from `-[TestClass TestMethod]` to `TestClass_TestMethod`.
Args:
test_case: (basestring) Test case id in format `-[TestClass TestMethod]` or
`[TestClass/TestMethod]`
Returns:
(str) Test case id in format TestClass/TestMethod.
"""
test_case = _sanitize_str(test_case)
test = test_case.replace('[', '').replace(']',
'').replace('-',
'').replace(' ', '/')
return test
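# --- Illustrative sketch (not part of the original module) ---
# How format_test_case() normalizes the two id forms it documents.
def _example_format_test_case():
    # '-[TestClass testMethod]' -> 'TestClass/testMethod'
    assert format_test_case('-[TestClass testMethod]') == 'TestClass/testMethod'
    # '[TestClass/testMethod]' is normalized to the same form.
    assert format_test_case('[TestClass/testMethod]') == 'TestClass/testMethod'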
def copy_screenshots_for_failed_test(failure_message, test_case_folder):
screenshot_regex = re.compile(r'Screenshots:\s({(\n.*)+?\n})')
screenshots = screenshot_regex.search(failure_message)
if not os.path.exists(test_case_folder):
os.makedirs(test_case_folder)
if screenshots:
screenshots_files = screenshots.group(1).strip()
# For some failures xcodebuild attaches screenshots in the `Attachments`
# folder, and the plist contains no paths to them, only references, e.g.
# "Screenshot At Failure" : <UIImage: 0x6000032ab410>, {768, 1024}
if 'UIImage:' in screenshots_files:
return
d = json.loads(screenshots_files)
for f in d.values():
if not os.path.exists(f):
continue
screenshot = os.path.join(test_case_folder, os.path.basename(f))
shutil.copyfile(f, screenshot)
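# --- Illustrative sketch (not part of the original module) ---
# The failure-message shape copy_screenshots_for_failed_test() expects; the
# paths below are hypothetical.
def _example_copy_screenshots():
    failure_message = (
        'Assertion failed\n'
        'Screenshots: {\n'
        '  "Screenshot At Failure": "/tmp/attempt_0/Attachments/shot.png"\n'
        '}')
    copy_screenshots_for_failed_test(failure_message, '/tmp/failed_test_case')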
class Xcode11LogParser(object):
"""Xcode 11 log parser. Parse Xcode result types v3."""
@staticmethod
def _xcresulttool_get(xcresult_path, ref_id=None):
"""Runs `xcresulttool get` command and returns JSON output.
Xcresult folder contains test result in Xcode Result Types v. 3.19.
Documentation of xcresulttool usage is in
https://help.apple.com/xcode/mac/current/#/devc38fc7392?sub=dev0fe9c3ea3
Args:
xcresult_path: A full path to xcresult folder that must have Info.plist.
ref_id: A reference id used in a command and can be used to get test data.
If id is from ['timelineRef', 'logRef', 'testsRef', 'diagnosticsRef']
method will run xcresulttool 2 times:
1. to get a specific id value, run the command without the id parameter:
xcresulttool get --path %xcresult%
2. to get data based on the id:
xcresulttool get --path %xcresult% --id %id%
Returns:
An output of a command in JSON format.
"""
xcode_info = test_runner.get_current_xcode_info()
folder = os.path.join(xcode_info['path'], 'usr', 'bin')
# By default xcresulttool lives in %Xcode%/usr/bin, which is not among the
# directories in $PATH. Check whether %Xcode%/usr/bin is in $PATH and add it
# before calling xcresulttool.
if folder not in os.environ['PATH']:
os.environ['PATH'] += ':%s' % folder
reference_types = ['timelineRef', 'logRef', 'testsRef', 'diagnosticsRef']
if ref_id in reference_types:
data = json.loads(Xcode11LogParser._xcresulttool_get(xcresult_path))
# Redefine ref_id to get only the reference data
ref_id = data['actions']['_values'][0]['actionResult'][
ref_id]['id']['_value']
# If no ref_id then xcresulttool will use default(root) id.
id_params = ['--id', ref_id] if ref_id else []
xcresult_command = ['xcresulttool', 'get', '--format', 'json',
'--path', xcresult_path] + id_params
return subprocess.check_output(xcresult_command).decode('utf-8').strip()
@staticmethod
def _list_of_failed_tests(actions_invocation_record, excluded=None):
"""Gets failed tests from xcresult root data.
ActionsInvocationRecord is an object that contains properties:
+ metadataRef: id of the record that can be retrieved with
`xcresult get --path xcresult --id metadataRef`
+ metrics: number of run and failed tests.
+ issues: contains TestFailureIssueSummary in case of failure, otherwise
it contains just the declaration of the `issues` node.
+ actions: a list of ActionRecord.
Args:
actions_invocation_record: An output of `xcresult get --path xcresult`.
excluded: A set of tests that will be excluded.
Returns:
test_results.ResultCollection: Results of failed tests.
"""
excluded = excluded or set()
result = ResultCollection()
if 'testFailureSummaries' not in actions_invocation_record['issues']:
return result
for failure_summary in actions_invocation_record['issues'][
'testFailureSummaries']['_values']:
test_case_id = format_test_case(failure_summary['testCaseName']['_value'])
if test_case_id in excluded:
continue
error_line = _sanitize_str(
failure_summary['documentLocationInCreatingWorkspace']['url']
['_value'])
fail_message = error_line + '\n' + _sanitize_str(
failure_summary['message']['_value'])
result.add_test_result(
TestResult(test_case_id, TestStatus.FAIL, test_log=fail_message))
return result
@staticmethod
def _get_test_statuses(xcresult):
"""Returns test results from xcresult.
Also extracts and stores attachments for failed tests.
Args:
xcresult: (str) A path to xcresult.
Returns:
test_result.ResultCollection: Test results.
"""
result = ResultCollection()
# See TESTS_REF in xcode_log_parser_test.py for an example of |root|.
root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult, 'testsRef'))
for summary in root['summaries']['_values'][0][
'testableSummaries']['_values']:
if not summary['tests']:
continue
for test_suite in summary['tests']['_values'][0]['subtests'][
'_values'][0]['subtests']['_values']:
if 'subtests' not in test_suite:
# Sometimes (if a crash occurs) the `subtests` node is not uploaded.
# It happens only for failed tests, and the list of failures
# can be parsed from root.
continue
for test in test_suite['subtests']['_values']:
test_name = _sanitize_str(test['identifier']['_value'])
if any(
test_name.endswith(suffix)
for suffix in SYSTEM_ERROR_TEST_NAME_SUFFIXES):
result.crashed = True
result.crash_message += 'System error in %s: %s\n' % (xcresult,
test_name)
continue
# If a test case was executed multiple times, there will be multiple
# |test| objects of it. Each |test| corresponds to an execution of the
# test case.
if test['testStatus']['_value'] == 'Success':
result.add_test_result(TestResult(test_name, TestStatus.PASS))
else:
# Parse data for failed test by its id. See SINGLE_TEST_SUMMARY_REF
# in xcode_log_parser_test.py for an example of |summary_ref|.
summary_ref = json.loads(
Xcode11LogParser._xcresulttool_get(
xcresult, test['summaryRef']['id']['_value']))
failure_message = 'Logs from "failureSummaries" in .xcresult:\n'
# On rare occasions rootFailure doesn't have 'failureSummaries'.
for failure in summary_ref.get('failureSummaries',
{}).get('_values', []):
file_name = _sanitize_str(
failure.get('fileName', {}).get('_value', ''))
line_number = _sanitize_str(
failure.get('lineNumber', {}).get('_value', ''))
failure_location = 'file: %s, line: %s' % (file_name, line_number)
failure_message += failure_location + '\n'
failure_message += _sanitize_str(
failure['message']['_value']) + '\n'
attachments = Xcode11LogParser._extract_artifacts_for_test(
test_name, summary_ref, xcresult)
result.add_test_result(
TestResult(
test_name,
TestStatus.FAIL,
test_log=failure_message,
attachments=attachments))
return result
@staticmethod
def collect_test_results(output_path, output):
"""Gets XCTest results, diagnostic data & artifacts from xcresult.
Args:
output_path: (str) An output path passed in --resultBundlePath when
running xcodebuild.
output: [str] An output of test run.
Returns:
test_result.ResultCollection: Test results.
"""
output_path = _sanitize_str(output_path)
output = _sanitize_str_list(output)
LOGGER.info('Reading %s' % output_path)
overall_collected_result = ResultCollection()
# Xcodebuild writes staging data to |output_path| folder during test
# execution. If |output_path| doesn't exist, it means tests didn't start at
# all.
if not os.path.exists(output_path):
overall_collected_result.crashed = True
overall_collected_result.crash_message = (
'%s with staging data does not exist.\n' % output_path +
'\n'.join(output))
return overall_collected_result
# A run of `xcodebuild .. -resultBundlePath %output_path%` generates the
# output_path folder, but Xcode 11+ generates `output_path.xcresult` and
# `output_path`, where `output_path.xcresult` is a folder with results and
# `output_path` is a symlink to the `output_path.xcresult` folder.
# `xcresulttool` behaves differently with the folder vs. the symlink on a
# laptop and on bots, so this piece of code uses the .xcresult folder.
xcresult = output_path + _XCRESULT_SUFFIX
# |output_path|.xcresult folder is created at the end of tests. If
# |output_path| folder exists but |output_path|.xcresult folder doesn't
# exist, it means xcodebuild exited or was killed half way during tests.
if not os.path.exists(xcresult):
overall_collected_result.crashed = True
overall_collected_result.crash_message = (
'%s with test results does not exist.\n' % xcresult +
'\n'.join(output))
overall_collected_result.add_result_collection(
parse_passed_failed_tests_for_interrupted_run(output))
return overall_collected_result
# See XCRESULT_ROOT in xcode_log_parser_test.py for an example of |root|.
root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
metrics = root['metrics']
# In case of test crash both numbers of run and failed tests are equal to 0.
if (metrics.get('testsCount', {}).get('_value', 0) == 0 and
metrics.get('testsFailedCount', {}).get('_value', 0) == 0):
overall_collected_result.crashed = True
overall_collected_result.crash_message = '0 tests executed!'
else:
overall_collected_result.add_result_collection(
Xcode11LogParser._get_test_statuses(xcresult))
# For some crashed tests info about error contained only in root node.
overall_collected_result.add_result_collection(
Xcode11LogParser._list_of_failed_tests(
root, excluded=overall_collected_result.all_test_names()))
Xcode11LogParser.export_diagnostic_data(output_path)
# Remove the symbol link file.
if os.path.islink(output_path):
os.unlink(output_path)
file_util.zip_and_remove_folder(xcresult)
return overall_collected_result
@staticmethod
def copy_artifacts(output_path):
"""Copy screenshots, crash logs of failed tests to output folder.
Warning: This method duplicates logic from the |collect_test_results|
method. Do not use both on the same test output path.
Args:
output_path: (str) An output path passed in --resultBundlePath when
running xcodebuild.
"""
xcresult = output_path + _XCRESULT_SUFFIX
if not os.path.exists(xcresult):
LOGGER.warn('%s does not exist.' % xcresult)
return
root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
if 'testFailureSummaries' not in root.get('issues', {}):
LOGGER.info('No failures in %s' % xcresult)
return
# See TESTS_REF['summaries']['_values'] in xcode_log_parser_test.py.
test_summaries = json.loads(
Xcode11LogParser._xcresulttool_get(xcresult, 'testsRef')).get(
'summaries', {}).get('_values', [])
test_summary_refs = {}
for summaries in test_summaries:
for summary in summaries.get('testableSummaries', {}).get('_values', []):
for all_tests in summary.get('tests', {}).get('_values', []):
for test_suite in all_tests.get('subtests', {}).get('_values', []):
for test_case in test_suite.get('subtests', {}).get('_values', []):
for test in test_case.get('subtests', {}).get('_values', []):
if test['testStatus']['_value'] != 'Success':
test_summary_refs[
test['identifier']
['_value']] = test['summaryRef']['id']['_value']
for test, summary_ref_id in test_summary_refs.items():
# See SINGLE_TEST_SUMMARY_REF in xcode_log_parser_test.py for an example
# of |test_summary|.
test_summary = json.loads(
Xcode11LogParser._xcresulttool_get(xcresult, summary_ref_id))
Xcode11LogParser._extract_artifacts_for_test(test, test_summary, xcresult)
@staticmethod
def export_diagnostic_data(output_path):
"""Exports diagnostic data from xcresult to xcresult_diagnostic.zip.
Since Xcode 11 format of result bundles changed, to get diagnostic data
need to run command below:
xcresulttool export --type directory --id DIAGNOSTICS_REF --output-path
./export_folder --path ./RB.xcresult
Args:
output_path: (str) An output path passed in --resultBundlePath when
running xcodebuild.
"""
xcresult = output_path + _XCRESULT_SUFFIX
if not os.path.exists(xcresult):
LOGGER.warn('%s does not exist.' % xcresult)
return
root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
try:
diagnostics_ref = root['actions']['_values'][0]['actionResult'][
'diagnosticsRef']['id']['_value']
diagnostic_folder = '%s_diagnostic' % xcresult
Xcode11LogParser._export_data(xcresult, diagnostics_ref, 'directory',
diagnostic_folder)
# Copy log files out of diagnostic_folder if any. Use |name_count| to
# generate an index for same name files produced from Xcode parallel
# testing.
name_count = {}
for root, dirs, files in os.walk(diagnostic_folder):
for filename in files:
if 'StandardOutputAndStandardError' in filename:
file_index = name_count.get(filename, 0)
output_filename = (
'%s_simulator#%d_%s' %
(os.path.basename(output_path), file_index, filename))
output_filepath = os.path.join(output_path, os.pardir,
output_filename)
shutil.copy(os.path.join(root, filename), output_filepath)
name_count[filename] = name_count.get(filename, 0) + 1
file_util.zip_and_remove_folder(diagnostic_folder)
except KeyError:
LOGGER.warn('Did not parse diagnosticsRef from %s!' % xcresult)
@staticmethod
def _export_data(xcresult, ref_id, output_type, output_path):
"""Exports data from xcresult using xcresulttool.
Since Xcode 11 format of result bundles changed, to get diagnostic data
need to run command below:
xcresulttool export --type directory --id DIAGNOSTICS_REF --output-path
./export_folder --path ./RB.xcresult
Args:
xcresult: (str) A path to xcresult directory.
ref_id: (str) A reference id of exporting entity.
output_type: (str) An export type (can be directory or file).
output_path: (str) An output location.
"""
export_command = [
'xcresulttool', 'export', '--type', output_type, '--id', ref_id,
'--path', xcresult, '--output-path', output_path
]
subprocess.check_output(export_command).decode('utf-8').strip()
@staticmethod
def _extract_attachments(test,
test_activities,
xcresult,
attachments,
include_jpg=True):
"""Exrtact attachments from xcresult folder for a single test result.
Copies all attachments under test_activities and nested subactivities (if
any) to the same directory as xcresult directory. Saves abs paths of
extracted attachments in |attachments|.
Filenames are in format `${output}_TestCase_testMethod_${index}`, where
${output} is the basename of |xcresult| folder, ${index} is the index of
attachment for a test case, e.g.:
attempt_0_TestCase_testMethod_1.jpg
....
attempt_0_TestCase_testMethod_3.crash
Args:
test: (str) Test name.
test_activities: (list) List of test activities (dict) that
store data about each test step.
xcresult: (str) A path to test results.
attachments: (dict) File basename to abs path mapping for extracted
attachments to be stored in. Its length is also used as part of file
name to avoid duplicated filename.
include_jpg: (bool) Whether include jpg or jpeg attachments.
"""
for activity_summary in test_activities:
if 'subactivities' in activity_summary:
Xcode11LogParser._extract_attachments(
test,
activity_summary.get('subactivities', {}).get('_values', []),
xcresult, attachments, include_jpg)
for attachment in activity_summary.get('attachments',
{}).get('_values', []):
payload_ref = attachment['payloadRef']['id']['_value']
_, file_name_extension = os.path.splitext(
str(attachment['filename']['_value']))
if not include_jpg and file_name_extension in ['.jpg', '.jpeg']:
continue
attachment_index = len(attachments) + 1
attachment_filename = (
'%s_%s_%d%s' %
(os.path.splitext(os.path.basename(xcresult))[0],
test.replace('/', '_'), attachment_index, file_name_extension))
# Extracts attachment to the same folder containing xcresult.
attachment_output_path = os.path.abspath(
os.path.join(xcresult, os.pardir, attachment_filename))
Xcode11LogParser._export_data(xcresult, payload_ref, 'file',
attachment_output_path)
attachments[attachment_filename] = attachment_output_path
@staticmethod
def _extract_artifacts_for_test(test, summary_ref, xcresult):
"""Extracts artifacts for a test case result.
Args:
test: (str) Test name.
summary_ref: (dict) Summary ref field of a test result parsed by
xcresulttool. See SINGLE_TEST_SUMMARY_REF in xcode_log_parser_test.py
for an example.
xcresult: (str) A path to test results.
Returns:
(dict) File basename to abs path mapping for extracted attachments.
"""
attachments = {}
# Extract all attachments except for screenshots from each step of the
# test.
Xcode11LogParser._extract_attachments(
test,
summary_ref.get('activitySummaries', {}).get('_values', []),
xcresult,
attachments,
include_jpg=False)
# Extract all attachments of the failure step (applied to failed tests).
Xcode11LogParser._extract_attachments(
test,
summary_ref.get('failureSummaries', {}).get('_values', []),
xcresult,
attachments,
include_jpg=True)
return attachments
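# Illustrative shape of the mapping returned above (file names follow the
# `${output}_TestCase_testMethod_${index}` pattern; paths are made up):
#   {'attempt_0_TestCase_testMethod_1.crash':
#        '/abs/path/to/attempt_0_TestCase_testMethod_1.crash'}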
class XcodeLogParser(object):
"""Xcode log parser. Parses logs for Xcode until version 11."""
@staticmethod
def _test_status_summary(summary_plist):
"""Gets status summary from TestSummaries.plist.
Args:
summary_plist: (str) A path to plist-file.
Returns:
test_results.ResultCollection: Results of tests parsed.
"""
result = ResultCollection()
root_summary = plistlib.readPlist(summary_plist)
for summary in root_summary['TestableSummaries']:
if not summary['Tests']:
continue
for test_suite in summary['Tests'][0]['Subtests'][0]['Subtests']:
for test in test_suite['Subtests']:
if test['TestStatus'] == 'Success':
result.add_test_result(
TestResult(test['TestIdentifier'], TestStatus.PASS))
else:
message = ''
for failure_summary in test['FailureSummaries']:
failure_message = failure_summary['FileName']
if failure_summary['LineNumber']:
failure_message = '%s: line %s' % (
failure_message, failure_summary['LineNumber'])
message += failure_message + '\n'
message += failure_summary['Message'] + '\n'
result.add_test_result(
TestResult(
test['TestIdentifier'], TestStatus.FAIL, test_log=message))
return result
@staticmethod
def collect_test_results(output_folder, output):
"""Gets XCtest result data from Info.plist and copies artifacts.
Args:
output_folder: (str) A path to output folder.
output: [str] An output of test run.
Returns:
test_result.ResultCollection representing all test results.
"""
output_folder = _sanitize_str(output_folder)
output = _sanitize_str_list(output)
overall_collected_result = ResultCollection()
plist_path = os.path.join(output_folder, 'Info.plist')
if not os.path.exists(plist_path):
overall_collected_result.crashed = True
overall_collected_result.crash_message += (
'%s with test results does not exist.\n' % plist_path +
'\n'.join(output))
overall_collected_result.add_result_collection(
parse_passed_failed_tests_for_interrupted_run(output))
return overall_collected_result
root = plistlib.readPlist(plist_path)
for action in root['Actions']:
action_result = action['ActionResult']
if ((root['TestsCount'] == 0 and root['TestsFailedCount'] == 0) or
'TestSummaryPath' not in action_result):
overall_collected_result.crashed = True
if ('ErrorSummaries' in action_result and
action_result['ErrorSummaries']):
overall_collected_result.crash_message = '\n'.join(
_sanitize_str_list([
error_summary['Message']
for error_summary in action_result['ErrorSummaries']
]))
else:
summary_plist = os.path.join(
os.path.dirname(plist_path), action_result['TestSummaryPath'])
overall_collected_result.add_result_collection(
XcodeLogParser._test_status_summary(summary_plist))
XcodeLogParser._copy_screenshots(output_folder)
return overall_collected_result
@staticmethod
def _copy_screenshots(output_folder):
"""Copy screenshots of failed tests to output folder.
Args:
output_folder: (str) A full path to the folder where screenshots of failed tests are copied.
"""
info_plist_path = os.path.join(output_folder, 'Info.plist')
if not os.path.exists(info_plist_path):
LOGGER.info('%s does not exist.' % info_plist_path)
return
plist = plistlib.readPlist(info_plist_path)
if 'TestFailureSummaries' not in plist or not plist['TestFailureSummaries']:
LOGGER.info('No failures in %s' % info_plist_path)
return
for failure_summary in plist['TestFailureSummaries']:
# Screenshot folder has format 'TestClass_test_method'
test_case_id = format_test_case(failure_summary['TestCase'])
test_case_folder = os.path.join(output_folder, 'failures', test_case_id)
copy_screenshots_for_failed_test(failure_summary['Message'],
test_case_folder)
@staticmethod
def copy_artifacts(output_path):
"""Invokes _copy_screenshots(). To make public methods consistent."""
LOGGER.info('Invoking _copy_screenshots call for copy_artifacts in '
'XcodeLogParser')
XcodeLogParser._copy_screenshots(output_path)
@staticmethod
def export_diagnostic_data(output_path):
"""No-op. To make parser public methods consistent."""
LOGGER.warn('Exporting diagnostic data only supported in Xcode 11+')
|
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.postgres.search import SearchQuery
from django.contrib.postgres.search import SearchVector
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import Q, F, Case, When, Value, Sum, Min, Max, OuterRef, Subquery, Count, CharField
from django.db.models.functions import Length
from django.views import View
from django.views.generic.detail import SingleObjectMixin
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import get_language_info
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_control
from collections import Counter
import json
import datetime
import re
import pytz
import logging
from studygroups.decorators import user_is_group_facilitator
from studygroups.decorators import user_is_team_organizer
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import Application
from studygroups.models import Meeting
from studygroups.models import Team
from studygroups.models import TeamMembership
from studygroups.models import TeamInvitation
from studygroups.models import Announcement
from studygroups.models import FacilitatorGuide
from studygroups.models import generate_meetings_from_dates
from studygroups.models import get_json_response
from studygroups.models.course import course_platform_from_url
from studygroups.models.team import eligible_team_by_email_domain
from uxhelpers.utils import json_response
from api.geo import getLatLonDelta
from api import schema
from api.forms import ImageForm
logger = logging.getLogger(__name__)
def studygroups(request):
# TODO remove this API endpoint, where is it currently being used??
study_groups = StudyGroup.objects.published().filter(unlisted=False)
if 'course_id' in request.GET:
study_groups = study_groups.filter(course_id=request.GET.get('course_id'))
def to_json(sg):
data = {
"name": sg.name,
"course_title": sg.course.title,
"facilitator": sg.facilitator.first_name + " " + sg.facilitator.last_name,
"venue": sg.venue_name,
"venue_address": sg.venue_address + ", " + sg.city,
"city": sg.city,
"day": sg.day(),
"start_date": sg.start_date,
"meeting_time": sg.meeting_time,
"time_zone": sg.timezone_display(),
"end_time": sg.end_time(),
"weeks": sg.meeting_set.active().count(),
"url": f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,)),
}
if sg.image:
data["image_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + sg.image.url
# TODO else set default image URL
return data
data = [ to_json(sg) for sg in study_groups ]
return json_response(request, data)
class CustomSearchQuery(SearchQuery):
""" use to_tsquery to support partial matches """
""" NOTE: This is potentially unsafe!!"""
def as_sql(self, compiler, connection):
query = re.sub(r'[!\'()|&\:=,\.\ \-\<\>@]+', ' ', self.value).strip().lower()
tsquery = ":* & ".join(query.split(' '))
tsquery += ":*"
params = [tsquery]
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = 'to_tsquery({}::regconfig, %s)'.format(config_sql)
params = config_params + [tsquery]
else:
template = 'to_tsquery(%s)'
if self.invert:
template = '!!({})'.format(template)
return template, params
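# Illustrative sketch of the rewrite above (values are made up): a raw query
# such as "py dja" has punctuation stripped and each term turned into a
# prefix match, producing the tsquery string "py:* & dja:*", so rows
# containing "python" and "django" still match.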
def serialize_learning_circle(sg):
data = {
"course": {
"id": sg.course.pk,
"title": sg.course.title,
"provider": sg.course.provider,
"link": sg.course.link,
"course_page_url": settings.PROTOCOL + '://' + settings.DOMAIN + reverse('studygroups_course_page', args=(sg.course.id,)),
"discourse_topic_url": sg.course.discourse_topic_url if sg.course.discourse_topic_url else settings.PROTOCOL + '://' + settings.DOMAIN + reverse("studygroups_generate_course_discourse_topic", args=(sg.course.id,)),
},
"id": sg.id,
"name": sg.name,
"facilitator": sg.facilitator.first_name,
"venue": sg.venue_name,
"venue_address": sg.venue_address + ", " + sg.city,
"venue_website": sg.venue_website,
"city": sg.city,
"region": sg.region,
"country": sg.country,
"country_en": sg.country_en,
"latitude": sg.latitude,
"longitude": sg.longitude,
"place_id": sg.place_id,
"online": sg.online,
"language": sg.language,
"day": sg.day(),
"start_date": sg.start_date,
"start_datetime": sg.local_start_date(),
"meeting_time": sg.meeting_time,
"time_zone": sg.timezone_display(),
"last_meeting_date": sg.end_date, # TODO rename to end_date or last_meeting_date - ie make consistent
"end_time": sg.end_time(),
"weeks": sg.weeks if sg.draft else sg.meeting_set.active().count(), # TODO
"url": f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,)),
"report_url": sg.report_url(),
"studygroup_path": reverse('studygroups_view_study_group', args=(sg.id,)),
"draft": sg.draft,
"signup_count": sg.application_set.active().count(),
"signup_open": sg.signup_open and sg.end_date > datetime.date.today(),
}
if sg.image:
data["image_url"] = settings.PROTOCOL + '://' + settings.DOMAIN + sg.image.url
# TODO else set default image URL
if sg.signup_question:
data["signup_question"] = sg.signup_question
if hasattr(sg, 'next_meeting_date'):
data["next_meeting_date"] = sg.next_meeting_date
if hasattr(sg, 'status'):
data["status"] = sg.status
return data
def _intCommaList(csv):
values = csv.split(',') if csv else []
cleaned = []
for value in values:
try:
v = int(value)
cleaned += [v]
except ValueError:
return None, 'Not a list of integers separated by commas'
return cleaned, None
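# Illustrative behaviour of the helper above (made-up inputs):
#   _intCommaList("1,2,3")  # -> ([1, 2, 3], None)
#   _intCommaList("1,a")    # -> (None, 'Not a list of integers separated by commas')
#   _intCommaList("")       # -> ([], None)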
def _limit_offset(request):
if 'offset' in request.GET or 'limit' in request.GET:
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
offset = 0
try:
limit = int(request.GET.get('limit', 100))
except ValueError:
limit = 100
return limit, offset
@method_decorator(cache_control(max_age=15*60), name='dispatch')
class LearningCircleListView(View):
def get(self, request):
query_schema = {
"latitude": schema.floating_point(),
"longitude": schema.floating_point(),
"distance": schema.floating_point(),
"offset": schema.integer(),
"limit": schema.integer(),
"weekdays": _intCommaList,
"user": schema.boolean(),
"scope": schema.text(),
"draft": schema.boolean(),
"team_id": schema.integer(),
"order": lambda v: (v, None) if v in ['name', 'start_date', 'created_at', 'first_meeting_date', 'last_meeting_date', None] else (None, "must be 'name', 'created_at', 'first_meeting_date', 'last_meeting_date', or 'start_date'"),
}
data = schema.django_get_to_dict(request.GET)
clean_data, errors = schema.validate(query_schema, data)
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
study_groups = StudyGroup.objects.published().filter(unlisted=False).prefetch_related('course', 'meeting_set', 'application_set').order_by('id')
if 'draft' in request.GET:
study_groups = StudyGroup.objects.active().order_by('id')
if 'id' in request.GET:
id = request.GET.get('id')
study_groups = StudyGroup.objects.filter(pk=int(id))
if 'user' in request.GET:
user_id = request.user.id
study_groups = study_groups.filter(facilitator=user_id)
today = datetime.date.today()
active_meetings = Meeting.objects.filter(study_group=OuterRef('pk'), deleted_at__isnull=True).order_by('meeting_date')
# TODO status is being used by the learning circle search page?
study_groups = study_groups.annotate(
status=Case(
When(signup_open=True, start_date__gt=today, then=Value('upcoming')),
When(signup_open=True, start_date__lte=today, end_date__gte=today, then=Value('in_progress')),
When(signup_open=False, end_date__gte=today, then=Value('closed')),
default=Value('completed'),
output_field=CharField(),
),
)
# TODO scope is used by dashboard?
if 'scope' in request.GET:
scope = request.GET.get('scope')
upcoming_meetings = Meeting.objects.filter(study_group=OuterRef('pk'), deleted_at__isnull=True, meeting_date__gte=today).order_by('meeting_date')
if scope == "active":
study_groups = study_groups\
.annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
.filter(Q(end_date__gte=today) | Q(draft=True))
elif scope == "upcoming":
study_groups = study_groups\
.annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
.filter(Q(start_date__gt=today) | Q(draft=True))
elif scope == "current":
study_groups = study_groups\
.annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
.filter(start_date__lte=today, end_date__gte=today)
elif scope == "completed":
study_groups = study_groups\
.filter(end_date__lt=today)
q = request.GET.get('q', '').strip()
if q:
tsquery = CustomSearchQuery(q, config='simple')
study_groups = study_groups.annotate(
search = SearchVector(
'city',
'name',
'course__title',
'course__provider',
'course__topics',
'venue_name',
'venue_address',
'venue_details',
'facilitator__first_name',
'facilitator__last_name',
config='simple'
)
).filter(search=tsquery)
if 'course_id' in request.GET:
study_groups = study_groups.filter(
course_id=request.GET.get('course_id')
)
city = request.GET.get('city')
if city is not None:
study_groups = study_groups.filter(city=city)
team_id = request.GET.get('team_id')
if team_id is not None:
study_groups = study_groups.filter(team_id=team_id)
# TODO How is this different from scope=active?
if 'active' in request.GET:
active = request.GET.get('active') == 'true'
if active:
study_groups = study_groups.filter(end_date__gte=today)
else:
study_groups = study_groups.filter(end_date__lt=today)
if 'latitude' in request.GET and 'longitude' in request.GET:
# work with floats for ease
latitude = float(request.GET.get('latitude'))
longitude = float(request.GET.get('longitude'))
distance = float(request.GET.get('distance', False) or 50)
lat_delta, lon_delta = getLatLonDelta(latitude, longitude, distance)
lat_min = max(-90, latitude - lat_delta)
lat_max = min(90, latitude + lat_delta)
lon_min = max(-180, longitude - lon_delta)
lon_max = min(180, longitude + lon_delta)
# NOTE doesn't wrap around,
# iow, something at lat=45, lon=-189 and distance=1000 won't match
# lat=45, lon=189 even though they are only 222 km apart.
study_groups = study_groups.filter(
latitude__gte=lat_min,
latitude__lte=lat_max,
longitude__gte=lon_min,
longitude__lte=lon_max
)
# NOTE could use haversine approximation to filter more accurately
if 'topics' in request.GET:
topics = request.GET.get('topics').split(',')
query = Q(course__topics__icontains=topics[0])
for topic in topics[1:]:
query = Q(course__topics__icontains=topic) | query
study_groups = study_groups.filter(query)
if 'weekdays' in request.GET:
weekdays = request.GET.get('weekdays').split(',')
query = None
for weekday in weekdays:
# Django's __week_day lookup uses Sunday=1 .. Saturday=7, while the query
# parameter uses datetime.weekday() numbering where Monday is 0,
# e.g. Monday (0) -> 2, Sunday (6) -> 1.
weekday = (int(weekday) + 1) % 7 + 1
query = query | Q(start_date__week_day=weekday) if query else Q(start_date__week_day=weekday)
study_groups = study_groups.filter(query)
# TODO this conflates signup open and active
study_groups_signup_open = study_groups.filter(signup_open=True, end_date__gte=today)
study_groups_signup_closed = study_groups.filter(Q(signup_open=False) | Q(end_date__lt=today))
if 'signup' in request.GET:
signup_open = request.GET.get('signup') == 'open'
if signup_open:
study_groups = study_groups_signup_open
else:
study_groups = study_groups_signup_closed
order = request.GET.get('order', None)
if order == 'name':
study_groups = study_groups.order_by('name')
elif order == 'start_date':
study_groups = study_groups.order_by('-start_date')
elif order == 'created_at':
study_groups = study_groups.order_by('-created_at')
elif order == 'first_meeting_date':
study_groups = study_groups.order_by('start_date')
elif order == 'last_meeting_date':
study_groups = study_groups.order_by('-end_date')
data = {
'count': study_groups.count(),
'signup_open_count': study_groups_signup_open.count(),
'signup_closed_count': study_groups_signup_closed.count(),
}
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
study_groups = study_groups[offset:offset+limit]
data['items'] = [ serialize_learning_circle(sg) for sg in study_groups ]
return json_response(request, data)
class LearningCircleTopicListView(View):
""" Return topics for listed courses """
def get(self, request):
study_group_ids = Meeting.objects.active().filter(
meeting_date__gte=timezone.now()
).values('study_group')
course_ids = None
course_ids = StudyGroup.objects.published().filter(id__in=study_group_ids).values('course')
topics = Course.objects.active()\
.filter(unlisted=False)\
.filter(id__in=course_ids)\
.exclude(topics='')\
.values_list('topics')
topics = [
item.strip().lower() for sublist in topics for item in sublist[0].split(',')
]
data = {}
data['topics'] = { k: v for k, v in list(Counter(topics).items()) }
return json_response(request, data)
def _studygroup_object_for_map(sg):
active = sg.end_date > datetime.date.today()
report_available = sg.learnersurveyresponse_set.count() > 0
data = {
"id": sg.id,
"title": sg.name,
"latitude": sg.latitude,
"longitude": sg.longitude,
"city": sg.city,
"start_date": sg.start_date,
"active": active
}
if active:
data["url"] = settings.PROTOCOL + '://' + settings.DOMAIN + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,))
elif report_available:
data["report_url"] = sg.report_url()
return data
class LearningCirclesMapView(View):
def get(self, request):
study_groups = StudyGroup.objects.published().select_related('course').prefetch_related("learnersurveyresponse_set")
data = {}
data['items'] = [ _studygroup_object_for_map(sg) for sg in study_groups ]
return json_response(request, data)
def _course_check(course_id):
if not Course.objects.filter(pk=int(course_id)).exists():
return None, 'Course matching ID not found'
else:
return Course.objects.get(pk=int(course_id)), None
def serialize_course(course):
data = {
"id": course.id,
"title": course.title,
"provider": course.provider,
"platform": course.platform,
"link": course.link,
"caption": course.caption,
"on_demand": course.on_demand,
"topics": [t.strip() for t in course.topics.split(',')] if course.topics else [],
"language": course.language,
"overall_rating": course.overall_rating,
"total_ratings": course.total_ratings,
"rating_step_counts": course.rating_step_counts,
"course_page_url": settings.PROTOCOL + '://' + settings.DOMAIN + reverse("studygroups_course_page", args=(course.id,)),
"course_page_path": reverse("studygroups_course_page", args=(course.id,)),
"course_edit_path": reverse("studygroups_course_edit", args=(course.id,)),
"created_at": course.created_at,
"unlisted": course.unlisted,
"discourse_topic_url": course.discourse_topic_url if course.discourse_topic_url else settings.PROTOCOL + '://' + settings.DOMAIN + reverse("studygroups_generate_course_discourse_topic", args=(course.id,)),
}
if hasattr(course, 'num_learning_circles'):
data["learning_circles"] = course.num_learning_circles
return data
class CourseListView(View):
def get(self, request):
query_schema = {
"offset": schema.integer(),
"limit": schema.integer(),
"order": lambda v: (v, None) if v in ['title', 'usage', 'overall_rating', 'created_at', None] else (None, "must be 'title', 'usage', 'created_at', or 'overall_rating'"),
"user": schema.boolean(),
"include_unlisted": schema.boolean(),
"facilitator_guide": schema.boolean(),
}
data = schema.django_get_to_dict(request.GET)
clean_data, errors = schema.validate(query_schema, data)
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
courses = Course.objects.active().filter(archived=False)
# include_unlisted must be != false and the query must be scoped
# by user to avoid filtering out unlisted courses
if request.GET.get('include_unlisted', "false") == "false" or 'user' not in request.GET:
# return only courses that are not unlisted
# if the user is part of a team, include unlisted courses from the team
if request.user.is_authenticated:
team_query = TeamMembership.objects.active().filter(user=request.user).values('team')
team_member_ids = TeamMembership.objects.active().filter(team__in=team_query).values('user')
courses = courses.filter(Q(unlisted=False) | Q(unlisted=True, created_by__in=team_member_ids))
else:
courses = courses.filter(unlisted=False)
courses = courses.annotate(
num_learning_circles=Sum(
Case(
When(
studygroup__deleted_at__isnull=True, then=Value(1),
studygroup__course__id=F('id')
),
default=Value(0), output_field=models.IntegerField()
)
)
)
if 'user' in request.GET:
user_id = request.user.id
courses = courses.filter(created_by=user_id)
if 'course_id' in request.GET:
course_id = request.GET.get('course_id')
courses = courses.filter(pk=int(course_id))
order = request.GET.get('order', None)
if order in ['title', None]:
courses = courses.order_by('title')
elif order == 'overall_rating':
courses = courses.order_by('-overall_rating', '-total_ratings', 'title')
elif order == 'created_at':
courses = courses.order_by('-created_at')
else:
courses = courses.order_by('-num_learning_circles', 'title')
query = request.GET.get('q', '').strip()
if query:
tsquery = CustomSearchQuery(query, config='simple')
courses = courses.annotate(
search=SearchVector('topics', 'title', 'caption', 'provider', config='simple')
).filter(search=tsquery)
if 'topics' in request.GET:
topics = request.GET.get('topics').split(',')
query = Q(topics__icontains=topics[0])
for topic in topics[1:]:
query = Q(topics__icontains=topic) | query
courses = courses.filter(query)
if 'languages' in request.GET:
languages = request.GET.get('languages').split(',')
courses = courses.filter(language__in=languages)
if 'oer' in request.GET and request.GET.get('oer', False) == 'true':
courses = courses.filter(license__in=Course.OER_LICENSES)
if clean_data.get('facilitator_guide'):
courses = courses.filter(id__in=FacilitatorGuide.objects.active().values('course_id'))
if 'active' in request.GET:
active = request.GET.get('active') == 'true'
study_group_ids = Meeting.objects.active().filter(
meeting_date__gte=timezone.now()
).values('study_group')
course_ids = None
if active:
course_ids = StudyGroup.objects.published().filter(id__in=study_group_ids).values('course')
else:
course_ids = StudyGroup.objects.published().exclude(id__in=study_group_ids).values('course')
courses = courses.filter(id__in=course_ids)
data = {
'count': courses.count()
}
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
courses = courses[offset:offset+limit]
data['items'] = [ serialize_course(course) for course in courses ]
return json_response(request, data)
class CourseTopicListView(View):
""" Return topics for listed courses """
def get(self, request):
topics = Course.objects.active()\
.filter(unlisted=False)\
.exclude(topics='')\
.values_list('topics')
topics = [
item.strip().lower() for sublist in topics for item in sublist[0].split(',')
]
data = {}
data['topics'] = { k: v for k, v in list(Counter(topics).items()) }
return json_response(request, data)
def _image_check():
def _validate(value):
if value.startswith(settings.MEDIA_URL):
return value.replace(settings.MEDIA_URL, '', 1), None
else:
return None, 'Image must be a valid URL for an existing file'
return _validate
def _user_check(user):
def _validate(value):
if value is False:
if user.profile.email_confirmed_at is None:
return None, 'Users with unconfirmed email addresses cannot publish courses'
return value, None
return _validate
def _studygroup_check(studygroup_id):
if not StudyGroup.objects.filter(pk=int(studygroup_id)).exists():
return None, 'Learning circle matching ID not found'
else:
return StudyGroup.objects.get(pk=int(studygroup_id)), None
def _venue_name_check(venue_name):
if len(slugify(venue_name, allow_unicode=True)):
return venue_name, None
return None, 'Venue name should include at least one alpha-numeric character.'
def _meetings_validator(meetings):
if len(meetings) == 0:
return None, 'Need to specify at least one meeting'
meeting_schema = schema.schema({
"meeting_date": schema.date(),
"meeting_time": schema.time()
})
results = list(map(meeting_schema, meetings))
errors = list(filter(lambda x: x, map(lambda x: x[1], results)))
mtngs = list(map(lambda x: x[0], results))
if errors:
return None, 'Invalid meeting data'
else:
return mtngs, None
def _make_learning_circle_schema(request):
post_schema = {
"name": schema.text(length=128, required=False),
"course": schema.chain([
schema.integer(),
_course_check,
], required=True),
"description": schema.text(required=True, length=2000),
"course_description": schema.text(required=False, length=2000),
"venue_name": schema.chain([
schema.text(required=True, length=256),
_venue_name_check,
], required=True),
"venue_details": schema.text(required=True, length=128),
"venue_address": schema.text(required=True, length=256),
"venue_website": schema.text(length=256),
"city": schema.text(required=True, length=256),
"region": schema.text(required=True, length=256),
"country": schema.text(required=True, length=256),
"country_en": schema.text(required=True, length=256),
"latitude": schema.floating_point(),
"longitude": schema.floating_point(),
"place_id": schema.text(length=256),
"language": schema.text(required=True, length=6),
"online": schema.boolean(),
"meeting_time": schema.time(required=True),
"duration": schema.integer(required=True),
"timezone": schema.text(required=True, length=128),
"signup_question": schema.text(length=256),
"facilitator_goal": schema.text(length=256),
"facilitator_concerns": schema.text(length=256),
"image_url": schema.chain([
schema.text(),
_image_check(),
], required=False),
"draft": schema.boolean(),
"meetings": _meetings_validator,
}
return post_schema
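# A minimal POST body that would satisfy the schema above (illustrative only;
# values are made up, the course id must exist, and the exact date/time string
# formats are whatever schema.date()/schema.time() accept):
#   {
#     "course": 1, "description": "Weekly study group",
#     "venue_name": "Main Library", "venue_details": "Room 2",
#     "venue_address": "1 Main St", "city": "Boston", "region": "MA",
#     "country": "United States", "country_en": "United States",
#     "language": "en", "meeting_time": "18:00", "duration": 90,
#     "timezone": "America/New_York",
#     "meetings": [{"meeting_date": "2024-01-10", "meeting_time": "18:00"}]
#   }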
@method_decorator(login_required, name='dispatch')
class LearningCircleCreateView(View):
def post(self, request):
post_schema = _make_learning_circle_schema(request)
data = json.loads(request.body)
data, errors = schema.validate(post_schema, data)
if errors != {}:
logger.debug('schema error {0}'.format(json.dumps(errors)))
return json_response(request, {"status": "error", "errors": errors})
# start and end dates need to be set for db model to be valid
start_date = data.get('meetings')[0].get('meeting_date')
end_date = data.get('meetings')[-1].get('meeting_date')
# create learning circle
study_group = StudyGroup(
name=data.get('name', None),
course=data.get('course'),
course_description=data.get('course_description', None),
facilitator=request.user,
description=data.get('description'),
venue_name=data.get('venue_name'),
venue_address=data.get('venue_address'),
venue_details=data.get('venue_details'),
venue_website=data.get('venue_website', ''),
city=data.get('city'),
region=data.get('region'),
country=data.get('country'),
country_en=data.get('country_en'),
latitude=data.get('latitude'),
longitude=data.get('longitude'),
place_id=data.get('place_id', ''),
online=data.get('online', False),
language=data.get('language'),
start_date=start_date,
end_date=end_date,
meeting_time=data.get('meeting_time'),
duration=data.get('duration'),
timezone=data.get('timezone'),
image=data.get('image_url'),
signup_question=data.get('signup_question', ''),
facilitator_goal=data.get('facilitator_goal', ''),
facilitator_concerns=data.get('facilitator_concerns', '')
)
# use course.caption if course_description is not set
if study_group.course_description is None:
study_group.course_description = study_group.course.caption
# use course.title if name is not set
if study_group.name is None:
study_group.name = study_group.course.title
# only update the draft value if the user has verified their email address
if request.user.profile.email_confirmed_at is not None:
study_group.draft = data.get('draft', True)
study_group.save()
# notification about the new study group is sent at this point, but no associated meetings exist yet, which implies that the reminder can't use the date of the first meeting
generate_meetings_from_dates(study_group, data.get('meetings', []))
studygroup_url = f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_view_study_group', args=(study_group.id,))
return json_response(request, { "status": "created", "studygroup_url": studygroup_url })
@method_decorator(user_is_group_facilitator, name='dispatch')
@method_decorator(login_required, name='dispatch')
class LearningCircleUpdateView(SingleObjectMixin, View):
model = StudyGroup
pk_url_kwarg = 'study_group_id'
def post(self, request, *args, **kwargs):
study_group = self.get_object()
post_schema = _make_learning_circle_schema(request)
data = json.loads(request.body)
data, errors = schema.validate(post_schema, data)
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
# update learning circle
published = False
draft = data.get('draft', True)
# only publish a learning circle for a user with a verified email address
if draft is False and request.user.profile.email_confirmed_at is not None:
published = study_group.draft is True
study_group.draft = False
study_group.name = data.get('name', None)
study_group.course = data.get('course')
study_group.description = data.get('description')
study_group.course_description = data.get('course_description', None)
study_group.venue_name = data.get('venue_name')
study_group.venue_address = data.get('venue_address')
study_group.venue_details = data.get('venue_details')
study_group.venue_website = data.get('venue_website', '')
study_group.city = data.get('city')
study_group.region = data.get('region')
study_group.country = data.get('country')
study_group.country_en = data.get('country_en')
study_group.latitude = data.get('latitude')
study_group.longitude = data.get('longitude')
study_group.place_id = data.get('place_id', '')
study_group.language = data.get('language')
study_group.online = data.get('online')
study_group.meeting_time = data.get('meeting_time')
study_group.duration = data.get('duration')
study_group.timezone = data.get('timezone')
study_group.image = data.get('image_url')
study_group.signup_question = data.get('signup_question', '')
study_group.facilitator_goal = data.get('facilitator_goal', '')
study_group.facilitator_concerns = data.get('facilitator_concerns', '')
study_group.save()
generate_meetings_from_dates(study_group, data.get('meetings', []))
studygroup_url = f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_view_study_group', args=(study_group.id,))
return json_response(request, { "status": "updated", "studygroup_url": studygroup_url })
@method_decorator(csrf_exempt, name="dispatch")
class SignupView(View):
def post(self, request):
signup_questions = {
"goals": schema.text(required=True),
"support": schema.text(required=True),
"custom_question": schema.text(),
}
post_schema = {
"learning_circle": schema.chain([
schema.integer(),
lambda x: (None, 'No matching learning circle exists') if not StudyGroup.objects.filter(pk=int(x)).exists() else (StudyGroup.objects.get(pk=int(x)), None),
], required=True),
"name": schema.text(required=True),
"email": schema.email(required=True),
"communications_opt_in": schema.boolean(),
"consent": schema.chain([
schema.boolean(),
lambda consent: (None, 'Consent is needed to sign up') if not consent else (consent, None),
], required=True),
"mobile": schema.mobile(),
"signup_questions": schema.schema(signup_questions, required=True)
}
data = json.loads(request.body)
clean_data, errors = schema.validate(post_schema, data)
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
study_group = clean_data.get('learning_circle')
# Not sure how to cleanly implement validation like this using the schema?
if study_group.signup_question:
if not clean_data.get('signup_questions').get('custom_question'):
return json_response(request, {"status": "error", "errors": { "signup_questions": [{"custom_question": ["Field is required"]}]}})
if Application.objects.active().filter(email__iexact=clean_data.get('email'), study_group=study_group).exists():
application = Application.objects.active().get(email__iexact=clean_data.get('email'), study_group=study_group)
else:
application = Application(
study_group=study_group,
name=clean_data.get('name'),
email=clean_data.get('email'),
accepted_at=timezone.now()
)
application.name = clean_data.get('name')
application.signup_questions = json.dumps(clean_data.get('signup_questions'))
if clean_data.get('mobile'):
application.mobile = clean_data.get('mobile')
application.communications_opt_in = clean_data.get('communications_opt_in', False)
application.save()
return json_response(request, {"status": "created"})
class LandingPageLearningCirclesView(View):
""" return upcoming learning circles for landing page """
def get(self, request):
query_schema = {
"scope": schema.text(),
}
data = schema.django_get_to_dict(request.GET)
clean_data, errors = schema.validate(query_schema, data)
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
study_groups_unsliced = StudyGroup.objects.published()
if 'scope' in request.GET and request.GET.get('scope') == "team":
user = request.user
team_ids = TeamMembership.objects.active().filter(user=user).values("team")
if team_ids.count() == 0:
return json_response(request, { "status": "error", "errors": ["User is not on a team."] })
team_members = TeamMembership.objects.active().filter(team__in=team_ids).values("user")
study_groups_unsliced = study_groups_unsliced.filter(facilitator__in=team_members)
# get learning circles with upcoming meetings
study_groups = study_groups_unsliced.filter(
meeting__meeting_date__gte=timezone.now(),
).annotate(
next_meeting_date=Min('meeting__meeting_date')
).order_by('next_meeting_date')[:3]
# if there are fewer than 3 with upcoming meetings
if study_groups.count() < 3:
# pad with learning circles with the most recent meetings
past_study_groups = study_groups_unsliced.filter(
meeting__meeting_date__lt=timezone.now(),
).annotate(
next_meeting_date=Max('meeting__meeting_date')
).order_by('-next_meeting_date')
study_groups = list(study_groups) + list(past_study_groups[:3-study_groups.count()])
data = {
'items': [ serialize_learning_circle(sg) for sg in study_groups ]
}
return json_response(request, data)
class LandingPageStatsView(View):
""" Return stats for the landing page """
"""
- Number of active learning circles
- Number of cities where learning circle happened
- Number of facilitators who ran at least 1 learning circle
- Number of learning circles to date
"""
def get(self, request):
study_groups = StudyGroup.objects.published().filter(
meeting__meeting_date__gte=timezone.now()
).annotate(
next_meeting_date=Min('meeting__meeting_date')
)
cities = StudyGroup.objects.published().filter(
latitude__isnull=False,
longitude__isnull=False,
).distinct('city').values('city')
learning_circle_count = StudyGroup.objects.published().count()
facilitators = StudyGroup.objects.active().distinct('facilitator').values('facilitator')
cities_s = list(set([c['city'].split(',')[0].strip() for c in cities]))
data = {
"active_learning_circles": study_groups.count(),
"cities": len(cities_s),
"facilitators": facilitators.count(),
"learning_circle_count": learning_circle_count
}
return json_response(request, data)
class ImageUploadView(View):
def post(self, request):
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.cleaned_data['image']
storage = get_storage_class()()
filename = storage.save(image.name, image)
# TODO - get full URL
image_url = ''.join([settings.MEDIA_URL, filename])
return json_response(request, {"image_url": image_url})
else:
return json_response(request, {'error': 'not a valid image'})
def detect_platform_from_url(request):
url = request.GET.get('url', "")
platform = course_platform_from_url(url)
return json_response(request, { "platform": platform })
class CourseLanguageListView(View):
""" Return langugages for listed courses """
def get(self, request):
languages = Course.objects.active().filter(unlisted=False).values_list('language', flat=True)
languages = set(languages)
languages_dict = [
get_language_info(language) for language in languages
]
data = { "languages": languages_dict }
return json_response(request, data)
class FinalReportListView(View):
def get(self, request):
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
studygroups = StudyGroup.objects.published().annotate(surveys=Count('learnersurveyresponse')).filter(surveys__gt=0, end_date__lt=today).order_by('-end_date')
data = {}
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
studygroups = studygroups[offset:offset+limit]
def _map(sg):
data = serialize_learning_circle(sg)
if request.user.is_authenticated:
data['signup_count'] = sg.application_set.active().count()
return data
data['items'] = [ _map(sg) for sg in studygroups ]
return json_response(request, data)
class InstagramFeed(View):
def get(self, request):
""" Get user media from Instagram Basic Diplay API """
""" https://developers.facebook.com/docs/instagram-basic-display-api/reference/media """
url = "https://graph.instagram.com/me/media?fields=id,permalink&access_token={}".format(settings.INSTAGRAM_TOKEN)
try:
response = get_json_response(url)
if response.get("data", None):
return json_response(request, { "items": response["data"] })
if response.get("error", None):
return json_response(request, { "status": "error", "errors": response["error"]["message"] })
logger.error('Could not make request to Instagram: {}'.format(response["error"]["message"]))
return json_response(request, { "status": "error", "errors": "Could not make request to Instagram" })
except ConnectionError as e:
logger.error('Could not make request to Instagram')
return json_response(request, { "status": "error", "errors": str(e) })
def serialize_team_data(team):
serialized_team = {
"id": team.pk,
"name": team.name,
"subtitle": team.subtitle,
"page_slug": team.page_slug,
"member_count": team.teammembership_set.active().count(),
"zoom": team.zoom,
"date_established": team.created_at.strftime("%B %Y"),
"intro_text": team.intro_text,
"website": team.website,
"email_address": team.email_address,
"location": team.location,
"facilitators": [],
"membership": team.membership,
}
members = team.teammembership_set.active().values('user')
studygroup_count = StudyGroup.objects.published().filter(facilitator__in=members).count()
serialized_team["studygroup_count"] = studygroup_count
facilitators = team.teammembership_set.active()
for facilitator in facilitators:
facilitator_role = "FACILITATOR" if facilitator.role == TeamMembership.MEMBER else facilitator.role
serialized_facilitator = {
"first_name": facilitator.user.first_name,
"city": facilitator.user.profile.city,
"bio": facilitator.user.profile.bio,
"contact_url": facilitator.user.profile.contact_url,
"role": facilitator_role,
}
if facilitator.user.profile.avatar:
serialized_facilitator["avatar_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + facilitator.user.profile.avatar.url
serialized_team["facilitators"].append(serialized_facilitator)
if team.page_image:
serialized_team["image_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + team.page_image.url
if team.logo:
serialized_team["logo_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + team.logo.url
if team.latitude and team.longitude:
serialized_team["coordinates"] = {
"longitude": team.longitude,
"latitude": team.latitude,
}
return serialized_team
class TeamListView(View):
def get(self, request):
data = {}
teams = Team.objects.all().order_by('name')
data["count"] = teams.count()
if 'image' in request.GET and request.GET.get('image') == "true":
teams = teams.exclude(page_image="")
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
teams = teams[offset:offset+limit]
data['items'] = [ serialize_team_data(team) for team in teams ]
return json_response(request, data)
class TeamDetailView(SingleObjectMixin, View):
model = Team
pk_url_kwarg = 'team_id'
def get(self, request, **kwargs):
data = {}
team = self.get_object()
serialized_team = serialize_team_data(team)
if request.user.is_authenticated and team.teammembership_set.active().filter(user=request.user, role=TeamMembership.ORGANIZER).exists():
# ensure user is team organizer
serialized_team['team_invitation_link'] = team.team_invitation_link()
data['item'] = serialized_team
return json_response(request, data)
def serialize_team_membership(tm):
role_label = dict(TeamMembership.ROLES)[tm.role]
email_validated = hasattr(tm.user, 'profile') and tm.user.profile.email_confirmed_at is not None
email_confirmed_at = tm.user.profile.email_confirmed_at.strftime("%-d %B %Y") if email_validated else "--"
return {
"facilitator": {
"first_name": tm.user.first_name,
"last_name": tm.user.last_name,
"email": tm.user.email,
"email_confirmed_at": email_confirmed_at
},
"role": role_label,
"id": tm.id,
}
def serialize_team_invitation(ti):
role_label = dict(TeamMembership.ROLES)[ti.role]
return {
"facilitator": {
"email": ti.email,
},
"created_at": ti.created_at.strftime("%-d %B %Y"),
"role": role_label,
"id": ti.id,
}
@method_decorator(login_required, name="dispatch")
class TeamMembershipListView(View):
def get(self, request, **kwargs):
query_schema = {
"offset": schema.integer(),
"limit": schema.integer(),
"team_id": schema.integer(required=True),
}
data = schema.django_get_to_dict(request.GET)
clean_data, errors = schema.validate(query_schema, data)
team_id = clean_data["team_id"]
is_team_organizer = TeamMembership.objects.active().filter(team=team_id, user=request.user, role=TeamMembership.ORGANIZER).exists()
if not is_team_organizer:
return HttpResponseForbidden()
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
team_memberships = TeamMembership.objects.active().filter(team=team_id)
data = {
'count': team_memberships.count()
}
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
team_memberships = team_memberships[offset:offset+limit]
data['items'] = [serialize_team_membership(m) for m in team_memberships]
return json_response(request, data)
@method_decorator(login_required, name="dispatch")
class TeamInvitationListView(View):
def get(self, request, **kwargs):
query_schema = {
"offset": schema.integer(),
"limit": schema.integer(),
"team_id": schema.integer(required=True)
}
data = schema.django_get_to_dict(request.GET)
clean_data, errors = schema.validate(query_schema, data)
team_id = clean_data["team_id"]
is_team_organizer = TeamMembership.objects.active().filter(team=team_id, user=request.user, role=TeamMembership.ORGANIZER).exists()
if not is_team_organizer:
return HttpResponseForbidden()
if errors != {}:
return json_response(request, {"status": "error", "errors": errors})
team_invitations = TeamInvitation.objects.filter(team=team_id, responded_at__isnull=True)
data = {
'count': team_invitations.count()
}
if 'offset' in request.GET or 'limit' in request.GET:
limit, offset = _limit_offset(request)
data['offset'] = offset
data['limit'] = limit
team_invitations = team_invitations[offset:offset+limit]
data['items'] = [serialize_team_invitation(i) for i in team_invitations]
return json_response(request, data)
def serialize_invitation_notification(invitation):
return {
"team_name": invitation.team.name,
"team_organizer_name": invitation.organizer.first_name,
"team_invitation_confirmation_url": reverse("studygroups_facilitator_invitation_confirm", args=(invitation.id,)),
}
@login_required
def facilitator_invitation_notifications(request):
email_validated = hasattr(request.user, 'profile') and request.user.profile.email_confirmed_at is not None
pending_invitations = TeamInvitation.objects.filter(email__iexact=request.user.email, responded_at__isnull=True)
eligible_team = eligible_team_by_email_domain(request.user)
invitation_notifications = [ serialize_invitation_notification(i) for i in pending_invitations]
if email_validated and eligible_team:
implicit_invitation_notification = {
'team_name': eligible_team.name,
'team_invitation_confirmation_url': reverse("studygroups_facilitator_invitation_confirm")
}
invitation_notifications.append(implicit_invitation_notification)
data = {
"items": invitation_notifications
}
return json_response(request, data)
@user_is_team_organizer
@login_required
@require_http_methods(["POST"])
def create_team_invitation_link(request, team_id):
team = Team.objects.get(pk=team_id)
team.generate_invitation_token()
return json_response(request, { "status": "updated", "team_invitation_link": team.team_invitation_link() })
@user_is_team_organizer
@login_required
@require_http_methods(["POST"])
def delete_team_invitation_link(request, team_id):
team = Team.objects.get(pk=team_id)
team.invitation_token = None
team.save()
return json_response(request, { "status": "deleted", "team_invitation_link": None })
def serialize_announcement(announcement):
return {
"text": announcement.text,
"link": announcement.link,
"link_text": announcement.link_text,
"color": announcement.color,
}
class AnnouncementListView(View):
def get(self, request):
announcements = Announcement.objects.filter(display=True)
data = {
"count": announcements.count(),
"items": [ serialize_announcement(announcement) for announcement in announcements ]
}
return json_response(request, data)
def cities(request):
cities = StudyGroup.objects.published().annotate(city_len=Length('city')).filter(city_len__gt=1).values_list('city', flat=True).distinct('city')
data = {
"count": cities.count(),
"items": [{ "label": city, "value": city.split(',')[0].lower().replace(' ', '_') } for city in cities]
}
return json_response(request, data)
|
|
# from ..utils.event import Eventful
from wasm.immtypes import (
LocalVarXsImm,
GlobalVarXsImm,
MemoryImm,
CurGrowMemImm,
I32ConstImm,
I64ConstImm,
F32ConstImm,
F64ConstImm,
)
import struct
from ctypes import c_int32
from .types import (
I32,
I64,
F32,
F64,
Value_t,
UnreachableInstructionTrap,
ConcretizeStack,
ZeroDivisionTrap,
OverflowDivisionTrap,
InvalidConversionTrap,
OutOfBoundsMemoryTrap,
)
from ..core.smtlib import Operators, BitVec, issymbolic
from ..utils.event import Eventful
from decimal import Decimal, InvalidOperation
import operator
import math
class Executor(Eventful):
"""
Contains execution semantics for all WASM instructions that don't involve control flow (and thus only need access
to the store and the stack).
In lieu of annotating every single instruction with the relevant link to the docs, we direct you here:
https://www.w3.org/TR/wasm-core-1/#a7-index-of-instructions
"""
_published_events = {
"write_memory",
"read_memory",
"set_global",
"read_global",
"set_local",
"read_local",
}
def __init__(self, constraints, *args, **kwargs):
self._mapping = {
0x00: self.unreachable,
0x01: self.nop,
0x02: self.nop, # block
0x03: self.nop, # loop
0x04: self.nop, # if
0x05: self.nop, # else
0x0B: self.nop, # end
0x0C: self.nop, # br
0x0D: self.nop, # br_if
0x0E: self.nop, # br_table
0x0F: self.nop, # return
0x10: self.nop, # call
0x11: self.nop, # call_indirect
0x1A: self.drop,
0x1B: self.select,
0x20: self.get_local,
0x21: self.set_local,
0x22: self.tee_local,
0x23: self.get_global,
0x24: self.set_global,
0x28: self.i32_load,
0x29: self.i64_load,
0x2A: self.f32_load,
0x2B: self.f64_load,
0x2C: self.i32_load8_s,
0x2D: self.i32_load8_u,
0x2E: self.i32_load16_s,
0x2F: self.i32_load16_u,
0x30: self.i64_load8_s,
0x31: self.i64_load8_u,
0x32: self.i64_load16_s,
0x33: self.i64_load16_u,
0x34: self.i64_load32_s,
0x35: self.i64_load32_u,
0x36: self.i32_store,
0x37: self.i64_store,
0x38: self.f32_store,
0x39: self.f64_store,
0x3A: self.i32_store8,
0x3B: self.i32_store16,
0x3C: self.i64_store8,
0x3D: self.i64_store16,
0x3E: self.i64_store32,
0x3F: self.current_memory,
0x40: self.grow_memory,
0x41: self.i32_const,
0x42: self.i64_const,
0x43: self.f32_const,
0x44: self.f64_const,
0x45: self.i32_eqz,
0x46: self.i32_eq,
0x47: self.i32_ne,
0x48: self.i32_lt_s,
0x49: self.i32_lt_u,
0x4A: self.i32_gt_s,
0x4B: self.i32_gt_u,
0x4C: self.i32_le_s,
0x4D: self.i32_le_u,
0x4E: self.i32_ge_s,
0x4F: self.i32_ge_u,
0x50: self.i64_eqz,
0x51: self.i64_eq,
0x52: self.i64_ne,
0x53: self.i64_lt_s,
0x54: self.i64_lt_u,
0x55: self.i64_gt_s,
0x56: self.i64_gt_u,
0x57: self.i64_le_s,
0x58: self.i64_le_u,
0x59: self.i64_ge_s,
0x5A: self.i64_ge_u,
0x5B: self.f32_eq,
0x5C: self.f32_ne,
0x5D: self.f32_lt,
0x5E: self.f32_gt,
0x5F: self.f32_le,
0x60: self.f32_ge,
0x61: self.f64_eq,
0x62: self.f64_ne,
0x63: self.f64_lt,
0x64: self.f64_gt,
0x65: self.f64_le,
0x66: self.f64_ge,
0x67: self.i32_clz,
0x68: self.i32_ctz,
0x69: self.i32_popcnt,
0x6A: self.i32_add,
0x6B: self.i32_sub,
0x6C: self.i32_mul,
0x6D: self.i32_div_s,
0x6E: self.i32_div_u,
0x6F: self.i32_rem_s,
0x70: self.i32_rem_u,
0x71: self.i32_and,
0x72: self.i32_or,
0x73: self.i32_xor,
0x74: self.i32_shl,
0x75: self.i32_shr_s,
0x76: self.i32_shr_u,
0x77: self.i32_rotl,
0x78: self.i32_rotr,
0x79: self.i64_clz,
0x7A: self.i64_ctz,
0x7B: self.i64_popcnt,
0x7C: self.i64_add,
0x7D: self.i64_sub,
0x7E: self.i64_mul,
0x7F: self.i64_div_s,
0x80: self.i64_div_u,
0x81: self.i64_rem_s,
0x82: self.i64_rem_u,
0x83: self.i64_and,
0x84: self.i64_or,
0x85: self.i64_xor,
0x86: self.i64_shl,
0x87: self.i64_shr_s,
0x88: self.i64_shr_u,
0x89: self.i64_rotl,
0x8A: self.i64_rotr,
0x8B: self.f32_abs,
0x8C: self.f32_neg,
0x8D: self.f32_ceil,
0x8E: self.f32_floor,
0x8F: self.f32_trunc,
0x90: self.f32_nearest,
0x91: self.f32_sqrt,
0x92: self.f32_add,
0x93: self.f32_sub,
0x94: self.f32_mul,
0x95: self.f32_div,
0x96: self.f32_min,
0x97: self.f32_max,
0x98: self.f32_copysign,
0x99: self.f64_abs,
0x9A: self.f64_neg,
0x9B: self.f64_ceil,
0x9C: self.f64_floor,
0x9D: self.f64_trunc,
0x9E: self.f64_nearest,
0x9F: self.f64_sqrt,
0xA0: self.f64_add,
0xA1: self.f64_sub,
0xA2: self.f64_mul,
0xA3: self.f64_div,
0xA4: self.f64_min,
0xA5: self.f64_max,
0xA6: self.f64_copysign,
0xA7: self.i32_wrap_i64,
0xA8: self.i32_trunc_s_f32,
0xA9: self.i32_trunc_u_f32,
0xAA: self.i32_trunc_s_f64,
0xAB: self.i32_trunc_u_f64,
0xAC: self.i64_extend_s_i32,
0xAD: self.i64_extend_u_i32,
0xAE: self.i64_trunc_s_f32,
0xAF: self.i64_trunc_u_f32,
0xB0: self.i64_trunc_s_f64,
0xB1: self.i64_trunc_u_f64,
0xB2: self.f32_convert_s_i32,
0xB3: self.f32_convert_u_i32,
0xB4: self.f32_convert_s_i64,
0xB5: self.f32_convert_u_i64,
0xB6: self.f32_demote_f64,
0xB7: self.f64_convert_s_i32,
0xB8: self.f64_convert_u_i32,
0xB9: self.f64_convert_s_i64,
0xBA: self.f64_convert_u_i64,
0xBB: self.f64_promote_f32,
0xBC: self.i32_reinterpret_f32,
0xBD: self.i64_reinterpret_f64,
0xBE: self.f32_reinterpret_i32,
0xBF: self.f64_reinterpret_i64,
}
#: Constraint set to use for checking overflows and boundary conditions
self.constraints = constraints
self.zero_div = False
self.overflow = False
super().__init__()
def __getstate__(self):
state = super().__getstate__()
state["mapping"] = self._mapping
state["constraints"] = self.constraints
state["zero_div"] = self.zero_div
state["overflow"] = self.overflow
return state
def __setstate__(self, state):
self._mapping = state["mapping"]
self.constraints = state["constraints"]
self.zero_div = state["zero_div"]
self.overflow = state["overflow"]
super().__setstate__(state)
def check_overflow(self, expression) -> bool:
if issymbolic(expression):
self.overflow = Operators.OR(self.overflow, expression)
return False
return expression
def check_zero_div(self, expression) -> bool:
if issymbolic(expression):
self.zero_div = Operators.OR(self.zero_div, expression)
return False
return expression
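# Illustrative use of the two checks above (not shown in this excerpt):
# arithmetic handlers are expected to call something like
#   if self.check_zero_div(divisor == 0):
#       raise ZeroDivisionTrap()
# For a symbolic divisor the condition is OR-ed into self.zero_div and False is
# returned, so the trap only fires for a concretely-zero divisor and the
# accumulated expression can be checked against self.constraints afterwards.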
def dispatch(self, inst, store, stack):
"""
Selects the correct semantics for the given instruction, and executes them
:param inst: the Instruction to execute
:param store: the current Store
:param stack: the current Stack
:return: the result of the semantic function, which is (probably) always None
"""
opcode = inst.opcode
assert opcode in self._mapping
func = self._mapping[opcode]
try:
if inst.imm:
return func(store, stack, inst.imm)
else:
return func(store, stack)
except (ZeroDivisionError, InvalidOperation):
raise ZeroDivisionTrap()
def unreachable(self, store, stack):
raise UnreachableInstructionTrap()
def nop(self, store, stack):
pass
def drop(self, store, stack):
stack.has_type_on_top(Value_t, 1)
stack.pop()
def select(self, store, stack):
c = stack.pop()
v2 = stack.pop()
v1 = stack.pop()
assert isinstance(c, (I32, BitVec)), f"{type(c)} is not I32"
if not issymbolic(v2) and not issymbolic(v1):
assert type(v2) == type(v1), f"{type(v2)} is not the same as {type(v1)}"
if issymbolic(c):
stack.push(Operators.ITEBV(getattr(v1, "size", 32), c != 0, v1, v2))
else:
if c != 0:
stack.push(v1)
else:
stack.push(v2)
def get_local(self, store, stack, imm: LocalVarXsImm):
f = stack.get_frame().frame
assert imm.local_index in range(len(f.locals))
self._publish("will_get_local", imm.local_index)
stack.push(f.locals[imm.local_index])
self._publish("did_get_local", imm.local_index, stack.peek())
def set_local(self, store, stack, imm: LocalVarXsImm):
f = stack.get_frame().frame
assert imm.local_index in range(len(f.locals))
stack.has_type_on_top(Value_t, 1)
self._publish("will_set_local", imm.local_index, stack.peek())
f.locals[imm.local_index] = stack.pop()
self._publish("did_set_local", imm.local_index, f.locals[imm.local_index])
def tee_local(self, store, stack, imm: LocalVarXsImm):
stack.has_type_on_top(Value_t, 1)
v = stack.pop()
stack.push(v)
stack.push(v)
self.set_local(store, stack, imm)
def get_global(self, store, stack, imm: GlobalVarXsImm):
f = stack.get_frame().frame
assert imm.global_index in range(len(f.module.globaladdrs))
a = f.module.globaladdrs[imm.global_index]
assert a in range(len(store.globals))
glob = store.globals[a]
self._publish("will_get_global", imm.global_index, glob.value)
stack.push(glob.value)
self._publish("did_get_global", imm.global_index, stack.peek())
def set_global(self, store, stack, imm: GlobalVarXsImm):
f = stack.get_frame().frame
assert imm.global_index in range(len(f.module.globaladdrs))
a = f.module.globaladdrs[imm.global_index]
assert a in range(len(store.globals))
stack.has_type_on_top(Value_t, 1)
self._publish("did_set_global", imm.global_index, stack.peek())
store.globals[a].value = stack.pop()
self._publish("did_set_global", imm.global_index, store.globals[a].value)
def i32_load(self, store, stack, imm: MemoryImm):
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
stack.has_type_on_top(I32, 1)
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(
-1, I32, "Concretizing memory read", i
) # TODO - Implement a symbolic memory model
ea = i + imm.offset
if (ea + 4) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + 4)
self._publish("will_read_memory", ea, ea + 4)
c = Operators.CONCAT(32, *map(Operators.ORD, reversed(mem.data[ea : ea + 4])))
stack.push(I32.cast(c))
self._publish("did_read_memory", ea, stack.peek())
def i64_load(self, store, stack, imm: MemoryImm):
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
stack.has_type_on_top(I32, 1)
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(
-1, I32, "Concretizing memory read", i
) # TODO - Implement a symbolic memory model
ea = i + imm.offset
if (ea + 8) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + 8)
self._publish("will_read_memory", ea, ea + 8)
c = Operators.CONCAT(64, *map(Operators.ORD, reversed(mem.data[ea : ea + 8])))
stack.push(I64.cast(c))
self._publish("did_read_memory", ea, stack.peek())
def int_load(self, store, stack, imm: MemoryImm, ty: type, size: int, signed: bool):
assert ty in {I32, I64}, f"{ty} is not an I32 or I64"
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
stack.has_type_on_top(I32, 1)
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(
-1, I32, "Concretizing memory read", i
) # TODO - Implement a symbolic memory model
ea = i + imm.offset
if ea not in range(len(mem.data)):
raise OutOfBoundsMemoryTrap(ea)
if ea + (size // 8) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + (size // 8))
self._publish("will_read_memory", ea, ea + size // 8)
c = Operators.CONCAT(size, *map(Operators.ORD, reversed(mem.data[ea : ea + (size // 8)])))
width = 32 if ty is I32 else 64
if signed:
c = Operators.SEXTEND(c, size, width)
else:
c = Operators.ZEXTEND(c, width)
# Mypy can't figure out that ty will definitely have a cast method, so we ignore the type
stack.push(ty.cast(c)) # type: ignore
self._publish("did_read_memory", ea, stack.peek())
def i32_load8_s(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I32, 8, True)
def i32_load8_u(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I32, 8, False)
def i32_load16_s(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I32, 16, True)
def i32_load16_u(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I32, 16, False)
def i64_load8_s(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 8, True)
def i64_load8_u(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 8, False)
def i64_load16_s(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 16, True)
def i64_load16_u(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 16, False)
def i64_load32_s(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 32, True)
def i64_load32_u(self, store, stack, imm: MemoryImm):
self.int_load(store, stack, imm, I64, 32, False)
def int_store(self, store, stack, imm: MemoryImm, ty: type, n=None):
assert ty in {I32, I64}, f"{ty} is not an I32 or I64"
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
stack.has_type_on_top(ty, 1)
c = stack.pop()
stack.has_type_on_top(I32, 1)
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(
-2, I32, "Concretizing integer memory write", i
) # TODO - Implement a symbolic memory model
ea = i + imm.offset
N = n if n else (32 if ty is I32 else 64)
if ea not in range(len(mem.data)):
raise OutOfBoundsMemoryTrap(ea)
if (ea + (N // 8)) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + (N // 8))
if n:
b = [
Operators.CHR(Operators.EXTRACT(c % 2 ** N, offset, 8)) for offset in range(0, N, 8)
]
else:
b = [Operators.CHR(Operators.EXTRACT(c, offset, 8)) for offset in range(0, N, 8)]
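# Illustrative example only (not from the spec text): an i32.store16 of c == 0x1234
# splits into the bytes 0x34, 0x12, i.e. least-significant byte first, so memory is
# written in little-endian order.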
self._publish("will_write_memory", ea, ea + len(b), b)
for idx, v in enumerate(b):
mem.data[ea + idx] = v
self._publish("did_write_memory", ea, b)
def i32_store(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I32)
def i64_store(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I64)
def i32_store8(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I32, 8)
def i32_store16(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I32, 16)
def i64_store8(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I64, 8)
def i64_store16(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I64, 16)
def i64_store32(self, store, stack, imm: MemoryImm):
self.int_store(store, stack, imm, I64, 32)
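# Note on the two memory-size instructions below: sizes are expressed in WebAssembly
# pages of 64 KiB (65536 bytes). As an illustrative example, with 131072 bytes of
# memory, current_memory pushes I32(2); grow_memory pushes the old size in pages on
# success, or I32(-1) if the grow fails.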
def current_memory(self, store, stack, imm: CurGrowMemImm):
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
stack.push(I32(len(mem.data) // 65536))
def grow_memory(self, store, stack, imm: CurGrowMemImm):
f = stack.get_frame().frame
assert f.module.memaddrs
a = f.module.memaddrs[0]
assert a in range(len(store.mems))
mem = store.mems[a]
sz = len(mem.data) // 65536
stack.has_type_on_top(I32, 1)
if issymbolic(stack.peek()):
raise ConcretizeStack(-1, I32, "Concretizing memory grow operand", stack.peek())
if mem.grow(stack.pop()):
stack.push(I32(sz))
else:
stack.push(I32(-1))
def i32_const(self, store, stack, imm: I32ConstImm):
stack.push(I32.cast(imm.value))
def i64_const(self, store, stack, imm: I64ConstImm):
stack.push(I64.cast(imm.value))
def i32_eqz(self, store, stack):
stack.has_type_on_top(I32, 1)
c1 = stack.pop()
v = c1 == 0
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_eq(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c2 == c1
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_ne(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c2 != c1
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_lt_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 < c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_lt_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.ULT(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_gt_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 > c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_gt_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.UGT(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_le_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 <= c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_le_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.ULE(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_ge_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 >= c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i32_ge_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.UGE(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_eqz(self, store, stack):
stack.has_type_on_top(I64, 1)
c1 = stack.pop()
v = c1 == 0
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_eq(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c2 == c1
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_ne(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c2 != c1
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_lt_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 < c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_lt_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.ULT(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_gt_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 > c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_gt_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.UGT(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_le_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 <= c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_le_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.ULE(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_ge_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = c1 >= c2
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
def i64_ge_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
v = Operators.UGE(c1, c2)
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(I32.cast(I32(1) if v else I32(0)))
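# The bit-counting instructions below (clz, ctz, popcnt) avoid Python-level branching
# on the operand so they also work when it is symbolic: the result is accumulated
# through a chain of ITEBV selects over the extracted bits. Illustrative concrete
# walk-through of the clz loop: for c1 == 0x00F00000 the flag first becomes true after
# inspecting bit 23 (pos == 8), so every later select keeps res == 8 and I32(8) ends
# up on the stack.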
def i32_clz(self, store, stack):
stack.has_type_on_top(I32, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 31, 1) == 1
res = 0
for pos in range(1, 32):
res = Operators.ITEBV(32, flag, res, pos)
flag = Operators.OR(flag, Operators.EXTRACT(c1, 31 - pos, 1) == 1)
res = Operators.ITEBV(32, flag, res, 32)
stack.push(I32.cast(res))
def i32_ctz(self, store, stack): # Copied from x86 TZCNT
stack.has_type_on_top(I32, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 0, 1) == 1
res = 0
for pos in range(1, 32):
res = Operators.ITEBV(32, flag, res, pos)
flag = Operators.OR(flag, Operators.EXTRACT(c1, pos, 1) == 1)
res = Operators.ITEBV(32, flag, res, 32)
stack.push(I32.cast(res))
def i32_popcnt(self, store, stack):
stack.has_type_on_top(I32, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 0, 1) != 0
res = 0
for pos in range(1, 32):
res = Operators.ITEBV(32, flag, res + 1, res)
flag = Operators.EXTRACT(c1, pos, 1) != 0
res = Operators.ITEBV(32, flag, res + 1, res)
stack.push(I32.cast(res))
def i32_add(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast((c2 + c1) % 2 ** 32))
def i32_sub(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast((c1 - c2 + 2 ** 32) % 2 ** 32))
def i32_mul(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast((c2 * c1) % 2 ** 32))
def i32_div_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
can_div_0 = c2 == 0
if self.check_zero_div(can_div_0):
raise ZeroDivisionTrap()
res = Operators.SDIV(c1, c2)
can_overflow = res == 2 ** 31
if self.check_overflow(can_overflow):
raise OverflowDivisionTrap()
stack.push(I32.cast(res))
def i32_div_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
can_div_0 = c2 == 0
if self.check_zero_div(can_div_0):
raise ZeroDivisionTrap()
if not issymbolic(c2):
c2 = I32.to_unsigned(c2)
if not issymbolic(c1):
c1 = I32.to_unsigned(c1)
stack.push(I32.cast(Operators.UDIV(c1, c2)))
def i32_rem_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
if self.check_zero_div(c2 == 0):
raise ZeroDivisionTrap()
stack.push(I32.cast(Operators.SREM(c1, c2)))
def i32_rem_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c2):
c2 = I32.to_unsigned(c2)
if not issymbolic(c1):
c1 = I32.to_unsigned(c1)
if self.check_zero_div(c2 == 0):
raise ZeroDivisionTrap()
stack.push(I32.cast(Operators.UREM(c1, c2)))
def i32_and(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast(c2 & c1))
def i32_or(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast(c2 | c1))
def i32_xor(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast(c2 ^ c1))
def i32_shl(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I32.cast((c1 << (c2 % 32)) % 2 ** 32))
def i32_shr_s(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
k = c2 % 32
stack.push(I32.cast(Operators.SAR(32, c1, k)))
def i32_shr_u(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c2):
c2 = I32.to_unsigned(c2)
if not issymbolic(c1):
c1 = I32.to_unsigned(c1)
stack.push(I32.cast(c1 >> (c2 % 32)))
def i32_rotl(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c1):
c1 = I32.to_unsigned(c1)
k = c2 % 32
stack.push(I32.cast((c1 << k) | c1 >> (32 - k)))
def i32_rotr(self, store, stack):
stack.has_type_on_top(I32, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c1):
c1 = I32.to_unsigned(c1)
k = c2 % 32
stack.push(I32.cast((c1 >> k) | c1 << (32 - k)))
def i64_clz(self, store, stack):
stack.has_type_on_top(I64, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 63, 1) == 1
res = 0
for pos in range(1, 64):
res = Operators.ITEBV(64, flag, res, pos)
flag = Operators.OR(flag, Operators.EXTRACT(c1, 63 - pos, 1) == 1)
res = Operators.ITEBV(64, flag, res, 64)
stack.push(I64.cast(res))
def i64_ctz(self, store, stack):
stack.has_type_on_top(I64, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 0, 1) == 1
res = 0
for pos in range(1, 64):
res = Operators.ITEBV(64, flag, res, pos)
flag = Operators.OR(flag, Operators.EXTRACT(c1, pos, 1) == 1)
res = Operators.ITEBV(64, flag, res, 64)
stack.push(I64.cast(res))
def i64_popcnt(self, store, stack):
stack.has_type_on_top(I64, 1)
c1 = stack.pop()
flag = Operators.EXTRACT(c1, 0, 1) != 0
res = 0
for pos in range(1, 64):
res = Operators.ITEBV(64, flag, res + 1, res)
flag = Operators.EXTRACT(c1, pos, 1) != 0
res = Operators.ITEBV(64, flag, res + 1, res)
stack.push(I64.cast(res))
def i64_add(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast((c2 + c1) % 2 ** 64))
def i64_sub(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast((c1 - c2 + 2 ** 64) % 2 ** 64))
def i64_mul(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast((c2 * c1) % 2 ** 64))
def i64_div_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
can_div_0 = c2 == 0
if self.check_zero_div(can_div_0):
raise ZeroDivisionTrap()
if issymbolic(c1) or issymbolic(c2):
res = Operators.SDIV(c1, c2)
else:
res = int(math.trunc(Decimal(c1) / Decimal(c2)))
can_overflow = res == 2 ** 63
if self.check_overflow(can_overflow):
raise OverflowDivisionTrap()
stack.push(I64.cast(res))
def i64_div_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
can_div_0 = c2 == 0
if self.check_zero_div(can_div_0):
raise ZeroDivisionTrap()
if not issymbolic(c2):
c2 = I64.to_unsigned(c2)
if not issymbolic(c1):
c1 = I64.to_unsigned(c1)
stack.push(I64.cast(Operators.UDIV(c1, c2)))
def i64_rem_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
if self.check_zero_div(c2 == 0):
raise ZeroDivisionTrap()
if issymbolic(c1) or issymbolic(c2):
res = Operators.SREM(c1, c2)
else:
res = c1 - int(Decimal(c1) / Decimal(c2)) * c2
stack.push(I64.cast(res))
def i64_rem_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c2):
c2 = I64.to_unsigned(c2)
if not issymbolic(c1):
c1 = I64.to_unsigned(c1)
if self.check_zero_div(c2 == 0):
raise ZeroDivisionTrap()
stack.push(I64.cast(Operators.UREM(c1, c2)))
def i64_and(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast(c2 & c1))
def i64_or(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast(c2 | c1))
def i64_xor(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast(c2 ^ c1))
def i64_shl(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
stack.push(I64.cast((c1 << (c2 % 64)) % 2 ** 64))
def i64_shr_s(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
k = c2 % 64
stack.push(I64.cast(Operators.SAR(64, c1, k)))
def i64_shr_u(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c2):
c2 = I64.to_unsigned(c2)
if not issymbolic(c1):
c1 = I64.to_unsigned(c1)
stack.push(I64.cast(c1 >> (c2 % 64)))
def i64_rotl(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c1):
c1 = I64.to_unsigned(c1)
k = c2 % 64
stack.push(I64.cast((c1 << k) | c1 >> (64 - k)))
def i64_rotr(self, store, stack):
stack.has_type_on_top(I64, 2)
c2 = stack.pop()
c1 = stack.pop()
if not issymbolic(c1):
c1 = I64.to_unsigned(c1)
k = c2 % 64
stack.push(I64.cast((c1 >> k) | c1 << (64 - k)))
def i32_wrap_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
c1 %= 2 ** 32
c1 = Operators.EXTRACT(c1, 0, 32)
stack.push(I32.cast(c1))
def i32_trunc_s_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing for float->int conversion", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I32, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I32, "infinity")
if c1 >= 2 ** 31 or c1 <= -(2 ** 31) - 1:
raise InvalidConversionTrap(I32, c1)
stack.push(I32.cast(c1))
def i32_trunc_u_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing for float->int conversion", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I32, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I32, "infinity")
if c1 >= 2 ** 32 or c1 <= -1:
raise InvalidConversionTrap(I32, c1)
stack.push(I32.cast(c1))
def i32_trunc_s_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F64, "Concretizing for float->int conversion", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I32, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I32, "infinity")
if c1 >= 2 ** 31 or c1 <= -(2 ** 31) - 1:
raise InvalidConversionTrap(I32, c1)
stack.push(I32.cast(c1))
def i32_trunc_u_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F64, "Concretizing for float->int conversion", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I32, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I32, "infinity")
if c1 >= 2 ** 32 or c1 <= -1:
raise InvalidConversionTrap(I32, c1)
stack.push(I32.cast(c1))
def i64_extend_s_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
stack.push(I64.cast(Operators.SEXTEND(c1, 32, 64)))
def i64_extend_u_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
if issymbolic(c1):
# ZEXTEND doesn't have a concept of sized ints, so it will promote a negative I32
# to a negative I64 with the same value.
stack.push(I64.cast(Operators.ZEXTEND(c1, 64)))
else:
stack.push(I64.cast(struct.unpack("q", bytes(c_int32(c1)) + b"\x00" * 4)[0]))
def i64_trunc_s_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing float", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I64, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I64, "infinity")
if c1 >= 2 ** 63 or c1 <= -(2 ** 63) - 1:
raise InvalidConversionTrap(I64, c1)
stack.push(I64.cast(c1))
def i64_trunc_u_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing float", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I64, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I64, "infinity")
if c1 >= 2 ** 64 or c1 <= -1:
raise InvalidConversionTrap(I64, c1)
stack.push(I64.cast(c1))
def i64_trunc_s_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F64, "Concretizing float", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I64, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I64, "infinity")
if c1 >= 2 ** 63 or c1 <= -(2 ** 63) - 1:
raise InvalidConversionTrap(I64, c1)
stack.push(I64.cast(c1))
def i64_trunc_u_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F64, "Concretizing float", c1)
if math.isnan(c1):
raise InvalidConversionTrap(I64, "NaN")
if math.isinf(c1):
raise InvalidConversionTrap(I64, "infinity")
if c1 >= 2 ** 64 or c1 <= -1:
raise InvalidConversionTrap(I64, c1)
stack.push(I64.cast(c1))
def i32_reinterpret_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing float", c1)
c1 = struct.unpack("i", struct.pack("f", c1))[0]
stack.push(I32.cast(c1))
def i64_reinterpret_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F64, "Concretizing float", c1)
c1 = struct.unpack("q", struct.pack("d", c1))[0]
stack.push(I64.cast(c1))
###########################################################################################################
# Floating point instructions
def float_load(self, store, stack, imm: MemoryImm, ty: type):
assert ty in {F32, F64}, f"{ty} is not an F32 or F64"
size = 32 if ty == F32 else 64
f = stack.get_frame().frame
a = f.module.memaddrs[0]
mem = store.mems[a]
stack.has_type_on_top(I32, 1)
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(
-1, I32, "Concretizing float memory read", i
) # TODO - Implement a symbolic memory model
ea = i + imm.offset
if ea not in range(len(mem.data)):
raise OutOfBoundsMemoryTrap(ea)
if (ea + (size // 8)) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + (size // 8))
self._publish("will_read_memory", ea, ea + (size // 8))
c = Operators.CONCAT(size, *map(Operators.ORD, reversed(mem.data[ea : ea + (size // 8)])))
# Mypy can't figure out that ty will definitely have a cast method, so we ignore the type
ret = ty.cast(c) # type: ignore
stack.push(ret)
self._publish("did_read_memory", ea, stack.peek())
def f32_load(self, store, stack, imm: MemoryImm):
return self.float_load(store, stack, imm, F32)
def f64_load(self, store, stack, imm: MemoryImm):
return self.float_load(store, stack, imm, F64)
def float_store(self, store, stack, imm: MemoryImm, ty: type, n=None):
f = stack.get_frame().frame
a = f.module.memaddrs[0]
mem = store.mems[a]
c = stack.pop()
i = stack.pop()
if issymbolic(i):
raise ConcretizeStack(-2, I32, "Concretizing memory address for float_store", i)
ea = i + imm.offset
if ty == F32:
size = 32
else:
size = 64
if ea not in range(len(mem.data)):
raise OutOfBoundsMemoryTrap(ea)
if (ea + (size // 8)) not in range(len(mem.data) + 1):
raise OutOfBoundsMemoryTrap(ea + (size // 8))
if not issymbolic(c):
c = struct.unpack(
"i" if size == 32 else "q", struct.pack("f" if size == 32 else "d", c)
)[0]
b = [Operators.CHR(Operators.EXTRACT(c, offset, 8)) for offset in range(0, size, 8)]
self._publish("did_write_memory", ea, ea + len(b), b)
for idx, v in enumerate(b):
mem.data[ea + idx] = v
self._publish("did_write_memory", ea, b)
def float_push_compare_return(self, stack, v, rettype=I32):
if issymbolic(v):
stack.push(Operators.ITEBV(32, v, I32(1), I32(0)))
else:
stack.push(rettype(v))
def f32_store(self, store, stack, imm: MemoryImm):
self.float_store(store, stack, imm, F32)
def f64_store(self, store, stack, imm: MemoryImm):
self.float_store(store, stack, imm, F64)
def f32_const(self, store, stack, imm: F32ConstImm):
stack.push(F32.cast(imm.value))
def f64_const(self, store, stack, imm: F64ConstImm):
stack.push(F64.cast(imm.value))
def f32_unary(self, store, stack, op, rettype: type = I32):
stack.has_type_on_top(F32, 1)
if issymbolic(stack.peek()):
raise ConcretizeStack(-1, F32, "Concretizing before float op", stack.peek())
v1 = stack.pop()
v = op(v1)
self.float_push_compare_return(stack, v, rettype)
def f32_binary(self, store, stack, op, rettype: type = I32):
stack.has_type_on_top(F32, 2)
if issymbolic(stack.peek()):
raise ConcretizeStack(-1, F32, "Concretizing before float op", stack.peek())
v2 = stack.pop()
if issymbolic(stack.peek()):
raise ConcretizeStack(-2, F32, "Concretizing before float op", stack.peek())
v1 = stack.pop()
v = op(v1, v2)
self.float_push_compare_return(stack, v, rettype)
def f64_unary(self, store, stack, op, rettype: type = F64):
stack.has_type_on_top(F64, 1)
if issymbolic(stack.peek()):
raise ConcretizeStack(-1, F64, "Concretizing before float op", stack.peek())
v1 = stack.pop()
v = op(v1)
self.float_push_compare_return(stack, v, rettype)
def f64_binary(self, store, stack, op, rettype: type = I32):
stack.has_type_on_top(F64, 2)
if issymbolic(stack.peek()):
raise ConcretizeStack(-1, F64, "Concretizing before float op", stack.peek())
v2 = stack.pop()
if issymbolic(stack.peek()):
raise ConcretizeStack(-2, F64, "Concretizing before float op", stack.peek())
v1 = stack.pop()
v = op(v1, v2)
self.float_push_compare_return(stack, v, rettype)
def f32_eq(self, store, stack):
return self.f32_binary(store, stack, operator.eq)
def f32_ne(self, store, stack):
return self.f32_binary(store, stack, operator.ne)
def f32_lt(self, store, stack):
return self.f32_binary(store, stack, operator.lt)
def f32_gt(self, store, stack):
return self.f32_binary(store, stack, operator.gt)
def f32_le(self, store, stack):
return self.f32_binary(store, stack, operator.le)
def f32_ge(self, store, stack):
return self.f32_binary(store, stack, operator.ge)
def f64_eq(self, store, stack):
return self.f64_binary(store, stack, operator.eq)
def f64_ne(self, store, stack):
return self.f64_binary(store, stack, operator.ne)
def f64_lt(self, store, stack):
return self.f64_binary(store, stack, operator.lt)
def f64_gt(self, store, stack):
return self.f64_binary(store, stack, operator.gt)
def f64_le(self, store, stack):
return self.f64_binary(store, stack, operator.le)
def f64_ge(self, store, stack):
return self.f64_binary(store, stack, operator.ge)
def f32_abs(self, store, stack):
return self.f32_unary(store, stack, operator.abs, F32)
def f32_neg(self, store, stack):
return self.f32_unary(store, stack, operator.neg, F32)
def f32_ceil(self, store, stack):
return self.f32_unary(store, stack, operator_ceil, F32)
def f32_floor(self, store, stack):
return self.f32_unary(store, stack, operator_floor, F32)
def f32_trunc(self, store, stack):
return self.f32_unary(store, stack, operator_trunc, F32)
def f32_nearest(self, store, stack):
return self.f32_unary(store, stack, operator_nearest, F32)
def f32_sqrt(self, store, stack):
return self.f32_unary(store, stack, math.sqrt, F32)
def f32_add(self, store, stack):
return self.f32_binary(store, stack, operator.add, F32)
def f32_sub(self, store, stack):
return self.f32_binary(store, stack, operator.sub, F32)
def f32_mul(self, store, stack):
return self.f32_binary(store, stack, operator.mul, F32)
def f32_div(self, store, stack):
return self.f32_binary(store, stack, operator_div, F32)
def f32_min(self, store, stack):
return self.f32_binary(store, stack, operator_min, F32)
def f32_max(self, store, stack):
return self.f32_binary(store, stack, operator_max, F32)
def f32_copysign(self, store, stack):
return self.f32_binary(store, stack, math.copysign, F32)
def f64_abs(self, store, stack):
return self.f64_unary(store, stack, operator.abs, F64)
def f64_neg(self, store, stack):
return self.f64_unary(store, stack, operator.neg, F64)
def f64_ceil(self, store, stack):
return self.f64_unary(store, stack, operator_ceil, F64)
def f64_floor(self, store, stack):
return self.f64_unary(store, stack, operator_floor, F64)
def f64_trunc(self, store, stack):
return self.f64_unary(store, stack, operator_trunc, F64)
def f64_nearest(self, store, stack):
return self.f64_unary(store, stack, operator_nearest, F64)
def f64_sqrt(self, store, stack):
return self.f64_unary(store, stack, math.sqrt, F64)
def f64_add(self, store, stack):
return self.f64_binary(store, stack, operator.add, F64)
def f64_sub(self, store, stack):
return self.f64_binary(store, stack, operator.sub, F64)
def f64_mul(self, store, stack):
return self.f64_binary(store, stack, operator.mul, F64)
def f64_div(self, store, stack):
return self.f64_binary(store, stack, operator_div, F64)
def f64_min(self, store, stack):
return self.f64_binary(store, stack, operator_min, F64)
def f64_max(self, store, stack):
return self.f64_binary(store, stack, operator_max, F64)
def f64_copysign(self, store, stack):
return self.f64_binary(store, stack, math.copysign, F64)
def f32_convert_s_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
stack.push(F32.cast(float(c1)))
def f32_convert_u_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
stack.push(F32.cast(float(I32.to_unsigned(c1))))
def f32_convert_s_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
stack.push(F32.cast(float(c1)))
def f32_convert_u_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
stack.push(F32.cast(float(I64.to_unsigned(c1))))
def f32_demote_f64(self, store, stack):
stack.has_type_on_top(F64, 1)
c1: F64 = stack.pop()
if math.isnan(c1) or math.isinf(c1) or c1 == 0.0 or c1 == -0.0:
stack.push(F32.cast(c1))
return
raise NotImplementedError("f32_demote_f64")
c1 = struct.unpack("f", struct.pack("d", c1)[:4])[0]
stack.push(F32.cast(c1))
def f64_convert_s_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I32, "Concretizing int for float conversion", c1)
stack.push(F64.cast(float(c1)))
def f64_convert_u_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I32, "Concretizing int for float conversion", c1)
stack.push(F64.cast(float(I32.to_unsigned(c1))))
def f64_convert_s_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I64, "Concretizing int for float conversion", c1)
stack.push(F64.cast(float(c1)))
def f64_convert_u_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I64, "Concretizing int for float conversion", c1)
stack.push(F64.cast(float(I64.to_unsigned(c1))))
def f64_promote_f32(self, store, stack):
stack.has_type_on_top(F32, 1)
c1: F32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, F32, "Concretizing F32 for F64 promotion", c1)
stack.push(F64.cast(c1))
def f32_reinterpret_i32(self, store, stack):
stack.has_type_on_top(I32, 1)
c1: I32 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I32, "Concretizing int for float conversion", c1)
c1 = struct.unpack("f", struct.pack("i", c1))[0]
stack.push(F32.cast(c1))
def f64_reinterpret_i64(self, store, stack):
stack.has_type_on_top(I64, 1)
c1: I64 = stack.pop()
if issymbolic(c1):
raise ConcretizeStack(-1, I64, "Concretizing int for float conversion", c1)
c1 = struct.unpack("d", struct.pack("q", c1))[0]
stack.push(F64.cast(c1))
################################################################################################
# Unary and binary operators for floats that don't map directly onto Python's built-in operators
def operator_nearest(a):
if math.isnan(a) or math.isinf(a):
return a.integer
else:
return round(a)
def operator_trunc(a):
if math.isnan(a) or math.isinf(a):
return a.integer
else:
return math.trunc(a)
def operator_ceil(a):
if math.isnan(a) or math.isinf(a):
return a.integer
else:
return math.ceil(a)
def operator_floor(a):
if math.isnan(a) or math.isinf(a):
return a.integer
else:
return math.floor(a)
def operator_div(a, b):
if b == 0:
return math.inf
else:
return operator.truediv(a, b)
def operator_min(a, b):
return a if a < b else b
def operator_max(a, b):
return a if a > b else b
|
|
"""The script used to train the model."""
import os
import sys
import getopt
import numpy as np
import models as m
from tqdm import tqdm
from keras.optimizers import Adam
from util.data import TwoImageIterator
from util.util import MyDict, log, save_weights, load_weights, load_losses, create_expt_dir
def print_help():
"""Print how to use this script."""
print "Usage:"
print "train.py [--help] [--nfd] [--nfatob] [--alpha] [--epochs] [batch_size] [--samples_per_batch] " \
"[--save_every] [--lr] [--beta_1] [--continue_train] [--log_dir]" \
"[--expt_name] [--base_dir] [--train_dir] [--val_dir] [--train_samples] " \
"[--val_samples] [--load_to_memory] [--a_ch] [--b_ch] [--is_a_binary] " \
"[--is_b_binary] [--is_a_grayscale] [--is_b_grayscale] [--target_size] " \
"[--rotation_range] [--height_shift_range] [--width_shift_range] " \
"[--horizontal_flip] [--vertical_flip] [--zoom_range]"
print "--nfd: Number of filters of the first layer of the discriminator."
print "--nfatob: Number of filters of the first layer of the AtoB model."
print "--alpha: The weight of the reconstruction loss of the AtoB model."
print "--epochs: Number of epochs to train the model."
print "--batch_size: the size of the batch to train."
print "--samples_per_batch: The number of samples to train each model on each iteration."
print "--save_every: Save results every 'save_every' epochs on the log folder."
print "--lr: The learning rate to train the models."
print "--beta_1: The beta_1 value of the Adam optimizer."
print "--continue_train: If it should continue the training from the last checkpoint."
print "--log_dir: The directory to place the logs."
print "--expt_name: The name of the experiment. Saves the logs into a folder with this name."
print "--base_dir: Directory that contains the data."
print "--train_dir: Directory inside base_dir that contains training data. " \
"Must contain an A and B folder."
print "--val_dir: Directory inside base_dir that contains validation data. " \
"Must contain an A and B folder."
print "--train_samples: The number of training samples. Set -1 to be the same as training examples."
print "--val_samples: The number of validation samples. Set -1 to be the same as validation examples."
print "--load_to_memory: Whether to load images into memory or read from the filesystem."
print "--a_ch: Number of channels of images A."
print "--b_ch: Number of channels of images B."
print "--is_a_binary: If A is binary, its values will be 0 or 1. A threshold of 0.5 is used."
print "--is_b_binary: If B is binary, the last layer of the atob model is " \
"followed by a sigmoid. Otherwise, a tanh is used. When the sigmoid is " \
"used, the binary crossentropy loss is used. For the tanh, the L1 is used. Also, " \
"its values will be 0 or 1. A threshold of 0.5 is used."
print "--is_a_grayscale: If A images should only have one channel. If they are color images, " \
"they are converted to grayscale."
print "--is_b_grayscale: If B images should only have one channel. If they are color images, " \
"they are converted to grayscale."
print "--target_size: The size of the images loaded by the iterator. THIS DOES NOT CHANGE THE MODELS. " \
"If you want to accept images of different sizes you will need to update the models.py files."
print "--rotation_range: The range to rotate training images for dataset augmentation."
print "--height_shift_range: Percentage of height of the image to translate for dataset augmentation."
print "--width_shift_range: Percentage of width of the image to translate for dataset augmentation."
print "--horizontal_flip: If true performs random horizontal flips on the train set."
print "--vertical_flip: If true performs random vertical flips on the train set."
print "--zoom_range: Defines the range to scale the image for dataset augmentation."
def discriminator_generator(it, atob, dout_size):
"""
Generate batches for the discriminator.
Parameters:
- it: an iterator that returns a pair of images;
- atob: the generator network that maps an image to another representation;
- dout_size: the size of the output of the discriminator.
"""
while True:
# Fake pair
a_fake, _ = next(it)
b_fake = atob.predict(a_fake)
# Real pair
a_real, b_real = next(it)
# Concatenate the channels. Images become (ch_a + ch_b) x 256 x 256
fake = np.concatenate((a_fake, b_fake), axis=1)
real = np.concatenate((a_real, b_real), axis=1)
# Concatenate fake and real pairs into a single batch
batch_x = np.concatenate((fake, real), axis=0)
# 1 is fake, 0 is real
batch_y = np.ones((batch_x.shape[0], 1) + dout_size)
batch_y[fake.shape[0]:] = 0
yield batch_x, batch_y
def train_discriminator(d, it, samples_per_batch=20):
"""Train the discriminator network."""
return d.fit_generator(it, samples_per_epoch=samples_per_batch*2, nb_epoch=1, verbose=False)
def pix2pix_generator(it, dout_size):
"""
Generate data for the generator network.
Parameters:
- it: an iterator that returns a pair of images;
- dout_size: the size of the output of the discriminator.
"""
for a, b in it:
# 1 is fake, 0 is real
y = np.zeros((a.shape[0], 1) + dout_size)
yield [a, b], y
def train_pix2pix(pix2pix, it, samples_per_batch=20):
"""Train the generator network."""
return pix2pix.fit_generator(it, nb_epoch=1, samples_per_epoch=samples_per_batch, verbose=False)
def evaluate(models, generators, losses, val_samples=192):
"""Evaluate and display the losses of the models."""
# Get necessary generators
d_gen = generators.d_gen_val
p2p_gen = generators.p2p_gen_val
# Get necessary models
d = models.d
p2p = models.p2p
# Evaluate
d_loss = d.evaluate_generator(d_gen, val_samples)
p2p_loss = p2p.evaluate_generator(p2p_gen, val_samples)
losses['d_val'].append(d_loss)
losses['p2p_val'].append(p2p_loss)
print ''
print ('Train Losses of (D={0} / P2P={1});\n'
'Validation Losses of (D={2} / P2P={3})'.format(
losses['d'][-1], losses['p2p'][-1], d_loss, p2p_loss))
return d_loss, p2p_loss
def model_creation(d, atob, params):
"""Create all the necessary models."""
opt = Adam(lr=params.lr, beta_1=params.beta_1)
p2p = m.pix2pix(atob, d, params.a_ch, params.b_ch, alpha=params.alpha, opt=opt,
is_a_binary=params.is_a_binary, is_b_binary=params.is_b_binary)
models = MyDict({
'atob': atob,
'd': d,
'p2p': p2p,
})
return models
def generators_creation(it_train, it_val, models, dout_size):
"""Create all the necessary data generators."""
# Discriminator data generators
d_gen = discriminator_generator(it_train, models.atob, dout_size)
d_gen_val = discriminator_generator(it_val, models.atob, dout_size)
# Workaround to make tensorflow work. When atob.predict is called the first
# time it calls tf.get_default_graph. This should be done on the main thread
# and not inside fit_generator. See https://github.com/fchollet/keras/issues/2397
next(d_gen)
# pix2pix data generators
p2p_gen = pix2pix_generator(it_train, dout_size)
p2p_gen_val = pix2pix_generator(it_val, dout_size)
generators = MyDict({
'd_gen': d_gen,
'd_gen_val': d_gen_val,
'p2p_gen': p2p_gen,
'p2p_gen_val': p2p_gen_val,
})
return generators
def train_iteration(models, generators, losses, params):
"""Perform a train iteration."""
# Get necessary generators
d_gen = generators.d_gen
p2p_gen = generators.p2p_gen
# Get necessary models
d = models.d
p2p = models.p2p
# Update the discriminator
dhist = train_discriminator(d, d_gen, samples_per_batch=params.samples_per_batch)
losses['d'].extend(dhist.history['loss'])
# Update the generator
p2phist = train_pix2pix(p2p, p2p_gen, samples_per_batch=params.samples_per_batch)
losses['p2p'].extend(p2phist.history['loss'])
def train(models, it_train, it_val, params):
"""
Train the model.
Parameters:
- models: a dictionary with all the models (atob: the generator that maps A to B; d: the discriminator; p2p: the combined Pix2Pix model).
- it_train: the iterator of the training data.
- it_val: the iterator of the validation data.
- params: parameters of the training procedure.
"""
# Create the experiment folder and save the parameters
create_expt_dir(params)
# Get the output shape of the discriminator
dout_size = models.d.output_shape[-2:]
# Define the data generators
generators = generators_creation(it_train, it_val, models, dout_size)
# Define the number of samples to use on each training epoch
train_samples = params.train_samples
if params.train_samples == -1:
train_samples = it_train.N
batches_per_epoch = train_samples // params.samples_per_batch
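# Illustrative numbers only: with train_samples == 1000 and samples_per_batch == 20,
# each epoch runs 50 calls to train_iteration (one discriminator fit and one
# generator fit per call).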
# Define the number of samples to use for validation
val_samples = params.val_samples
if val_samples == -1:
val_samples = it_val.N
losses = {'p2p': [], 'd': [], 'p2p_val': [], 'd_val': []}
if params.continue_train:
losses = load_losses(log_dir=params.log_dir, expt_name=params.expt_name)
for e in tqdm(range(params.epochs)):
for b in range(batches_per_epoch):
train_iteration(models, generators, losses, params)
# Evaluate how the models is doing on the validation set.
evaluate(models, generators, losses, val_samples=val_samples)
if (e + 1) % params.save_every == 0:
save_weights(models, log_dir=params.log_dir, expt_name=params.expt_name)
log(losses, models.atob, it_val, log_dir=params.log_dir, expt_name=params.expt_name,
is_a_binary=params.is_a_binary, is_b_binary=params.is_b_binary)
if __name__ == '__main__':
a = sys.argv[1:]
params = MyDict({
# Model
'nfd': 32, # Number of filters of the first layer of the discriminator
'nfatob': 64, # Number of filters of the first layer of the AtoB model
'alpha': 100, # The weight of the reconstruction loss of the atob model
# Train
'epochs': 100, # Number of epochs to train the model
'batch_size': 1, # The batch size
'samples_per_batch': 20, # The number of samples to train each model on each iteration
'save_every': 10, # Save results every 'save_every' epochs on the log folder
'lr': 2e-4, # The learning rate to train the models
'beta_1': 0.5, # The beta_1 value of the Adam optimizer
'continue_train': False, # If it should continue the training from the last checkpoint
# File system
'log_dir': 'log', # Directory to log
'expt_name': None, # The name of the experiment. Saves the logs into a folder with this name
'base_dir': 'data/unet_segmentations_binary', # Directory that contains the data
'train_dir': 'train', # Directory inside base_dir that contains training data
'val_dir': 'val', # Directory inside base_dir that contains validation data
'train_samples': -1, # The number of training samples. Set -1 to be the same as training examples
'val_samples': -1, # The number of validation samples. Set -1 to be the same as validation examples
'load_to_memory': True, # Whether to load the images into memory
# Image
'a_ch': 1, # Number of channels of images A
'b_ch': 3, # Number of channels of images B
'is_a_binary': True, # If A is binary, its values will be either 0 or 1
'is_b_binary': False, # If B is binary, the last layer of the atob model is followed by a sigmoid
'is_a_grayscale': True, # If A is grayscale, the image will only have one channel
'is_b_grayscale': False, # If B is grayscale, the image will only have one channel
'target_size': 512, # The size of the images loaded by the iterator. DOES NOT CHANGE THE MODELS
'rotation_range': 0., # The range to rotate training images for dataset augmentation
'height_shift_range': 0., # Percentage of height of the image to translate for dataset augmentation
'width_shift_range': 0., # Percentage of width of the image to translate for dataset augmentation
'horizontal_flip': False, # If true performs random horizontal flips on the train set
'vertical_flip': False, # If true performs random vertical flips on the train set
'zoom_range': 0., # Defines the range to scale the image for dataset augmentation
})
param_names = [k + '=' for k in params.keys()] + ['help']
try:
opts, args = getopt.getopt(a, '', param_names)
except getopt.GetoptError:
print_help()
sys.exit()
for opt, arg in opts:
if opt == '--help':
print_help()
sys.exit()
elif opt in ('--nfatob', '--nfd', '--a_ch', '--b_ch', '--epochs', '--batch_size',
'--samples_per_batch', '--save_every', '--train_samples', '--val_samples',
'--target_size'):
params[opt[2:]] = int(arg)
elif opt in ('--lr', '--beta_1', '--rotation_range', '--height_shift_range',
'--width_shift_range', '--zoom_range', '--alpha'):
params[opt[2:]] = float(arg)
elif opt in ('--is_a_binary', '--is_b_binary', '--is_a_grayscale', '--is_b_grayscale',
'--continue_train', '--horizontal_flip', '--vertical_flip',
'--load_to_memory'):
params[opt[2:]] = True if arg == 'True' else False
elif opt in ('--base_dir', '--train_dir', '--val_dir', '--expt_name', '--log_dir'):
params[opt[2:]] = arg
dopt = Adam(lr=params.lr, beta_1=params.beta_1)
# Define the U-Net generator
unet = m.g_unet(params.a_ch, params.b_ch, params.nfatob,
batch_size=params.batch_size, is_binary=params.is_b_binary)
# Define the discriminator
d = m.discriminator(params.a_ch, params.b_ch, params.nfd, opt=dopt)
if params.continue_train:
load_weights(unet, d, log_dir=params.log_dir, expt_name=params.expt_name)
ts = params.target_size
train_dir = os.path.join(params.base_dir, params.train_dir)
it_train = TwoImageIterator(train_dir, is_a_binary=params.is_a_binary,
is_a_grayscale=params.is_a_grayscale,
is_b_grayscale=params.is_b_grayscale,
is_b_binary=params.is_b_binary,
batch_size=params.batch_size,
load_to_memory=params.load_to_memory,
rotation_range=params.rotation_range,
height_shift_range=params.height_shift_range,
width_shift_range=params.width_shift_range,
zoom_range=params.zoom_range,
horizontal_flip=params.horizontal_flip,
vertical_flip=params.vertical_flip,
target_size=(ts, ts))
val_dir = os.path.join(params.base_dir, params.val_dir)
it_val = TwoImageIterator(val_dir, is_a_binary=params.is_a_binary,
is_b_binary=params.is_b_binary,
is_a_grayscale=params.is_a_grayscale,
is_b_grayscale=params.is_b_grayscale,
batch_size=params.batch_size,
load_to_memory=params.load_to_memory,
target_size=(ts, ts))
models = model_creation(d, unet, params)
train(models, it_train, it_val, params)
|
|
import os
from functools import partial
import datetime
import unittest
from mock import patch
from ..api_client import ApiClient
from ..configuration import ApiConfiguration
from .. import models
test_dict = {
'name': 'Test Name',
'display_name': 'Test Display Name',
'data_format': 'Test Format',
}
class TestModel(object):
def __init__(self):
self.swagger_types = {
'display_name': 'str',
'name': 'str',
'data_format': 'str',
}
self.attribute_map = {
'display_name': 'display_name',
'name': 'name',
'data_format': 'data_format',
}
self.display_name = None
self.name = None
self.data_format = None
self.some_other_attribute = None
class ApiClientTest(unittest.TestCase):
def setUp(self):
host = 'http://example.com'
api_key = 'keyboardcat'
configuration = ApiConfiguration(host, api_key)
self.client = ApiClient(configuration=configuration)
self.base_expected_headers = {
'Authorization': 'Token keyboardcat',
'User-Agent': 'Python-Swagger',
}
def test_sanitization_for_serialization(self):
"""
Verify that data are normalized
"""
model = TestModel()
for key in test_dict.keys():
setattr(model, key, test_dict[key])
sanitized_model = self.client.sanitize_for_serialization(model)
self.assertEqual(sanitized_model, test_dict)
def test_deserialization(self):
obj = [{'foo': 'bar'}, {'baz': 'qux'}]
deserialized = self.client.deserialize(obj, 'list[dict]')
self.assertEqual(obj, deserialized)
obj = 1
deserialized = self.client.deserialize(obj, 'dict')
self.assertEqual(deserialized, obj)
# deserialize model from dict that doesn't have all model attributes
models.TestModel = TestModel
obj = {'name': 'some name'}
deserialized = self.client.deserialize(obj, 'TestModel')
self.assertIsNone(deserialized.display_name)
self.assertIsNone(deserialized.data_format)
# deserialize datetimes
now = datetime.datetime.now()
deserialized = self.client.deserialize(now.isoformat(), 'datetime')
self.assertEqual(now, deserialized)
@patch('ambition.api_client.RESTClient.GET')
@patch('ambition.api_client.RESTClient.HEAD')
@patch('ambition.api_client.RESTClient.POST')
@patch('ambition.api_client.RESTClient.PATCH')
@patch('ambition.api_client.RESTClient.PUT')
@patch('ambition.api_client.RESTClient.DELETE')
def test_request_method(self, delete, put, patch, post, head, get):
"""
Verify that the correct client method is called with the right kwargs
"""
query_params = {'query': 'query_param'}
post_params = {'post': 'post_param'}
body = 'body'
self.client.request(
'GET', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'HEAD', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'POST', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'PATCH', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'PUT', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'DELETE', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
delete.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
put.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
patch.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
post.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
head.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
get.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
n = ['NOT_A_METHOD', 'some_url']
self.assertRaises(ValueError, partial(self.client.request, *n))
def test_files(self):
"""
Verifies that the files are included in post params
"""
file_path = os.path.abspath(__file__)
files = {
'this_file': file_path
}
post_params = self.client.prepare_post_parameters(files=files)
self.assertIn('this_file', post_params)
def test_select_accepts(self):
"""
Verifies that the accept header is correctly selected (or not)
from a list
"""
self.assertIsNone(self.client.select_header_accept([]))
accepts = ['application/vnd.ms-excel', 'application/json']
self.assertEqual('application/json', self.client.select_header_accept(accepts))
accepts = ['application/vnd.ms-excel', 'text/csv']
self.assertEqual(', '.join(accepts), self.client.select_header_accept(accepts))
def test_select_content_type(self):
"""
Verifies that the content type header is correctly selected
"""
self.assertEqual('application/json', self.client.select_header_content_type([]))
content_types = ['application/vnd.ms-excel', 'application/json']
self.assertEqual('application/json', self.client.select_header_content_type(content_types))
content_types = ['application/vnd.ms-excel', 'text/csv']
self.assertEqual('application/vnd.ms-excel', self.client.select_header_content_type(content_types))
@patch('ambition.api_client.models')
@patch('ambition.api_client.RESTClient.GET')
def test_deserialization_single_model(self, rest_get, models):
"""
Verify that api responses are cast as the right model type
"""
rest_get.return_value = test_dict
models.TestModel = TestModel
model = self.client.call_api('/fake', 'GET', response='TestModel')
self.assertIsInstance(model, TestModel)
self.assertEqual(model.display_name, test_dict.get('display_name'))
self.assertEqual(model.name, test_dict.get('name'))
self.assertEqual(model.data_format, test_dict.get('data_format'))
@patch('ambition.api_client.models')
@patch('ambition.api_client.RESTClient.GET')
def test_deserialization_multiple_models(self, rest_get, models):
"""
Verify that list api responses are model iterators
"""
serialized_response = [test_dict, test_dict]
rest_get.return_value = serialized_response
models.TestModel = TestModel
response = self.client.call_api('/fake', 'GET', response='TestModel')
self.assertEqual(len(list(response)), 2)
for model in response:
self.assertIsInstance(model, TestModel)
@patch('ambition.api_client.ApiClient.request')
def test_path_params(self, request_mock):
"""
Verify that path parameters are constructed properly
"""
path_params = {
'foo': 'f',
'bar': 'b',
}
self.client.call_api('/{foo}/{bar}/', 'GET', path_params=path_params)
expected_url = 'http://example.com/f/b/'
request_mock.assert_called_with(
'GET', expected_url, body=None,
headers=self.base_expected_headers,
post_params=None, query_params=None)
@patch('ambition.api_client.ApiClient.request')
def test_query_params(self, request_mock):
"""
Verify that query parameters are normalized
"""
today = datetime.datetime.now().date()
query_params = {
'today': today,
'users': ['Marty McFly', 'H. G. Wells'],
'none_thing': None,
}
self.client.call_api('/stuff/', 'GET', query_params=query_params)
expected_query_params = {
'today': datetime.datetime.now().date().isoformat(),
'users': 'Marty McFly,H. G. Wells',
'none_thing': 'None',
}
request_mock.assert_called_with(
'GET', 'http://example.com/stuff/', body=None,
headers=self.base_expected_headers,
post_params=None, query_params=expected_query_params)
@patch('ambition.api_client.ApiClient.request')
def test_post_params(self, request_mock):
"""
Verify that post parameters are normalized
"""
today = datetime.datetime.now().date()
post_params = {
'today': today,
}
self.client.call_api('/stuff/', 'POST', post_params=post_params)
expected_post_params = {
'today': datetime.datetime.now().date().isoformat()
}
request_mock.assert_called_with(
'POST', 'http://example.com/stuff/', body=None,
headers=self.base_expected_headers,
post_params=expected_post_params, query_params=None)
@patch('ambition.api_client.ApiClient.request')
def test_body_normalization(self, request_mock):
"""
Verify that body is normalized
"""
today = datetime.datetime.now().date()
body = today
self.client.call_api('/stuff/', 'POST', body=body)
request_mock.assert_called_with(
'POST', 'http://example.com/stuff/', body=today.isoformat(),
headers=self.base_expected_headers,
post_params=None, query_params=None)
def test_update_params_for_auth(self):
"""
Verify that authentication is defined correctly
"""
auth_settings = None
headers = {}
query_params = {}
self.client.update_params_for_auth(headers, query_params, auth_settings)
# confirm that neither dict was modified
self.assertEqual({}, headers)
self.assertEqual({}, query_params)
def test_user_agent(self):
"""
Verifies that clients are being constructed with user agent
"""
self.assertEqual('Python-Swagger', self.client.user_agent)
def test_deserialize_model_gracefully_handles_bad_input(self):
"""
Verifies that we won't try to enumerate an object not of list/dict type
when trying to cast it to a model type
"""
from ambition.models import PublicApiDataTypeRetrieveResponse
model = self.client.deserialize_model(PublicApiDataTypeRetrieveResponse, None)
self.assertIsInstance(model, PublicApiDataTypeRetrieveResponse)
for attribute in model.attribute_map:
self.assertIsNone(getattr(model, attribute))
def test_deserialize_datetimes(self):
"""
Verifies that datetimes are deserialized
"""
now = datetime.datetime.now()
now_deserialized = self.client.deserialize(now.isoformat(), 'datetime')
self.assertEqual(now, now_deserialized)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Program: build.py
# Author: Mauricio Caceres Bravo <mauricio.caceres.bravo@gmail.com>
# Created: Sun Oct 15 10:26:39 EDT 2017
# Updated: Wed Feb 02 11:44:49 EST 2022
# Purpose: Main build file for gtools (copies contents into ./build and
# puts a .zip file in ./releases)
from os import makedirs, path, linesep, chdir, system, remove, rename
from shutil import copy2, rmtree
from sys import platform
from tempfile import gettempdir
from zipfile import ZipFile
from re import search
import argparse
# ---------------------------------------------------------------------
# Aux programs
try:
from shutil import which
except ImportError:
def which(program):
import os
def is_exe(fpath):
return path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = path.split(program)
if fpath:
if is_exe(program):
return program
else:
for epath in os.environ["PATH"].split(os.pathsep):
epath = epath.strip('"')
exe_file = path.join(epath, program)
if is_exe(exe_file):
return exe_file
return None
def makedirs_safe(directory):
try:
makedirs(directory)
return directory
except OSError:
if not path.isdir(directory):
raise
# ---------------------------------------------------------------------
# Command line parsing
parser = argparse.ArgumentParser()
parser.add_argument('--stata',
nargs = 1,
type = str,
metavar = 'STATA',
default = None,
required = False,
help = "Path to stata executable")
parser.add_argument('--stata-args',
nargs = 1,
type = str,
metavar = 'STATA_ARGS',
default = None,
required = False,
help = "Arguments to pass to Stata executable")
parser.add_argument('--make-flags',
nargs = 1,
type = str,
metavar = 'MAKE_FLAGS',
default = None,
required = False,
help = "Arguments to pass to make")
parser.add_argument('--clean',
dest = 'clean',
action = 'store_true',
help = "Clean build",
required = False)
parser.add_argument('--no-compile',
dest = 'no_compile',
action = 'store_false',
help = "do not re-compile",
required = False)
parser.add_argument('--replace',
dest = 'replace',
action = 'store_true',
help = "Replace build",
required = False)
parser.add_argument('--test',
dest = 'test',
action = 'store_true',
help = "Run tests",
required = False)
args = vars(parser.parse_args())
# ---------------------------------------------------------------------
# Relevant files
gtools_ssc = [
"_gtools_internal.ado",
"_gtools_internal.mata",
"lgtools.mlib",
"gcollapse.ado",
"gcontract.ado",
"gegen.ado",
"gunique.ado",
"gdistinct.ado",
"glevelsof.ado",
"gtop.ado",
"gtoplevelsof.ado",
"gisid.ado",
"greshape.ado",
"greg.ado",
"gregress.ado",
"givregress.ado",
"gglm.ado",
"gpoisson.ado",
"gstats.ado",
"gduplicates.ado",
"gquantiles.ado",
"fasterxtile.ado",
"hashsort.ado",
"gtools.ado",
"gcollapse.sthlp",
"gcontract.sthlp",
"gegen.sthlp",
"gunique.sthlp",
"gdistinct.sthlp",
"glevelsof.sthlp",
"gtop.sthlp",
"gtoplevelsof.sthlp",
"gisid.sthlp",
"greshape.sthlp",
"greg.sthlp",
"gregress.sthlp",
"givregress.sthlp",
"gglm.sthlp",
"gpoisson.sthlp",
"gstats.sthlp",
"gstats_transform.sthlp",
"gstats_range.sthlp",
"gstats_moving.sthlp",
"gstats_winsor.sthlp",
"gstats_tab.sthlp",
"gstats_sum.sthlp",
"gstats_summarize.sthlp",
"gduplicates.sthlp",
"gquantiles.sthlp",
"fasterxtile.sthlp",
"hashsort.sthlp",
"gtools.sthlp",
]
gtools_zip = [
"changelog.md",
"gtools.pkg",
"stata.toc",
] + gtools_ssc
gtools_build = gtools_zip + [
"gtools_tests.do"
]
# ---------------------------------------------------------------------
# Run the script
# Remove buld
# -----------
rc = 0
if args['clean']:
print("Removing build files")
for bfile in gtools_build:
try:
remove(path.join("build", bfile))
print("\tdeleted " + bfile)
        except OSError:
try:
remove(path.join("build", "gtools", bfile))
print("\tdeleted " + bfile)
            except OSError:
print("\t" + bfile + " not found")
if args['no_compile']:
rc = system("make clean SPI=2.0 SPIVER=v2")
rc = system("make clean SPI=3.0 SPIVER=v3")
exit(0)
makedirs_safe(path.join("build", "gtools"))
makedirs_safe("releases")
# Stata executable
# ----------------
# I don't have stata on my global path, so to make the script portable
# I make it look for my local executable when Stata is not found.
if args['stata'] is not None:
statadir = path.abspath(".")
stataexe = args['stata'][0]
statargs = "-b do" if args['stata_args'] is None else args['stata_args'][0]
statado = '"{0}" {1}'.format(stataexe, statargs)
elif which("stata") is None:
statadir = path.expanduser("~/.local/stata13")
stataexe = path.join(statadir, "stata")
statargs = "-b do" if args['stata_args'] is None else args['stata_args']
statado = '"{0}" {1}'.format(stataexe, statargs)
else:
statadir = path.abspath(".")
stataexe = 'stata'
statargs = "-b do" if args['stata_args'] is None else args['stata_args']
statado = '"{0}" {1}'.format(stataexe, statargs)
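# As a concrete illustration (not executed): with Stata on the PATH and no
# --stata/--stata-args flags, the resolution above yields
#   stataexe = 'stata'
#   statargs = '-b do'
#   statado  = '"stata" -b do'
# and the later calls amount to system(statado + " " + dofile).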
# Temporary files
# ---------------
maindir = path.dirname(path.realpath(__file__))
tmpdir = gettempdir()
tmpfile = path.join(tmpdir, ".compile_lgtools.do")
tmpupdate = path.join(tmpdir, ".update_gtools.do")
# Compile mlib files
# ------------------
matafiles = [path.join("src", "ado", "_gtools_internal.mata")]
with open(path.join("build", "gtools", "gtools.mata"), 'w') as outfile:
for mfile in matafiles:
with open(mfile) as infile:
outfile.write(infile.read())
if which(stataexe):
with open(tmpfile, 'w') as f:
f.write("global maindir {0}".format(maindir))
f.write(linesep)
f.write("mata: mata set matastrict on")
f.write(linesep)
f.write("mata: mata set mataoptimize on")
f.write(linesep)
f.write('cd "${maindir}/build/gtools"')
f.write(linesep)
f.write("do gtools.mata")
f.write(linesep)
f.write("mata")
f.write(linesep)
f.write('mata mlib create lgtools, dir("${maindir}/build/gtools") replace')
f.write(linesep)
f.write("mata mlib add lgtools Gtools*()")
f.write(linesep)
f.write("end")
f.write(linesep)
chdir(statadir)
system(statado + " " + tmpfile)
print("Compiled lgtools.mlib")
chdir(maindir)
copy2(
path.join("build", "gtools", "lgtools.mlib"),
path.join("lib", "plugin", "lgtools.mlib")
)
else:
copy2(
path.join("lib", "plugin", "lgtools.mlib"),
path.join("build", "gtools", "lgtools.mlib")
)
if not path.isfile(path.join("build", "gtools", "lgtools.mlib")):
print("ERROR: Failed to compile build/gtools/lgtools.mlib")
exit(-1)
else:
print("Found build/gtools/lgtools.mlib")
print("")
# Compile plugin files
# --------------------
# NOTE: --no-compile uses action='store_false', so args['no_compile'] is True
# by default and becomes False only when the flag is passed; this branch
# compiles the plugins unless --no-compile was given.
if args['no_compile']:
if platform in ["linux", "linux2", "win32", "cygwin", "darwin"]:
print("Trying to compile plugins for -gtools-")
make_flags = args['make_flags'][0] if args['make_flags'] is not None else ""
rc = system("make all SPI=2.0 SPIVER=v2 {0}".format(make_flags))
rc = system("make all SPI=3.0 SPIVER=v3 {0}".format(make_flags))
print("Success!" if rc == 0 else "Failed.")
else:
print("Don't know platform '{0}'; compile manually.".format(platform))
exit(198)
print("")
# Get unit test files
# -------------------
testfile = open(path.join("src", "test", "gtools_tests.do")).readlines()
files = [path.join("src", "test", "test_gcollapse.do"),
path.join("src", "test", "test_gcontract.do"),
path.join("src", "test", "test_gquantiles.do"),
path.join("src", "test", "test_gquantiles_by.do"),
path.join("src", "test", "test_gegen.do"),
path.join("src", "test", "test_gunique.do"),
path.join("src", "test", "test_glevelsof.do"),
path.join("src", "test", "test_gtoplevelsof.do"),
path.join("src", "test", "test_gisid.do"),
path.join("src", "test", "test_greshape.do"),
path.join("src", "test", "test_gregress.do"),
path.join("src", "test", "test_gstats.do"),
path.join("src", "test", "test_gduplicates.do"),
path.join("src", "test", "test_hashsort.do")]
with open(path.join("build", "gtools_tests.do"), 'w') as outfile:
outfile.writelines(testfile[:-4])
with open(path.join("build", "gtools_tests.do"), 'a') as outfile:
for fname in files:
with open(fname) as infile:
outfile.write(infile.read())
outfile.writelines(testfile[-5:])
# Copy files to ./build
# ---------------------
gdir = path.join("build", "gtools")
copy2("changelog.md", gdir)
copy2(path.join("src", "gtools.pkg"), gdir)
copy2(path.join("src", "stata.toc"), gdir)
copy2(
path.join("docs", "stata", "gtoplevelsof.sthlp"),
path.join("docs", "stata", "gtop.sthlp")
)
copy2(
path.join("docs", "stata", "gquantiles.sthlp"),
path.join("docs", "stata", "fasterxtile.sthlp")
)
copy2(
path.join("docs", "stata", "gregress.sthlp"),
path.join("docs", "stata", "greg.sthlp")
)
copy2(
path.join("docs", "stata", "gstats_summarize.sthlp"),
path.join("docs", "stata", "gstats_sum.sthlp")
)
copy2(
path.join("docs", "stata", "gstats_summarize.sthlp"),
path.join("docs", "stata", "gstats_tab.sthlp")
)
copy2(
path.join("docs", "stata", "gstats_transform.sthlp"),
path.join("docs", "stata", "gstats_range.sthlp")
)
copy2(
path.join("docs", "stata", "gstats_transform.sthlp"),
path.join("docs", "stata", "gstats_moving.sthlp")
)
copy2(path.join("docs", "stata", "gcollapse.sthlp"), gdir)
copy2(path.join("docs", "stata", "gcontract.sthlp"), gdir)
copy2(path.join("docs", "stata", "gegen.sthlp"), gdir)
copy2(path.join("docs", "stata", "gunique.sthlp"), gdir)
copy2(path.join("docs", "stata", "gdistinct.sthlp"), gdir)
copy2(path.join("docs", "stata", "glevelsof.sthlp"), gdir)
copy2(path.join("docs", "stata", "gtop.sthlp"), gdir)
copy2(path.join("docs", "stata", "gtoplevelsof.sthlp"), gdir)
copy2(path.join("docs", "stata", "gisid.sthlp"), gdir)
copy2(path.join("docs", "stata", "greshape.sthlp"), gdir)
copy2(path.join("docs", "stata", "greg.sthlp"), gdir)
copy2(path.join("docs", "stata", "gregress.sthlp"), gdir)
copy2(path.join("docs", "stata", "givregress.sthlp"), gdir)
copy2(path.join("docs", "stata", "gglm.sthlp"), gdir)
copy2(path.join("docs", "stata", "gpoisson.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_transform.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_range.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_moving.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_winsor.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_summarize.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_sum.sthlp"), gdir)
copy2(path.join("docs", "stata", "gstats_tab.sthlp"), gdir)
copy2(path.join("docs", "stata", "gduplicates.sthlp"), gdir)
copy2(path.join("docs", "stata", "gquantiles.sthlp"), gdir)
copy2(path.join("docs", "stata", "fasterxtile.sthlp"), gdir)
copy2(path.join("docs", "stata", "hashsort.sthlp"), gdir)
copy2(path.join("docs", "stata", "gtools.sthlp"), gdir)
copy2(path.join("src", "ado", "_gtools_internal.ado"), gdir)
copy2(path.join("src", "ado", "_gtools_internal.mata"), gdir)
copy2(path.join("src", "ado", "gcollapse.ado"), gdir)
copy2(path.join("src", "ado", "gcontract.ado"), gdir)
copy2(path.join("src", "ado", "gegen.ado"), gdir)
copy2(path.join("src", "ado", "gunique.ado"), gdir)
copy2(path.join("src", "ado", "gdistinct.ado"), gdir)
copy2(path.join("src", "ado", "glevelsof.ado"), gdir)
copy2(path.join("src", "ado", "gtop.ado"), gdir)
copy2(path.join("src", "ado", "gtoplevelsof.ado"), gdir)
copy2(path.join("src", "ado", "gisid.ado"), gdir)
copy2(path.join("src", "ado", "greshape.ado"), gdir)
copy2(path.join("src", "ado", "greg.ado"), gdir)
copy2(path.join("src", "ado", "gregress.ado"), gdir)
copy2(path.join("src", "ado", "givregress.ado"), gdir)
copy2(path.join("src", "ado", "gglm.ado"), gdir)
copy2(path.join("src", "ado", "gpoisson.ado"), gdir)
copy2(path.join("src", "ado", "gstats.ado"), gdir)
copy2(path.join("src", "ado", "gduplicates.ado"), gdir)
copy2(path.join("src", "ado", "gquantiles.ado"), gdir)
copy2(path.join("src", "ado", "fasterxtile.ado"), gdir)
copy2(path.join("src", "ado", "hashsort.ado"), gdir)
copy2(path.join("src", "ado", "gtools.ado"), gdir)
# Copy files to .zip folder in ./releases
# ---------------------------------------
# Get stata version
with open(path.join("src", "ado", "gtools.ado"), 'r') as f:
line = f.readline()
    version = search(r'(\d+\.?)+', line).group(0)
plugins = [
"gtools_unix_v2.plugin",
"gtools_windows_v2.plugin",
"gtools_macosx_v2.plugin",
"gtools_unix_v3.plugin",
"gtools_windows_v3.plugin",
"gtools_macosx_v3.plugin"
]
plugbak = plugins[:]
for plug in plugbak:
if not path.isfile(path.join("build", plug)):
alt = path.join("lib", "plugin", plug)
if path.isfile(alt):
copy2(alt, "build")
else:
print("Could not find '{0}'".format(plug))
chdir("build")
print("Compressing build files for gtools-{0}".format(version))
if rc == 0:
gtools_anyplug = False
for plug in plugbak:
if path.isfile(plug):
gtools_anyplug = True
rename(path.join(plug), path.join("gtools", plug))
else:
plugins.remove(plug)
print("\t'{0}' not found; skipping.".format(plug))
if not gtools_anyplug:
print("WARNING: Could not find plugins despite build exit with 0 status.")
exit(-1)
gtools_zip += plugins
else:
print("WARNING: Failed to build plugins. Will exit.")
exit(-1)
outzip = path.join(maindir, "releases", "gtools-latest.zip")
with ZipFile(outzip, 'w') as zf:
for zfile in gtools_zip:
zf.write(path.join("gtools", zfile))
print("\t" + path.join("gtools", zfile))
rename(path.join("gtools", zfile), zfile)
chdir(maindir)
rmtree(path.join("build", "gtools"))
# Copy files to send to SSC
# -------------------------
print("")
print("Compressing build files for gtools-ssc.zip")
if rc == 0:
gtools_ssc += plugins
else:
print("WARNING: Failed to build plugins. Will exit.")
exit(-1)
chdir("build")
outzip = path.join(maindir, "releases", "gtools-ssc.zip")
with ZipFile(outzip, 'w') as zf:
for zfile in gtools_ssc:
zf.write(zfile)
print("\t" + zfile)
# Replace package in ~/ado/plus
# -----------------------------
chdir(maindir)
if args["replace"]:
if which(stataexe):
with open(tmpupdate, 'w') as f:
f.write("global builddir {0}".format(path.join(maindir, "build")))
f.write(linesep)
f.write("cap net uninstall gtools")
f.write(linesep)
f.write("net install gtools, from($builddir) replace")
f.write(linesep)
chdir(statadir)
system(statado + " " + tmpupdate)
remove(tmpupdate)
# print(linesep + "Replaced gtools in ~/ado/plus")
chdir(maindir)
else:
print("Could not find Stata executable '{0}'.".format(stataexe))
exit(-1)
# Run tests
# ---------
if args['test']:
print("Running tests (see build/gtools_tests.log for output)")
chdir("build")
system(statado + " gtools_tests.do")
chdir(maindir)
|
|
import json
import logging
from threading import RLock
import time
from api.caching.instance_codes import consumeCode, unconsumeCode
from api.caching.instance_lifetime import removeInstance, addInstance, setInstanceTemporalSourceLastTime
from api.caching.temporal_analytics import getTemporalInfluenceCollection, addTemporalEntry
from api.caching.tweet_user import getUserCollection, getTweetCollection
from api.config import Configuration
from api.core.data_structures.timestamp import Timestamped
from api.core.threads import startTwitterThread
from api.core.threads_core import BaseThread
from api.core.utility import criticalSection, getUniqueId, joinStringsToLengthPretty, joinStringsGrammarPretty, joinListOfLists, splitList, getEpochMs
from api.geocode.geocode_shared import GeocodeResultAbstract
from api.twitter.feed import TwitterAuthentication, TwitterSession
from api.twitter.flow.data_core import DataCollection
logger = logging.getLogger(__name__)
INFLUENCE_SOURCE = 1
GEOGRAPHICAL_FILTER = 2
INFLUENCE_SOURCE_AND_GEOGRAPHICAL_FILTER = 3
RECTANGLE_TYPE = 1
MARKER_TYPE = 2
def processRegionJsonString(regionJsonString):
regionJson = json.loads(regionJsonString)
results = {}
for item in regionJson:
displayType, entityType, coords, extraData = item
byDisplayType = results.setdefault(displayType,dict())
byEntityType = byDisplayType.setdefault(entityType, list())
byEntityType.append({'coords' : coords, 'extra_data' : extraData})
return results
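# Illustrative example (values are made up, not from a real instance): a
# region JSON string such as
#   '[[1, 2, [[10.0, 20.0], [11.0, 21.0]], {"cacheId": 42}]]'
# i.e. one RECTANGLE_TYPE entry tagged as a GEOGRAPHICAL_FILTER, is grouped
# first by display type and then by entity type:
#   {1: {2: [{'coords': [[10.0, 20.0], [11.0, 21.0]],
#             'extra_data': {'cacheId': 42}}]}}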
def getInfluenceSourceRectangles(processedRegionJson):
rectangleType = processedRegionJson.get(RECTANGLE_TYPE,None)
if rectangleType is None:
return list()
result = rectangleType.get(INFLUENCE_SOURCE,list())
result += rectangleType.get(INFLUENCE_SOURCE_AND_GEOGRAPHICAL_FILTER,list())
return result
def getInfluenceSourceMarkers(processedRegionJson):
markerType = processedRegionJson.get(MARKER_TYPE,None)
if markerType is None:
return list()
results = markerType.get(INFLUENCE_SOURCE,list())
results += markerType.get(INFLUENCE_SOURCE_AND_GEOGRAPHICAL_FILTER,list())
return results
def getInfluenceSourceIdsFromMarkers(markers):
results = list()
for item in markers:
results.append(item['extra_data']['cacheId'])
return results
def getCoordsFromItems(items):
results = list()
for item in items:
results.append(item['coords'])
return results
def formatCoordsForTwitter(coords):
return splitList(joinListOfLists(coords),2)
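# Illustrative example, assuming joinListOfLists flattens a list of lists and
# splitList regroups a flat list into chunks of the given size (as the names
# suggest): two bounding boxes
#   [[-122.75, 36.8, -121.75, 37.8], [-74.0, 40.0, -73.0, 41.0]]
# become the flat list of coordinate pairs that Twitter's streaming
# locations filter expects:
#   [[-122.75, 36.8], [-121.75, 37.8], [-74.0, 40.0], [-73.0, 41.0]]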
def getGeographicalFilterRectangles(processedRegionJson):
rectType = processedRegionJson.get(RECTANGLE_TYPE,None)
if rectType is None:
return list()
return rectType.get(GEOGRAPHICAL_FILTER,list()) + rectType.get(INFLUENCE_SOURCE_AND_GEOGRAPHICAL_FILTER,list())
class TwitterInstance(Timestamped):
def __init__(self,
instanceKey,
parentTwitterInstances,
twitterAuthentication,
geographicSetupString,
keywords,
instanceSetupCode,
startTime = None,
lastTemporalTimeIdBySource = None):
super(TwitterInstance, self).__init__(startTime)
logger.debug('Instance is %dms old' % self.construct_age)
assert isinstance(parentTwitterInstances,TwitterInstances)
assert isinstance(twitterAuthentication,TwitterAuthentication)
if lastTemporalTimeIdBySource is None:
lastTemporalTimeIdBySource = dict()
if instanceSetupCode is None or len(instanceSetupCode) == 0:
self.enable_shutdown_after_no_usage = True
self.instance_setup_code = None
else:
codeValid = consumeCode(instanceSetupCode)
if codeValid:
logger.info('Instance %s has loaded setup code %s' % (instanceKey, instanceSetupCode))
self.instance_setup_code = instanceSetupCode
self.enable_shutdown_after_no_usage = False
else:
logger.warn('Instance %s was provided with an invalid setup code: %s' % (instanceKey, instanceSetupCode))
raise ValueError('Invalid setup code: %s' % unicode(instanceSetupCode))
self.twitter_authentication = twitterAuthentication
self.oauth = TwitterInstance.makeAuthTuple(twitterAuthentication.access_token, twitterAuthentication.access_secret)
self.instance_key = instanceKey
self.geographic_setup_string = geographicSetupString
self.parent_twitter_instances = parentTwitterInstances
self.region_json = processRegionJsonString(self.geographic_setup_string)
self.geographical_filter_rectangles = formatCoordsForTwitter(getCoordsFromItems(getGeographicalFilterRectangles(self.region_json)))
self.influence_source_rectangles = getCoordsFromItems(getInfluenceSourceRectangles(self.region_json))
self.influence_source_cache_ids = getInfluenceSourceIdsFromMarkers(getInfluenceSourceMarkers(self.region_json))
self.keywords = keywords
self.is_shutdown = False
self.last_temporal_time_id_by_source = lastTemporalTimeIdBySource
# Add first so that we only try to set this up one at a time.
# i.e. if instance takes an hour to start up several requests come in
# but we start that instance only once and subsequent requests are ignored.
self.parent_twitter_instances.add(self)
try:
twitterThread = startTwitterThread(self)
self.twitter_thread = twitterThread
self.setup_error = None
except TwitterSession.SessionException as e:
problemStr = 'Failed to establish twitter connection to streaming API with oauth: %s - instance could not be started, reason: %s' % (unicode(self.oauth), e)
logger.error(problemStr)
self.shutdownInstance()
self.setup_error = problemStr
return
addInstance(self.instance_key,
self.twitter_authentication.access_token,
self.twitter_authentication.access_secret,
self.geographic_setup_string,
self.keywords,
self.instance_setup_code,
self.constructed_at)
@staticmethod
def makeAuthTuple(oauthToken, oauthSecret):
return oauthToken, oauthSecret
    def getShortDescription(self, capital):
        # _short_description is stored without its leading letter ('ooking at ...')
        # so it can be prefixed with 'L' or 'l' depending on the capital flag.
        def doCapital():
if capital:
return 'L' + self._short_description
else:
return 'l' + self._short_description
try:
return doCapital()
except AttributeError:
instanceKeywords = self.twitter_thread.twitter_feed.keywords
instanceNumGeographicAreas = len(self.twitter_thread.twitter_feed.locations) / 2 # divide by two because each area has two coordinates.
numInfluenceAreas = len(self.influence_source_rectangles)
numInfluenceLocations = len(self.influence_source_cache_ids)
if instanceKeywords is not None and len(instanceKeywords) > 0:
keywordsString = 'keywords: %s' % joinStringsToLengthPretty(instanceKeywords,Configuration.INSTANCE_SHORT_DESCRIPTION_KEYWORDS_MAX_LENGTH)
else:
keywordsString = None
if instanceNumGeographicAreas > 0:
geographicString = '%d geographic area' % instanceNumGeographicAreas
if instanceNumGeographicAreas > 1:
geographicString += 's'
else:
geographicString = None
if numInfluenceLocations > 0:
influenceLocationsString = '%d influence source location' % numInfluenceLocations
if numInfluenceLocations > 1:
influenceLocationsString += 's'
else:
influenceLocationsString = None
if numInfluenceAreas > 0:
influenceAreasString = '%d influence source area' % numInfluenceAreas
if numInfluenceAreas > 1:
influenceAreasString += 's'
else:
influenceAreasString = None
self._short_description = 'ooking at %s' % joinStringsGrammarPretty([keywordsString, geographicString, influenceLocationsString, influenceAreasString])
return doCapital()
def shutdownInstance(self, removeFromTwitterInstancesParent = True):
if self.is_shutdown:
return
if removeFromTwitterInstancesParent and self.parent_twitter_instances is not None:
# Will call this method with removeFromTwitterInstancesParent set to False.
self.parent_twitter_instances.removeTwitterInstanceByAuth(self.oauth)
else:
self.is_shutdown = True
instanceKey = unicode(self.instance_key)
logger.info('Shutdown instance called on instance %s' % instanceKey)
logger.info('Shutting down twitter thread on instance %s..' % instanceKey)
try:
self.twitter_thread.stop()
# might not have been initialized yet.
except AttributeError:
pass
# Don't wait on thread, cannot find a way to terminate post request in requests API so have
# to wait for next tweet or keep alive request to come from twitter before terminating.
#self.twitter_thread.join()
# Wait for current write to finish, avoid dropping collection and then when write completes
# collection is made again.
time.sleep(1.5)
logger.info('Dropping twitter user data on instance %s..' % instanceKey)
getUserCollection(instanceKey).drop()
logger.info('Dropping twitter tweet data on instance %s..' % instanceKey)
getTweetCollection(instanceKey).drop()
logger.info('Dropping twitter temporal influence data on instance %s..' % instanceKey)
getTemporalInfluenceCollection(instanceKey).drop()
if self.instance_setup_code is not None:
logger.info('Returning instance setup code %s on instance %s..' % (instanceKey, self.instance_setup_code))
unconsumeCode(self.instance_setup_code)
logger.info('Removing instance from instance %s lifetime collection..' % instanceKey)
removeInstance(instanceKey)
logger.info('Instance %s cleaned up successfully' % instanceKey)
def addTemporalEntry(self, temporalCollection, timeId, userProviderId, userPlaceId, followerProviderId, followerPlaceId, followerPlaceType):
if self.is_shutdown:
return
tupleUserCacheId = GeocodeResultAbstract.buildCacheIdTuple(userProviderId, userPlaceId)
dictUserCacheId = GeocodeResultAbstract.buildCacheId(userProviderId, userPlaceId)
lastTimeId = self.last_temporal_time_id_by_source.get(tupleUserCacheId,None)
destination = '%s_%s' % (followerPlaceType, followerPlaceId)
addTemporalEntry(temporalCollection, lastTimeId, timeId, dictUserCacheId, destination, followerProviderId)
self.last_temporal_time_id_by_source[tupleUserCacheId] = timeId
setInstanceTemporalSourceLastTime(self.instance_key, userProviderId, userPlaceId, timeId)
class TwitterInstances(object):
def __init__(self, dataCollection, tweetProvider):
super(TwitterInstances, self).__init__()
assert isinstance(dataCollection, DataCollection)
self._by_oauth = dict()
self._by_instance_key = dict()
self._lock = RLock()
self.data_collection = dataCollection
self.tweet_provider = tweetProvider
def add(self, twitterInstance):
assert isinstance(twitterInstance, TwitterInstance)
self._lock.acquire()
try:
self._by_instance_key[twitterInstance.instance_key] = twitterInstance
self._by_oauth[twitterInstance.oauth] = twitterInstance
finally:
self._lock.release()
def getUniqueInstanceKey(self):
def func():
instanceKey = unicode(getUniqueId())
while instanceKey in self._by_instance_key:
instanceKey = unicode(getUniqueId())
return instanceKey
return criticalSection(self._lock, func)
def createInstance(self, twitterAuthentication, geographic_setup_string, keywords, instance_setup_code):
def func():
twitterInstance = TwitterInstance(self.getUniqueInstanceKey(),
self,
twitterAuthentication,
geographic_setup_string,
keywords,
instance_setup_code)
return twitterInstance
return criticalSection(self._lock, func)
def getInstanceList(self):
return criticalSection(self._lock, lambda: list(self._by_instance_key.values()))
def isInstanceKeyInUse(self, instanceKey):
return criticalSection(self._lock, lambda: instanceKey in self._by_instance_key)
def isAuthInUse(self, oauth):
return criticalSection(self._lock, lambda: oauth in self._by_oauth)
def getInstanceByInstanceKey(self, instanceKey):
result = criticalSection(self._lock, lambda: self._by_instance_key.get(instanceKey, None))
return result
def getInstanceByAuth(self, oauth):
result = criticalSection(self._lock, lambda: self._by_oauth.get(oauth, None))
return result
def removeTwitterInstanceByInstanceKey(self, instanceKey):
self._lock.acquire()
try:
instance = self._by_instance_key.get(instanceKey)
if instance is None:
return None
assert isinstance(instance, TwitterInstance)
# Remove from dictionaries first so that it is no
# longer accessible from the rest of the application.
del self._by_instance_key[instanceKey]
del self._by_oauth[instance.oauth]
finally:
self._lock.release()
# Cleanup instance.
instance.shutdownInstance(False)
self.data_collection.removeInstanceData(instanceKey)
return instance
def removeTwitterInstanceByAuth(self, oauth):
self._lock.acquire()
try:
instance = self._by_oauth.get(oauth)
if instance is None:
return None
assert isinstance(instance, TwitterInstance)
# Remove from dictionaries first so that it is no
# longer accessible from the rest of the application.
del self._by_oauth[oauth]
del self._by_instance_key[instance.instance_key]
            logger.debug(unicode(self._by_instance_key))
finally:
self._lock.release()
# Cleanup instance.
instance.shutdownInstance(False)
self.data_collection.removeInstanceData(unicode(instance.instance_key))
return instance
def restartTwitterInstanceByAuth(twitterInstances, auth):
twitterInstance = twitterInstances.removeTwitterInstanceByAuth(auth)
if twitterInstance is None:
problemStr = 'Failed to remove instance with auth: %s' % unicode(auth)
logger.error(problemStr)
return None
assert isinstance(twitterInstance,TwitterInstance)
return TwitterInstance(twitterInstance.instance_key,
twitterInstance.parent_twitter_instances,
twitterInstance.twitter_authentication,
twitterInstance.geographic_setup_string,
twitterInstance.keywords,
twitterInstance.instance_setup_code)
def restartTwitterInstance(twitterInstances, twitterInstance):
restartTwitterInstanceByAuth(twitterInstances, twitterInstance.oauth)
class TwitterInstancesPruner(BaseThread):
""" This utility thread loops through all the twitter instances
and checks when they were last used, cleaning up old ones. """
def __init__(self, maxInactive, maxConstructAge, instances):
super(TwitterInstancesPruner, self).__init__('TwitterInstancesPruner',
criticalThread = True)
assert isinstance(instances, TwitterInstances)
self.max_inactive = maxInactive
self.max_construct_age = maxConstructAge
self.instances = instances
def _run(self):
while True:
copy = criticalSection(self.instances._lock, lambda: dict(self.instances._by_oauth))
for oauth, instance in copy.iteritems():
assert isinstance(instance, TwitterInstance)
if instance.enable_shutdown_after_no_usage and instance.age > self.max_inactive:
# I want to see this in the error log.
logger.critical('Cleaning up instance with oauth: %s, it has been inactive for > %dms' % (unicode(oauth), self.max_inactive))
self.instances.removeTwitterInstanceByAuth(oauth)
if instance.construct_age > self.max_construct_age:
logger.critical('Restarting instance with oauth: %s, it has been alive > %dms' % (unicode(oauth), self.max_construct_age))
result = restartTwitterInstanceByAuth(self.instances, oauth)
                    # restartTwitterInstanceByAuth returns None on failure.
                    if result is None:
                        logger.error('Failed to restart instance with oauth: %s' % unicode(oauth))
time.sleep(2)
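# Minimal wiring sketch (not executed at import time; assumes BaseThread
# exposes the usual Thread start() method and that both limits are in
# milliseconds, matching the age/construct_age comparisons above):
#
#   pruner = TwitterInstancesPruner(maxInactive=30 * 60 * 1000,
#                                   maxConstructAge=24 * 60 * 60 * 1000,
#                                   instances=twitterInstances)
#   pruner.start()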
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class RoutesOperations(object):
"""RoutesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2015-06-15".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-06-15"
self.config = config
def delete(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Route <azure.mgmt.network.v2015_06_15.models.Route>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, route_name, route_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update
route operation.
:type route_parameters: :class:`Route
<azure.mgmt.network.v2015_06_15.models.Route>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Route
<azure.mgmt.network.v2015_06_15.models.Route>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(route_parameters, 'Route')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if response.status_code == 201:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoutePaged
<azure.mgmt.network.v2015_06_15.models.RoutePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoutePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoutePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
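# Usage sketch (illustrative only; assumes these operations are exposed as the
# `routes` attribute of an already-authenticated network management client and
# that the named resources exist):
#
#   route = client.routes.get('my-rg', 'my-route-table', 'default-route')
#   poller = client.routes.create_or_update(
#       'my-rg', 'my-route-table', 'default-route',
#       models.Route(address_prefix='0.0.0.0/0', next_hop_type='Internet'))
#   route = poller.result()
#   client.routes.delete('my-rg', 'my-route-table', 'default-route').wait()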
|
|
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
View Helpers for Quark Plugin
"""
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from quark.db import ip_types
from quark import network_strategy
from quark import protocols
from quark import tags
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
STRATEGY = network_strategy.STRATEGY
PORT_TAG_REGISTRY = tags.PORT_TAG_REGISTRY
quark_view_opts = [
cfg.BoolOpt('show_allocation_pools',
default=True,
                help=_('Controls whether or not to calculate and display '
                       'allocation pools')),
cfg.BoolOpt('show_ipam_strategy',
default=False,
help=_('Controls whether or not to show ipam_strategy')),
cfg.BoolOpt('show_subnet_ip_policy_id',
default=True,
                help=_('Controls whether or not to show ip_policy_id for '
                       'subnets')),
cfg.BoolOpt('show_provider_subnet_ids',
default=True,
help=_('Controls whether or not to show the provider subnet '
'id specified in the network strategy or use the '
'real id.')),
]
CONF.register_opts(quark_view_opts, "QUARK")
def _is_default_route(route):
return route.value == 0
def _make_network_dict(network, fields=None):
shared_net = STRATEGY.is_provider_network(network["id"])
res = {"id": network["id"],
"name": network.get("name"),
"tenant_id": network.get("tenant_id"),
"admin_state_up": True,
"status": "ACTIVE",
"shared": shared_net}
if CONF.QUARK.show_ipam_strategy:
res['ipam_strategy'] = network.get("ipam_strategy")
if not shared_net:
if fields and "all_subnets" in fields:
res["subnets"] = [_make_subnet_dict(s)
for s in network.get("subnets", [])]
else:
res["subnets"] = [s["id"] for s in network.get("subnets", [])]
else:
res["subnets"] = STRATEGY.subnet_ids_for_network(network["id"])
return res
def _make_subnet_dict(subnet, fields=None):
dns_nameservers = [str(netaddr.IPAddress(dns["ip"]))
for dns in subnet.get("dns_nameservers", [])]
subnet_id = subnet.get("id")
if STRATEGY.is_provider_subnet(subnet_id):
net_id = STRATEGY.get_network_for_subnet(subnet_id)
else:
net_id = subnet["network_id"]
res = {"id": subnet_id,
"name": subnet.get("name"),
"tenant_id": subnet.get("tenant_id"),
"network_id": net_id,
"ip_version": subnet.get("ip_version"),
"dns_nameservers": dns_nameservers or [],
"cidr": subnet.get("cidr"),
"shared": STRATEGY.is_provider_network(net_id),
"enable_dhcp": None}
if CONF.QUARK.show_subnet_ip_policy_id:
res['ip_policy_id'] = subnet.get("ip_policy_id")
if (CONF.QUARK.show_allocation_pools and not
STRATEGY.is_provider_subnet(subnet_id)):
res["allocation_pools"] = subnet.get('allocation_pools', [])
else:
res["allocation_pools"] = []
def _host_route(route):
return {"destination": route["cidr"],
"nexthop": route["gateway"]}
res["gateway_ip"] = None
res["host_routes"] = []
default_found = False
for route in subnet.get("routes", []):
netroute = netaddr.IPNetwork(route["cidr"])
if _is_default_route(netroute):
# NOTE(mdietz): This has the potential to find more than one
# default route. Quark normally won't allow you to create
# more than one, but it's plausible one exists regardless.
# As such, we're going to pretend it isn't possible, but
# log it anyway.
if default_found:
LOG.info(_("Default route %(gateway_ip)s already found for "
"subnet %(id)s") % res)
res["gateway_ip"] = route["gateway"]
default_found = True
else:
res["host_routes"].append(_host_route(route))
return res
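# Illustrative example of the route handling above (values are made up): a
# subnet whose "routes" contain
#   [{"cidr": "0.0.0.0/0", "gateway": "10.0.0.1"},
#    {"cidr": "192.168.0.0/24", "gateway": "10.0.0.254"}]
# yields gateway_ip "10.0.0.1" (the default route) plus
#   host_routes == [{"destination": "192.168.0.0/24", "nexthop": "10.0.0.254"}]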
def _make_security_group_dict(security_group, fields=None):
res = {"id": security_group.get("id"),
"description": security_group.get("description"),
"name": security_group.get("name"),
"tenant_id": security_group.get("tenant_id")}
res["security_group_rules"] = [
_make_security_group_rule_dict(r) for r in security_group["rules"]]
return res
def _make_security_group_rule_dict(security_rule, fields=None):
ethertype = protocols.human_readable_ethertype(
security_rule.get("ethertype"))
protocol = protocols.human_readable_protocol(
security_rule.get("protocol"), ethertype)
res = {"id": security_rule.get("id"),
"ethertype": ethertype,
"direction": security_rule.get("direction"),
"tenant_id": security_rule.get("tenant_id"),
"port_range_max": security_rule.get("port_range_max"),
"port_range_min": security_rule.get("port_range_min"),
"protocol": protocol,
"remote_ip_prefix": security_rule.get("remote_ip_prefix"),
"security_group_id": security_rule.get("group_id"),
"remote_group_id": security_rule.get("remote_group_id")}
return res
def _ip_port_dict(ip, port, fields=None):
service = ip.get_service_for_port(port)
res = {"id": port.get("id"),
"device_id": port.get("device_id"),
"service": service}
return res
def _port_dict(port, fields=None):
res = {"id": port.get("id"),
"name": port.get("name"),
"network_id": port["network_id"],
"tenant_id": port.get("tenant_id"),
"mac_address": port.get("mac_address"),
"admin_state_up": port.get("admin_state_up"),
"status": "ACTIVE",
"security_groups": [group.get("id", None) for group in
port.get("security_groups", None)],
"device_id": port.get("device_id"),
"device_owner": port.get("device_owner")}
if "mac_address" in res and res["mac_address"]:
mac = str(netaddr.EUI(res["mac_address"])).replace('-', ':')
res["mac_address"] = mac
# NOTE(mdietz): more pythonic key in dict check fails here. Leave as get
if port.get("bridge"):
res["bridge"] = port["bridge"]
    # NOTE(ClifHouck): This causes another trip to the DB since tags are
    # not eager loaded. According to mdietz this should be a small impact on
    # performance, but if the tag system gets used more on ports, we may
# want to eager load the tags.
try:
t = PORT_TAG_REGISTRY.get_all(port)
res.update(t)
except Exception as e:
# NOTE(morgabra) We really don't want to break port-listing if
# this goes sideways here, so we pass.
msg = ("Unknown error loading tags for port %s: %s"
% (port["id"], e))
LOG.exception(msg)
return res
def _make_port_address_dict(ip, port, fields=None):
enabled = ip.enabled_for_port(port)
subnet_id = ip.get("subnet_id")
net_id = ip.get("network_id")
show_provider_subnet_ids = CONF.QUARK.show_provider_subnet_ids
if STRATEGY.is_provider_network(net_id) and show_provider_subnet_ids:
subnet_id = STRATEGY.get_provider_subnet_id(
net_id, ip["version"])
ip_addr = {"subnet_id": subnet_id,
"ip_address": ip.formatted(),
"enabled": enabled}
if fields and "port_subnets" in fields:
ip_addr["subnet"] = _make_subnet_dict(ip["subnet"])
return ip_addr
def _make_port_for_ip_dict(ip, port, fields=None):
res = _ip_port_dict(ip, port)
return res
def _ip_is_fixed(port, ip):
at = ip.get('address_type')
return (not at or at in (ip_types.FIXED, ip_types.SHARED))
def _make_port_dict(port, fields=None):
res = _port_dict(port)
ips = []
for assoc in port.associations:
ips.append(assoc.ip_address)
res["fixed_ips"] = [_make_port_address_dict(ip, port, fields)
for ip in ips if _ip_is_fixed(port, ip)]
return res
def _make_ip_ports_list(ip, query, fields=None):
ports = []
for port in query:
port_dict = _ip_port_dict(ip, port, fields)
ports.append(port_dict)
return ports
def _make_ports_list(query, fields=None):
ports = []
for port in query:
port_dict = _port_dict(port, fields)
port_dict["fixed_ips"] = [_make_port_address_dict(ip, port, fields)
for ip in port.ip_addresses if
_ip_is_fixed(port, ip)]
ports.append(port_dict)
return ports
def _make_subnets_list(query, fields=None):
subnets = []
for subnet in query:
subnets.append(_make_subnet_dict(subnet, fields=fields))
return subnets
def _make_mac_range_dict(mac_range):
return {"id": mac_range["id"],
"cidr": mac_range["cidr"]}
def _make_segment_allocation_range_dict(sa_range, allocations=None):
size = len(xrange(sa_range["first_id"], sa_range["last_id"] + 1))
sa_dict = {
"id": sa_range["id"],
"segment_id": sa_range["segment_id"],
"segment_type": sa_range["segment_type"],
"first_id": sa_range["first_id"],
"last_id": sa_range["last_id"],
"do_not_use": sa_range["do_not_use"],
"size": size}
if allocations is not None:
sa_dict["free_ids"] = sa_dict["size"] - allocations
return sa_dict
def _make_route_dict(route):
return {"id": route["id"],
"cidr": route["cidr"],
"gateway": route["gateway"],
"subnet_id": route["subnet_id"]}
def _make_ip_dict(address):
return {"id": address["id"],
"network_id": address["network_id"],
"ip_address": address.formatted(),
"address": address.formatted(),
"port_ids": [assoc.port_id
for assoc in address["associations"]],
"subnet_id": address["subnet_id"],
"tenant_id": address["used_by_tenant_id"],
"version": address["version"],
"type": address['address_type']}
def _make_ip_policy_dict(ipp):
return {"id": ipp["id"],
"tenant_id": ipp["tenant_id"],
"name": ipp["name"],
"subnet_ids": [s["id"] for s in ipp["subnets"]],
"network_ids": [n["id"] for n in ipp["networks"]],
"exclude": [ippc["cidr"] for ippc in ipp["exclude"]]}
def _make_floating_ip_dict(flip, port_id=None):
if not port_id:
ports = flip.ports
port_id = None
if ports and len(ports) > 0:
port_id = None if not ports[0] else ports[0].id
fixed_ip = flip.fixed_ips[0] if flip.fixed_ips else None
return {"id": flip.get("id"),
"floating_network_id": flip.get("network_id"),
"router_id": CONF.QUARK.floating_ip_router_id,
"fixed_ip_address": None if not fixed_ip else fixed_ip.formatted(),
"floating_ip_address": flip.formatted(),
"tenant_id": flip.get("used_by_tenant_id"),
"status": "RESERVED" if not port_id else "ASSOCIATED",
"port_id": port_id}
def _make_scaling_ip_dict(flip):
# Can an IPAddress.fixed_ip have more than one port associated with it?
ports = []
for fixed_ip in flip.fixed_ips:
if fixed_ip.ports:
ports.append({"port_id": fixed_ip.ports[0].id,
"fixed_ip_address": fixed_ip.address_readable})
return {"id": flip.get("id"),
"scaling_ip_address": None if not flip else flip.formatted(),
"scaling_network_id": flip.get("network_id"),
"tenant_id": flip.get("used_by_tenant_id"),
"status": flip.get("status"),
"ports": ports}
def _make_job_dict(job):
return {"id": job.get('id'),
"action": job.get('action'),
"completed": job.get('completed'),
"tenant_id": job.get('tenant_id'),
"created_at": job.get('created_at')}
|
|
# import the necessary packages
from __future__ import print_function
from google.protobuf import text_format
from cStringIO import StringIO
from PIL import Image
import scipy.ndimage as nd
import numpy as np
import caffe
import os
class BatCountry:
def __init__(self, base_path, deploy_path, model_path,
patch_model="./tmp.prototxt", mean=(104.0, 116.0, 122.0),
channels=(2, 1, 0)):
# if the deploy path is None, set the default
if deploy_path is None:
deploy_path = base_path + "/deploy.prototxt"
        # if the model path is None, set it to the default GoogLeNet model
if model_path is None:
model_path = base_path + "/imagenet.caffemodel"
# check to see if the model should be patched to compute gradients
if patch_model:
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(deploy_path).read(), model)
model.force_backward = True
f = open(patch_model, "w")
f.write(str(model))
f.close()
# load the network and store the patched model path
self.net = caffe.Classifier(patch_model, model_path, mean=np.float32(mean),
channel_swap=channels)
self.patch_model = patch_model
def dream(self, image, iter_n, octave_n, octave_scale=None,
end="inception_4c/output", clip=True, step_fn=None, objective_fn=None,
preprocess_fn=None, deprocess_fn=None, verbose=True, visualize=False,
**step_params):
if iter_n is None:
iter_n = 10
if octave_n is None:
octave_n = 4
if octave_scale is None:
octave_scale = 1.4
# if a step function has not been supplied, initialize it as the
# standard gradient ascent step
if step_fn is None:
step_fn = BatCountry.gradient_ascent_step
# if the objective function has not been supplied, initialize it
# as the L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# if the deprocess function has not been supplied, initialize it
if deprocess_fn is None:
deprocess_fn = BatCountry.deprocess
# initialize the visualization list
visualizations = []
        # prepare the base image for each octave
octaves = [preprocess_fn(self.net, image)]
for i in xrange(octave_n - 1):
octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale,
1.0 / octave_scale), order=1))
# allocate image for network-produced details
detail = np.zeros_like(octaves[-1])
src = self.net.blobs["data"]
for octave, octave_base in enumerate(octaves[::-1]):
h, w = octave_base.shape[-2:]
if octave > 0:
# upscale details from the previous octave
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0 * h/ h1, 1.0 * w / w1), order=1)
# resize the network's input image size
src.reshape(1, 3, h, w)
src.data[0] = octave_base + detail
for i in xrange(iter_n):
step_fn(self.net, end=end, clip=clip, objective_fn=objective_fn,
**step_params)
# visualization
vis = deprocess_fn(self.net, src.data[0])
# adjust image contrast if clipping is disabled
if not clip:
vis = vis * (255.0 / np.percentile(vis, 99.98))
if verbose:
print("octave={}, iter={}, layer={}, image_dim={}".format(octave,
i, end, vis.shape))
# check to see if the visualization list should be
# updated
if visualize:
k = "octave_{}-iter_{}-layer_{}".format(octave, i,
end.replace("/", "_"))
visualizations.append((k, vis))
# extract details produced on the current octave
detail = src.data[0] - octave_base
# grab the resulting image
r = deprocess_fn(self.net, src.data[0])
# check to see if the visualizations should be included
if visualize:
r = (r, visualizations)
return r
@staticmethod
def gradient_ascent_step(net, step_size=1.5, end="inception_4c/output",
jitter=32, clip=True, objective_fn=None, **objective_params):
# if the objective function is None, initialize it as
# the standard L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# input image is stored in Net's 'data' blob
src = net.blobs["data"]
dst = net.blobs[end]
# apply jitter shift
ox, oy = np.random.randint(-jitter, jitter + 1, 2)
src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)
net.forward(end=end)
objective_fn(dst, **objective_params)
net.backward(start=end)
g = src.diff[0]
# apply normalized ascent step to the input image
src.data[:] += step_size / np.abs(g).mean() * g
# unshift image
src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)
        # if requested, clip the image data to stay within the valid range
if clip:
bias = net.transformer.mean["data"]
src.data[:] = np.clip(src.data, -bias, 255 - bias)
def layers(self):
# return the layers of the network
return self.net._layer_names
def cleanup(self):
# remove the patched model from disk
os.remove(self.patch_model)
def prepare_guide(self, image, end="inception_4c/output", maxW=224, maxH=224,
preprocess_fn=None):
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# grab dimensions of input image
(w, h) = image.size
# GoogLeNet was trained on images with maximum width and heights
# of 224 pixels -- if either dimension is larger than 224 pixels,
# then we'll need to do some resizing
        nW, nH = maxW, maxH
        if w != maxW or h != maxH:
image = np.float32(image.resize((nW, nH), Image.BILINEAR))
(src, dst) = (self.net.blobs["data"], self.net.blobs[end])
src.reshape(1, 3, nH, nW)
src.data[0] = preprocess_fn(self.net, image)
self.net.forward(end=end)
guide_features = dst.data[0].copy()
return guide_features
@staticmethod
def L2_objective(dst):
dst.diff[:] = dst.data
@staticmethod
def guided_objective(dst, objective_features):
x = dst.data[0].copy()
y = objective_features
ch = x.shape[0]
x = x.reshape(ch,-1)
y = y.reshape(ch,-1)
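        # shape bookkeeping: x holds the current activations as (C, H*W) and y
        # the guide features as (C, Hg*Wg), so A = x.T.dot(y) has shape
        # (H*W, Hg*Wg); A.argmax(1) then picks, for each spatial position of
        # the current activations, the guide position whose feature vector
        # matches best, and that guide vector becomes the gradient target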
# compute the matrix of dot-products with guide features
A = x.T.dot(y)
# select ones that match best
dst.diff[0].reshape(ch, -1)[:] = y[:,A.argmax(1)]
@staticmethod
def preprocess(net, img):
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean["data"]
@staticmethod
def deprocess(net, img):
return np.dstack((img + net.transformer.mean["data"])[::-1])
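# ---------------------------------------------------------------------------
# Illustrative usage sketch. The model directory and image file names below
# are placeholder assumptions, and the BatCountry constructor is assumed to
# take the path to a Caffe model bundle; adjust for a real run.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bc = BatCountry("models/bvlc_googlenet")
    # plain dreaming: amplify whatever the target layer already responds to
    result = bc.dream(np.float32(Image.open("input.jpg")),
        end="inception_4c/output")
    Image.fromarray(np.uint8(result)).save("dream.jpg")
    # guided dreaming: bias the ascent toward the features of a guide image
    guide_features = bc.prepare_guide(Image.open("guide.jpg"),
        end="inception_4c/output")
    result = bc.dream(np.float32(Image.open("input.jpg")),
        end="inception_4c/output",
        objective_fn=BatCountry.guided_objective,
        objective_features=guide_features)
    Image.fromarray(np.uint8(result)).save("guided_dream.jpg")
    bc.cleanup()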
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.endpoint_service import pagers
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import endpoint
from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
from google.cloud.aiplatform_v1.types import endpoint_service
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
from .client import EndpointServiceClient
class EndpointServiceAsyncClient:
"""A service for managing Vertex AI's Endpoints."""
_client: EndpointServiceClient
DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT
endpoint_path = staticmethod(EndpointServiceClient.endpoint_path)
parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path)
model_path = staticmethod(EndpointServiceClient.model_path)
parse_model_path = staticmethod(EndpointServiceClient.parse_model_path)
model_deployment_monitoring_job_path = staticmethod(
EndpointServiceClient.model_deployment_monitoring_job_path
)
parse_model_deployment_monitoring_job_path = staticmethod(
EndpointServiceClient.parse_model_deployment_monitoring_job_path
)
network_path = staticmethod(EndpointServiceClient.network_path)
parse_network_path = staticmethod(EndpointServiceClient.parse_network_path)
common_billing_account_path = staticmethod(
EndpointServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
EndpointServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(EndpointServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
EndpointServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
EndpointServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
EndpointServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(EndpointServiceClient.common_project_path)
parse_common_project_path = staticmethod(
EndpointServiceClient.parse_common_project_path
)
common_location_path = staticmethod(EndpointServiceClient.common_location_path)
parse_common_location_path = staticmethod(
EndpointServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EndpointServiceAsyncClient: The constructed client.
"""
return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EndpointServiceAsyncClient: The constructed client.
"""
return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return EndpointServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> EndpointServiceTransport:
"""Returns the transport used by the client instance.
Returns:
EndpointServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, EndpointServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the endpoint service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.EndpointServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = EndpointServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_endpoint(
self,
request: Union[endpoint_service.CreateEndpointRequest, dict] = None,
*,
parent: str = None,
endpoint: gca_endpoint.Endpoint = None,
endpoint_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates an Endpoint.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_endpoint():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
endpoint = aiplatform_v1.Endpoint()
endpoint.display_name = "display_name_value"
request = aiplatform_v1.CreateEndpointRequest(
parent="parent_value",
endpoint=endpoint,
)
# Make the request
operation = client.create_endpoint(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]):
The request object. Request message for
[EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint].
parent (:class:`str`):
Required. The resource name of the Location to create
the Endpoint in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
Required. The Endpoint to create.
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
endpoint_id (:class:`str`):
Immutable. The ID to use for endpoint, which will become
the final component of the endpoint resource name. If
not provided, Vertex AI will generate a value for this
ID.
This value should be 1-10 characters, and valid
characters are /[0-9]/. When using HTTP/JSON, this field
is populated based on a query string argument, such as
``?endpoint_id=12345``. This is the fallback for fields
that are not included in either the URI or the body.
This corresponds to the ``endpoint_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, endpoint, endpoint_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.CreateEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if endpoint is not None:
request.endpoint = endpoint
if endpoint_id is not None:
request.endpoint_id = endpoint_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_endpoint,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_endpoint.Endpoint,
metadata_type=endpoint_service.CreateEndpointOperationMetadata,
)
# Done; return the response.
return response
async def get_endpoint(
self,
request: Union[endpoint_service.GetEndpointRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_endpoint():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEndpointRequest(
name="name_value",
)
# Make the request
response = client.get_endpoint(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]):
The request object. Request message for
[EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint]
name (:class:`str`):
Required. The name of the Endpoint resource. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Endpoint:
Models are deployed into it, and
afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.GetEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_endpoint,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_endpoints(
self,
request: Union[endpoint_service.ListEndpointsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEndpointsAsyncPager:
r"""Lists Endpoints in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_endpoints():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEndpointsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]):
The request object. Request message for
[EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
parent (:class:`str`):
Required. The resource name of the Location from which
to list the Endpoints. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager:
Response message for
[EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.ListEndpointsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_endpoints,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEndpointsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_endpoint(
self,
request: Union[endpoint_service.UpdateEndpointRequest, dict] = None,
*,
endpoint: gca_endpoint.Endpoint = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_endpoint():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
endpoint = aiplatform_v1.Endpoint()
endpoint.display_name = "display_name_value"
request = aiplatform_v1.UpdateEndpointRequest(
endpoint=endpoint,
)
# Make the request
response = client.update_endpoint(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]):
The request object. Request message for
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
Required. The Endpoint which replaces
the resource on the server.
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. See
[google.protobuf.FieldMask][google.protobuf.FieldMask].
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Endpoint:
Models are deployed into it, and
afterwards Endpoint is called to obtain
predictions and explanations.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.UpdateEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_endpoint,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("endpoint.name", request.endpoint.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_endpoint(
self,
request: Union[endpoint_service.DeleteEndpointRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an Endpoint.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_endpoint():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEndpointRequest(
name="name_value",
)
# Make the request
operation = client.delete_endpoint(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]):
The request object. Request message for
[EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint].
name (:class:`str`):
Required. The name of the Endpoint resource to be
deleted. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.DeleteEndpointRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_endpoint,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def deploy_model(
self,
request: Union[endpoint_service.DeployModelRequest, dict] = None,
*,
endpoint: str = None,
deployed_model: gca_endpoint.DeployedModel = None,
traffic_split: Sequence[
endpoint_service.DeployModelRequest.TrafficSplitEntry
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_deploy_model():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
deployed_model.model = "model_value"
request = aiplatform_v1.DeployModelRequest(
endpoint="endpoint_value",
deployed_model=deployed_model,
)
# Make the request
operation = client.deploy_model(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]):
The request object. Request message for
[EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
endpoint (:class:`str`):
Required. The name of the Endpoint resource into which
to deploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`):
Required. The DeployedModel to be created within the
Endpoint. Note that
[Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
must be updated for the DeployedModel to start receiving
traffic, either as part of this call, or via
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
This corresponds to the ``deployed_model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`):
A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that
DeployedModel.
If this field is non-empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. To refer to the ID of the Model
                being deployed by this request, use "0"; the actual ID of the
                new DeployedModel will be filled in its place by this method.
                The traffic percentage values must add up to 100.
If this field is empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
is not updated.
This corresponds to the ``traffic_split`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
Response message for
[EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, deployed_model, traffic_split])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.DeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if deployed_model is not None:
request.deployed_model = deployed_model
if traffic_split:
request.traffic_split.update(traffic_split)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.deploy_model,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
endpoint_service.DeployModelResponse,
metadata_type=endpoint_service.DeployModelOperationMetadata,
)
# Done; return the response.
return response
async def undeploy_model(
self,
request: Union[endpoint_service.UndeployModelRequest, dict] = None,
*,
endpoint: str = None,
deployed_model_id: str = None,
traffic_split: Sequence[
endpoint_service.UndeployModelRequest.TrafficSplitEntry
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Undeploys a Model from an Endpoint, removing a
DeployedModel from it, and freeing all resources it's
using.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_undeploy_model():
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UndeployModelRequest(
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
)
# Make the request
operation = client.undeploy_model(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]):
The request object. Request message for
[EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
endpoint (:class:`str`):
Required. The name of the Endpoint resource from which
to undeploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deployed_model_id (:class:`str`):
Required. The ID of the DeployedModel
to be undeployed from the Endpoint.
This corresponds to the ``deployed_model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`):
If this field is provided, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. If the last DeployedModel is
being undeployed from the Endpoint, the
[Endpoint.traffic_split] will always end up empty when
this call returns. A DeployedModel will be successfully
undeployed only if it doesn't have any traffic assigned
to it when this method executes, or if this field
unassigns any traffic to it.
This corresponds to the ``traffic_split`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`
Response message for
[EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = endpoint_service.UndeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if deployed_model_id is not None:
request.deployed_model_id = deployed_model_id
if traffic_split:
request.traffic_split.update(traffic_split)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.undeploy_model,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
endpoint_service.UndeployModelResponse,
metadata_type=endpoint_service.UndeployModelOperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("EndpointServiceAsyncClient",)
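# ---------------------------------------------------------------------------
# Illustrative sketches of driving the async client above. The project,
# location, display name and resource values are placeholders; credentials
# are resolved from the environment as described in the constructor docstring.
# ---------------------------------------------------------------------------
async def _example_create_endpoint():
    client = EndpointServiceAsyncClient()
    endpoint = gca_endpoint.Endpoint(display_name="example-endpoint")
    # create_endpoint returns a long-running operation: await the RPC first,
    # then await result() for the created Endpoint resource.
    operation = await client.create_endpoint(
        parent="projects/example-project/locations/us-central1",
        endpoint=endpoint,
    )
    return await operation.result()
async def _example_deploy_model(endpoint_name, model_name):
    client = EndpointServiceAsyncClient()
    # mirror the generated sample above; real deployments typically need a
    # fuller resource configuration than this placeholder
    deployed_model = gca_endpoint.DeployedModel(
        model=model_name,
        dedicated_resources={"min_replica_count": 1},
    )
    # "0" in traffic_split refers to the DeployedModel created by this call;
    # the percentages must add up to 100.
    operation = await client.deploy_model(
        endpoint=endpoint_name,
        deployed_model=deployed_model,
        traffic_split={"0": 100},
    )
    return await operation.result()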
|
|
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
General utility client code for interfacing with DB-API 2.0 modules.
"""
from twext.enterprise.util import mapOracleOutputType
from twext.python.filepath import CachingFilePath
from txdav.common.icommondatastore import InternalDataStoreError
import datetime
import pg8000 as postgres
import six
try:
import os
# In order to encode and decode values going to and from the database,
# cx_Oracle depends on Oracle's NLS support, which in turn relies upon
# libclntsh's reading of environment variables. It doesn't matter what the
# database language is; the database may contain iCalendar data in many
# languages, but we MUST set NLS_LANG to a value that includes an encoding
# (character set?) that includes all of Unicode, so that the connection can
# encode and decode any valid unicode data. This is not to encode and
# decode bytes, but rather, to faithfully relay Python unicode strings to
# the database. The default connection encoding is US-ASCII, which is
# definitely no good. NLS_LANG needs to be set before the first call to
# connect(), not actually before the module gets imported, but this is as
# good a place as any. I am explicitly setting this rather than inheriting
# it, because it's not a configuration value in the sense that multiple
# values may possibly be correct; _only_ UTF-8 is ever correct to work with
# our software, and other values will fail CalDAVTester. (The state is,
# however, process-global; after the first call to connect(), all
# subsequent connections inherit this encoding even if the environment
# variable changes.) -glyph
os.environ['NLS_LANG'] = '.AL32UTF8'
import cx_Oracle
except ImportError:
cx_Oracle = None
class DiagnosticCursorWrapper(object):
"""
Diagnostic wrapper around a DB-API 2.0 cursor for debugging connection
status.
"""
def __init__(self, realCursor, connectionWrapper):
self.realCursor = realCursor
self.connectionWrapper = connectionWrapper
@property
def rowcount(self):
return self.realCursor.rowcount
@property
def description(self):
return self.realCursor.description
def execute(self, sql, args=()):
self.connectionWrapper.state = 'executing %r' % (sql,)
# Use log.debug
# sys.stdout.write(
# "Really executing SQL %r in thread %r\n" %
# ((sql % tuple(args)), thread.get_ident())
# )
self.realCursor.execute(sql, args)
def close(self):
self.realCursor.close()
def fetchall(self):
results = self.realCursor.fetchall()
# Use log.debug
# sys.stdout.write(
# "Really fetching results %r thread %r\n" %
# (results, thread.get_ident())
# )
return results
class OracleCursorWrapper(DiagnosticCursorWrapper):
"""
    Wrapper for cx_Oracle DB-API cursors which implements fetchall() to read
all CLOB objects into strings.
"""
def fetchall(self):
accum = []
for row in self.realCursor:
newRow = []
for column in row:
newRow.append(mapOracleOutputType(column))
accum.append(newRow)
return accum
def var(self, *args):
"""
Create a cx_Oracle variable bound to this cursor. (Forwarded in
addition to the standard methods so that implementors of
L{IDerivedParameter} do not need to be specifically aware of this
layer.)
"""
return self.realCursor.var(*args)
def mapArgs(self, args):
realArgs = []
for arg in args:
if isinstance(arg, str):
# We use NCLOB everywhere, so cx_Oracle requires a unicode-type
# input. But we mostly pass around utf-8 encoded bytes at the
# application layer as they consume less memory, so do the
# conversion here.
arg = arg.decode('utf-8')
if isinstance(arg, unicode) and len(arg) > 1024:
# This *may* cause a type mismatch, but none of the non-CLOB
# strings that we're passing would allow a value this large
# anyway. Smaller strings will be automatically converted by
# the bindings; larger ones will generate an error. I'm not
# sure why cx_Oracle itself doesn't just do the following hack
# automatically and internally for larger values too, but, here
# it is:
v = self.var(cx_Oracle.NCLOB, len(arg) + 1)
v.setvalue(0, arg)
elif isinstance(arg, datetime.datetime):
# By default when cx_Oracle is passed a datetime object it maps it to a
# cx_Oracle.DATETIME variable which does not serialize fraction seconds
# into the query, or call, arguments. However, for high volume systems,
# we really want sub-second resolution for things like the job queue,
# so we want to serialize datetime as cx_Oracle.TIMESTAMP.
v = self.var(cx_Oracle.TIMESTAMP)
v.setvalue(0, arg)
else:
v = arg
realArgs.append(v)
return realArgs
def execute(self, sql, args=()):
realArgs = self.mapArgs(args)
return super(OracleCursorWrapper, self).execute(sql, realArgs)
def callproc(self, name, args=()):
realArgs = self.mapArgs(args)
return self.realCursor.callproc(name, realArgs)
def callfunc(self, name, returnType, args=()):
realArgs = self.mapArgs(args)
return self.realCursor.callfunc(name, returnType, realArgs)
class DiagnosticConnectionWrapper(object):
"""
Diagnostic wrapper around a DB-API 2.0 connection for debugging connection
status.
"""
wrapper = DiagnosticCursorWrapper
def __init__(self, realConnection, label):
self.realConnection = realConnection
self.label = label
self.state = 'idle (start)'
def cursor(self):
return self.wrapper(self.realConnection.cursor(), self)
def close(self):
self.realConnection.close()
self.state = 'closed'
def commit(self):
self.realConnection.commit()
self.state = 'idle (after commit)'
def rollback(self):
self.realConnection.rollback()
self.state = 'idle (after rollback)'
class DBAPIParameters(object):
"""
Object that holds the parameters needed to configure a DBAPIConnector. Since this varies based on
the actual DB module in use, this class abstracts the parameters into separate properties that
are then used to create the actual parameters for each module.
"""
def __init__(self, endpoint=None, user=None, password=None, database=None, ssl=False, **kwargs):
"""
@param endpoint: endpoint string describing the connection
@type endpoint: L{str}
@param user: user name to connect as
@type user: L{str}
@param password: password to use
@type password: L{str}
@param database: database name to connect to
@type database: L{str}
"""
self.endpoint = endpoint
if self.endpoint.startswith("unix:"):
self.unixsocket = self.endpoint[5:]
if ":" in self.unixsocket:
self.unixsocket, self.port = self.unixsocket.split(":")
else:
self.port = None
self.host = None
elif self.endpoint.startswith("tcp:"):
self.unixsocket = None
self.host = self.endpoint[4:]
if ":" in self.host:
self.host, self.port = self.host.split(":")
else:
self.port = None
self.user = user
self.password = password
self.database = database
self.ssl = ssl
class DBAPIConnector(object):
"""
A simple wrapper for DB-API connectors.
@ivar dbModule: the DB-API module to use.
"""
wrapper = DiagnosticConnectionWrapper
def __init__(self, dbModule, preflight, *connectArgs, **connectKw):
self.dbModule = dbModule
self.connectArgs = connectArgs
self.connectKw = connectKw
self.preflight = preflight
def connect(self, label="<unlabeled>"):
connection = self.dbModule.connect(*self.connectArgs, **self.connectKw)
w = self.wrapper(connection, label)
self.preflight(w, **self.connectKw)
return w
@staticmethod
def connectorFor(dbtype, **kwargs):
if dbtype == "postgres":
return DBAPIConnector._connectorFor_module(postgres, **kwargs)
elif dbtype == "oracle":
return DBAPIConnector._connectorFor_module(cx_Oracle, **kwargs)
else:
raise InternalDataStoreError(
"Unknown database type: {}".format(dbtype)
)
@staticmethod
def _connectorFor_module(dbmodule, **kwargs):
m = getattr(DBAPIConnector, "_connectorFor_{}".format(dbmodule.__name__), None)
if m is None:
raise InternalDataStoreError(
"Unknown DBAPI module: {}".format(dbmodule)
)
return m(dbmodule, **kwargs)
@staticmethod
def _connectorFor_pgdb(dbmodule, **kwargs):
"""
Turn properties into pgdb kwargs
"""
params = DBAPIParameters(**kwargs)
dsn = "{0.host}:dbname={0.database}:{0.user}:{0.password}::".format(params)
dbkwargs = {}
if params.port:
dbkwargs["host"] = "{}:{}".format(params.host, params.port)
if "txnTimeoutSeconds" in kwargs:
dbkwargs["txnTimeoutSeconds"] = kwargs["txnTimeoutSeconds"]
return DBAPIConnector(postgres, postgresPreflight, dsn, **dbkwargs)
@staticmethod
def _connectorFor_pg8000(dbmodule, **kwargs):
"""
Turn properties into pg8000 kwargs
"""
params = DBAPIParameters(**kwargs)
dbkwargs = {
"user": params.user,
"password": params.password,
"database": params.database,
}
if params.ssl:
dbkwargs["ssl"] = params.ssl
if params.unixsocket:
dbkwargs["unix_sock"] = params.unixsocket
# We're using a socket file
socketFP = CachingFilePath(dbkwargs["unix_sock"])
if socketFP.isdir():
# We have been given the directory, not the actual socket file
socketFP = socketFP.child(".s.PGSQL.{}".format(params.port if params.port else "5432"))
dbkwargs["unix_sock"] = socketFP.path
if not socketFP.isSocket():
raise InternalDataStoreError(
"No such socket file: {}".format(socketFP.path)
)
else:
dbkwargs["host"] = params.host
if params.port:
dbkwargs["port"] = int(params.port)
if "txnTimeoutSeconds" in kwargs:
dbkwargs["txnTimeoutSeconds"] = kwargs["txnTimeoutSeconds"]
return DBAPIConnector(dbmodule, pg8000Preflight, **dbkwargs)
@staticmethod
    def _connectorFor_cx_Oracle(dbmodule, **kwargs):
"""
Turn properties into DSN string
"""
dsn = "{0.user}/{0.password}@{0.host}:{0.port}/{0.database}".format(DBAPIParameters(**kwargs))
return OracleConnector(dsn)
class OracleConnectionWrapper(DiagnosticConnectionWrapper):
wrapper = OracleCursorWrapper
class OracleConnector(DBAPIConnector):
"""
A connector for cx_Oracle connections, with some special-cased behavior to
make it work more like other DB-API bindings.
Note: this is currently necessary to make our usage of twext.enterprise.dal
work with cx_Oracle, and should be factored somewhere higher-level.
"""
wrapper = OracleConnectionWrapper
def __init__(self, dsn):
super(OracleConnector, self).__init__(
cx_Oracle, oraclePreflight, dsn, threaded=True)
def oraclePreflight(connection, **kwargs):
"""
Pre-flight function for Oracle connections: set the timestamp format to be
something closely resembling our default assumption from Postgres.
"""
c = connection.cursor()
c.execute(
"alter session set NLS_TIMESTAMP_FORMAT = "
"'YYYY-MM-DD HH24:MI:SS.FF'"
)
c.execute(
"alter session set NLS_TIMESTAMP_TZ_FORMAT = "
"'YYYY-MM-DD HH:MI:SS.FF+TZH:TZM'"
)
connection.commit()
c.close()
def postgresPreflight(connection, **kwargs):
"""
Pre-flight function for PostgreSQL connections: enable standard conforming
strings, and set a non-infinite statement timeout.
"""
c = connection.cursor()
# Turn on standard conforming strings. This option is _required_ if
# you want to get correct behavior out of parameter-passing with the
# pgdb module. If it is not set then the server is potentially
# vulnerable to certain types of SQL injection.
c.execute("set standard_conforming_strings=on")
# Abort any statement that takes more than 30 seconds (default) to
# execute. This is necessary as a temporary workaround since it's
# hypothetically possible that different database operations could
# block each other, while executing SQL in the same process (in the
# same thread, since SQL executes in the main thread now). It's
# preferable to see some exceptions while we're in this state than to
# have the entire worker process hang.
c.execute("set statement_timeout={}".format(kwargs.get("txnTimeoutSeconds", 30) * 1000))
# pgdb (as per DB-API 2.0) automatically puts the connection into a
# 'executing a transaction' state when _any_ statement is executed on
# it (even these not-touching-any-data statements); make sure to commit
# first so that the application sees a fresh transaction, and the
# connection can safely be pooled without executing anything on it.
connection.commit()
c.close()
def pg8000Preflight(connection, **kwargs):
"""
Pre-flight function for pg8000/PostgreSQL connections: setup type mappings
in addition to the normal postgres preflight.
"""
# Do the base PostgreSQL preflight
postgresPreflight(connection, **kwargs)
# Patch pg8000 behavior to match what we need wrt text processing
def my_text_out(v):
return v.encode("utf-8") if isinstance(v, unicode) else str(v)
connection.realConnection.py_types[str] = (705, postgres.core.FC_TEXT, my_text_out)
connection.realConnection.py_types[six.text_type] = (705, postgres.core.FC_TEXT, my_text_out)
def my_text_recv(data, offset, length):
return str(data[offset: offset + length])
connection.realConnection.default_factory = lambda: (postgres.core.FC_TEXT, my_text_recv)
connection.realConnection.pg_types[19] = (postgres.core.FC_BINARY, my_text_recv)
connection.realConnection.pg_types[25] = (postgres.core.FC_BINARY, my_text_recv)
connection.realConnection.pg_types[705] = (postgres.core.FC_BINARY, my_text_recv)
connection.realConnection.pg_types[829] = (postgres.core.FC_TEXT, my_text_recv)
connection.realConnection.pg_types[1042] = (postgres.core.FC_BINARY, my_text_recv)
connection.realConnection.pg_types[1043] = (postgres.core.FC_BINARY, my_text_recv)
connection.realConnection.pg_types[2275] = (postgres.core.FC_BINARY, my_text_recv)
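# ---------------------------------------------------------------------------
# Illustrative usage sketch. The endpoint, credential and database names are
# placeholders; connectorFor() dispatches to the module-specific helper above
# that matches the configured database type.
# ---------------------------------------------------------------------------
def _exampleConnect():
    connector = DBAPIConnector.connectorFor(
        "postgres",
        endpoint="tcp:localhost:5432",
        user="caldav",
        password="secret",
        database="caldav",
    )
    # connect() wraps the real connection, runs the preflight statements, and
    # returns a DiagnosticConnectionWrapper
    connection = connector.connect(label="example")
    cursor = connection.cursor()
    cursor.execute("select 1")
    rows = cursor.fetchall()
    cursor.close()
    connection.commit()
    return rows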
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import numpy.random as nr
import random as r
from python_util.util import *
from python_util.data import *
from python_util.options import *
from python_util.gpumodel import *
import sys
import math as m
import layer as lay
from convdata import ImageDataProvider, CIFARDataProvider, DummyConvNetLogRegDataProvider
from os import linesep as NL
import copy as cp
import os
from python_util.convEMdata import EMDataProvider
class Driver(object):
def __init__(self, convnet):
self.convnet = convnet
def on_start_batch(self, batch_data, train):
pass
def on_finish_batch(self):
pass
class GradCheckDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.checkGradients(data)
class TrainingDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.startBatch(data, self.convnet.get_progress(), not train)
class MultiviewTestDriver(TrainingDriver):
def on_start_batch(self, batch_data, train):
self.write_output = False
if train:
TrainingDriver.on_start_batch(self, batch_data, train)
else:
data = batch_data[2]
num_views = self.convnet.test_data_provider.num_views
if self.convnet.test_out != "" and self.convnet.logreg_name != "":
self.write_output = True
self.test_file_name = os.path.join(self.convnet.test_out, 'test_preds_%d' % batch_data[1])
self.probs = n.zeros((data[0].shape[1]/num_views, self.convnet.test_data_provider.get_num_classes()), dtype=n.single)
self.convnet.libmodel.startMultiviewTest(data, num_views, self.probs, self.convnet.logreg_name)
else:
self.convnet.libmodel.startMultiviewTest(data, num_views)
def on_finish_batch(self):
if self.write_output:
if not os.path.exists(self.convnet.test_out):
os.makedirs(self.convnet.test_out)
pickle(self.test_file_name, {'data': self.probs,
'note': 'generated from %s' % self.convnet.save_file})
class FeatureWriterDriver(Driver):
def __init__(self, convnet):
Driver.__init__(self, convnet)
self.last_batch = convnet.test_batch_range[-1]
def on_start_batch(self, batch_data, train):
if train:
raise ModelStateException("FeatureWriter must be used in conjunction with --test-only=1. It writes test data features.")
self.batchnum, self.data = batch_data[1], batch_data[2]
if not os.path.exists(self.convnet.feature_path):
os.makedirs(self.convnet.feature_path)
self.num_ftrs = self.convnet.layers[self.convnet.write_features]['outputs']
self.ftrs = n.zeros((self.data[0].shape[1], self.num_ftrs), dtype=n.single)
self.convnet.libmodel.startFeatureWriter(self.data, [self.ftrs], [self.convnet.write_features])
def on_finish_batch(self):
if not self.convnet.numpy_dump:
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
pickle(path_out, {'data': self.ftrs, 'labels': self.data[1]})
print "Wrote feature file %s" % path_out
else:
#path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
#n.savez_compressed(path_out, data=self.ftrs, labels=self.data[1])
#n.savez(path_out, data=self.ftrs, labels=self.data[1])
            # xxx - workaround: in Python 2.7 both pickle and the zip object that savez relies on have a 32-bit maximum size
path_out = os.path.join(self.convnet.feature_path, 'data_batch_data_%d' % self.batchnum)
n.save(path_out, self.ftrs)
print "Wrote feature file %s" % path_out
path_out = os.path.join(self.convnet.feature_path, 'data_batch_lbls_%d' % self.batchnum)
n.save(path_out, self.data[1])
print "Wrote feature file %s" % path_out
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
n.save(path_out, self.data[0])
print "Wrote feature file %s" % path_out
if self.batchnum == self.last_batch:
#pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
# 'num_vis':self.num_ftrs,
# 'batch_size': self.convnet.test_data_provider.batch_meta['batch_size']})
pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
'num_vis':self.num_ftrs,
'batch_meta': self.convnet.test_data_provider.batch_meta})
self.convnet.test_data_provider.on_finish_featurebatch(self.convnet.feature_path, self.batchnum,
self.batchnum == self.last_batch)
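# ---------------------------------------------------------------------------
# Illustrative sketch: the drivers above all implement the same two hooks, so
# a custom driver only needs to fill them in. batch_data is assumed to be
# (epoch, batchnum, data), as used by the drivers above; the logging here is
# purely an example.
# ---------------------------------------------------------------------------
class LoggingTrainingDriver(TrainingDriver):
    def on_start_batch(self, batch_data, train):
        # remember which batch we are on, then start it as usual
        self.batchnum = batch_data[1]
        TrainingDriver.on_start_batch(self, batch_data, train)
    def on_finish_batch(self):
        print "Finished batch %d" % self.batchnum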
class ConvNet(IGPUModel):
def __init__(self, op, load_dic, dp_params={}):
filename_options = []
for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size', 'em_feature_path', 'init_load_path'):
#for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size', 'em_feature_path'):
dp_params[v] = op.get_value(v)
IGPUModel.__init__(self, "ConvNet", op, load_dic, filename_options, dp_params=dp_params)
def import_model(self):
lib_name = "cudaconvnet._ConvNet"
print "========================="
print "Importing %s C++ module" % lib_name
self.libmodel = __import__(lib_name,fromlist=['_ConvNet'])
def init_model_lib(self):
self.libmodel.initModel(self.layers,
self.device_ids,
self.minibatch_size,
self.conserve_mem)
def init_model_state(self):
ms = self.model_state
layers = ms['layers'] if self.loaded_from_checkpoint else {}
ms['layers'] = lay.LayerParser.parse_layers(os.path.join(self.layer_path, self.layer_def),
os.path.join(self.layer_path, self.layer_params), self, layers=layers)
self.do_decouple_conv()
self.do_unshare_weights()
self.op.set_value('conv_to_local', [], parse=False)
self.op.set_value('unshare_weights', [], parse=False)
self.set_driver()
def do_decouple_conv(self):
# Convert convolutional layers to local
if len(self.op.get_value('conv_to_local')) > 0:
for lname in self.op.get_value('conv_to_local'):
if self.model_state['layers'][lname]['type'] == 'conv':
lay.LocalLayerParser.conv_to_local(self.model_state['layers'], lname)
def do_unshare_weights(self):
# Decouple weight matrices
if len(self.op.get_value('unshare_weights')) > 0:
for name_str in self.op.get_value('unshare_weights'):
if name_str:
name = lay.WeightLayerParser.get_layer_name(name_str)
if name is not None:
name, idx = name[0], name[1]
if name not in self.model_state['layers']:
raise ModelStateException("Layer '%s' does not exist; unable to unshare" % name)
layer = self.model_state['layers'][name]
lay.WeightLayerParser.unshare_weights(layer, self.model_state['layers'], matrix_idx=idx)
else:
raise ModelStateException("Invalid layer name '%s'; unable to unshare." % name_str)
def set_driver(self):
if self.op.get_value('check_grads'):
self.driver = GradCheckDriver(self)
elif self.op.get_value('multiview_test'):
self.driver = MultiviewTestDriver(self)
elif self.op.get_value('write_features'):
self.driver = FeatureWriterDriver(self)
else:
self.driver = TrainingDriver(self)
def fill_excused_options(self):
if self.op.get_value('check_grads'):
self.op.set_value('save_path', '')
self.op.set_value('train_batch_range', '0')
self.op.set_value('test_batch_range', '0')
self.op.set_value('data_path', '')
# Make sure the data provider returned data in proper format
def parse_batch_data(self, batch_data, train=True):
if max(d.dtype != n.single for d in batch_data[2]):
raise DataProviderException("All matrices returned by data provider must consist of single-precision floats.")
return batch_data
def start_batch(self, batch_data, train=True):
self.driver.on_start_batch(batch_data, train)
def finish_batch(self):
ret = IGPUModel.finish_batch(self)
self.driver.on_finish_batch()
return ret
def print_iteration(self):
print "%d.%d (%.2f%%)..." % (self.epoch, self.batchnum, 100 * self.get_progress()),
def print_train_time(self, compute_time_py):
print "(%.3f sec)" % (compute_time_py)
def print_costs(self, cost_outputs):
costs, num_cases = cost_outputs[0], cost_outputs[1]
children = set()
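# Fold the outputs of any child cost layers into their top-level parent cost
# before printing, then delete the child entries so each cost prints only once.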
for errname in costs:
if sum(errname in self.layers[z]['children'] for z in costs) == 0:
# print self.layers[errname]['children']
for child in set(self.layers[errname]['children']) & set(costs.keys()):
costs[errname] = [v + u for v, u in zip(costs[errname], costs[child])]
children.add(child)
filtered_costs = eval(self.layers[errname]['outputFilter'])(costs[errname], num_cases)
print "%s: " % errname,
if 'outputFilterFormatter' not in self.layers[errname]:
print ", ".join("%.6f" % v for v in filtered_costs),
else:
print eval(self.layers[errname]['outputFilterFormatter'])(self,filtered_costs),
if m.isnan(filtered_costs[0]) or m.isinf(filtered_costs[0]):
print "<- error nan or inf!"
sys.exit(1)
for c in children:
del costs[c]
def print_train_results(self):
self.print_costs(self.train_outputs[-1])
def print_test_status(self):
pass
def print_test_results(self):
print NL + "======================Test output======================"
self.print_costs(self.test_outputs[-1])
if not self.test_only:
print NL + "----------------------Averages-------------------------"
self.print_costs(self.aggregate_test_outputs(self.test_outputs[-len(self.test_batch_range):]))
print NL + "-------------------------------------------------------",
for name,val in sorted(self.layers.items(), key=lambda x: x[1]['id']): # This is kind of hacky but will do for now.
l = self.layers[name]
if 'weights' in l:
wscales = [(l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi))) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc']))]
print ""
print NL.join("Layer '%s' weights[%d]: %e [%e] [%e]" % (s[0], s[1], s[2], s[3], s[3]/s[2] if s[2] > 0 else 0) for s in wscales),
print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))),
print ""
def conditional_save(self):
self.save_state()
def aggregate_test_outputs(self, test_outputs):
test_outputs = cp.deepcopy(test_outputs)
num_cases = sum(t[1] for t in test_outputs)
for i in xrange(1, len(test_outputs)):
for k,v in test_outputs[i][0].items():
for j in xrange(len(v)):
test_outputs[0][0][k][j] += test_outputs[i][0][k][j]
return (test_outputs[0][0], num_cases)
@classmethod
def get_options_parser(cls):
op = IGPUModel.get_options_parser()
op.add_option("mini", "minibatch_size", IntegerOptionParser, "Minibatch size", default=128)
op.add_option("layer-def", "layer_def", StringOptionParser, "Layer definition file", set_once=False)
op.add_option("layer-params", "layer_params", StringOptionParser, "Layer parameter file")
op.add_option("layer-path", "layer_path", StringOptionParser, "Layer file path prefix", default="")
op.add_option("check-grads", "check_grads", BooleanOptionParser, "Check gradients and quit?", default=0, excuses=['data_path','save_path', 'save_file_override', 'train_batch_range','test_batch_range'])
op.add_option("multiview-test", "multiview_test", BooleanOptionParser, "Cropped DP: test on multiple patches?", default=0)
op.add_option("inner-size", "inner_size", IntegerOptionParser, "Cropped DP: crop size (0 = don't crop)", default=0, set_once=True)
op.add_option("conv-to-local", "conv_to_local", ListOptionParser(StringOptionParser), "Convert given conv layers to unshared local", default=[])
op.add_option("unshare-weights", "unshare_weights", ListOptionParser(StringOptionParser), "Unshare weight matrices in given layers", default=[])
op.add_option("conserve-mem", "conserve_mem", BooleanOptionParser, "Conserve GPU memory (slower)?", default=0)
op.add_option("color-noise", "color_noise", FloatOptionParser, "Add PCA noise to color channels with given scale", default=0.0)
op.add_option("test-out", "test_out", StringOptionParser, "Output test case predictions to given path", default="", requires=['logreg_name', 'multiview_test'])
op.add_option("logreg-name", "logreg_name", StringOptionParser, "Logreg cost layer name (for --test-out)", default="")
op.add_option("scalar-mean", "scalar_mean", FloatOptionParser, "Subtract this scalar from image (-1 = don't)", default=-1)
op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
# options added just for EM data parser, some override value from EM .ini file
op.add_option("em-feature-path", "em_feature_path", StringOptionParser, "Write EM recon cubes to this path (to be used with --write-features)", default="")
op.add_option("init-load-path", "init_load_path", StringOptionParser, "Path where saved weights or other saved matrix values are stored", default="")
op.add_option("use-numpy-dump", "numpy_dump", BooleanOptionParser, "Save features in numpy format (to be used with --write-features)", default=0)
op.add_option("chunk-skip-list", "chunk_skip_list", ListOptionParser(IntegerOptionParser), "Skip these random EM chunks, usually for test, override .ini", default=[])
op.add_option("dim-ordering", "dim_ordering", StringOptionParser, "Which reslice ordering for EM provider, override .ini", default="")
op.delete_option('max_test_err')
op.options["testing_freq"].default = 57
op.options["num_epochs"].default = 50000
op.options['dp_type'].default = None
DataProvider.register_data_provider('dummy-lr-n', 'Dummy ConvNet logistic regression', DummyConvNetLogRegDataProvider)
DataProvider.register_data_provider('image', 'JPEG-encoded image data provider', ImageDataProvider)
DataProvider.register_data_provider('cifar', 'CIFAR-10 data provider', CIFARDataProvider)
DataProvider.register_data_provider('emdata', 'Electron Microscopy data provider', EMDataProvider)
return op
if __name__ == "__main__":
# nr.seed(6)
op = ConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ConvNet(op, load_dic)
model.start()
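#
# Usage sketch (illustrative only, not executed). The flags for data,
# checkpoint and batch-range selection (--data-path, --save-path, --load-file,
# --train-range, --test-range, --test-only) are assumed to come from the
# IGPUModel base option parser and may be named differently in your checkout;
# the remaining flags are the ones defined in get_options_parser() above.
#
#   # Train a network from a layer definition (script name assumed):
#   python convnet.py --data-path=<data> --save-path=<ckpts> \
#       --layer-def=layers.cfg --layer-params=params.cfg --mini=128
#
#   # Dump features from a trained checkpoint; --write-features selects
#   # FeatureWriterDriver in set_driver(), which writes one
#   # data_batch_<n> / data_batch_lbls_<n> pair per test batch:
#   python convnet.py --load-file=<ckpt> --write-features=<layer-name> \
#       --feature-path=<out-dir> --test-only=1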
|
|
#!/usr/bin/env python
"""
Handler library for Linux IaaS
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
{
"handlerSettings":
{
"protectedSettings":
{
"Password": "UserPassword"
},
"publicSettings":
{
"UserName": "UserName",
"Expiration": "Password expiration date in yyy-mm-dd"
}
}
}
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Status uses either non-localized 'message' or localized 'formattedMessage' but not both.
{
"version": 1.0,
"timestampUTC": "<current utc time>",
"status" : {
"name": "<Handler workload name>",
"operation": "<name of the operation being performed>",
"configurationAppliedTime": "<UTC time indicating when the configuration was last successfully applied>",
"status": "<transitioning | error | success | warning>",
"code": <Valid integer status code>,
"message": {
"id": "id of the localized resource",
"params": [
"MyParam0",
"MyParam1"
]
},
"formattedMessage": {
"lang": "Lang[-locale]",
"message": "formatted user message"
}
}
}
"""
import os
import sys
import imp
import base64
import json
import time
# waagent has no '.py' extension, so create the waagent module import manually.
waagent=imp.load_source('waagent','/usr/sbin/waagent')
def doParse(Log,operation):
handler_env=None
config=None
ctxt=None
code=0
# get the HandlerEnvironment.json. it should always be in ./
waagent.Log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file='./HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
waagent.Error("Unable to locate " + handler_env_file)
sys.exit(1)
ctxt=waagent.GetFileContents(handler_env_file)
if ctxt == None :
waagent.Error("Unable to read " + handler_env_file)
try:
handler_env=json.loads(ctxt)
except:
pass
if handler_env == None :
waagent.Error("JSON error processing " + handler_env_file)
sys.exit(1)
if type(handler_env) == list:
handler_env = handler_env[0]
# parse the dirs
name='NULL'
seqNo='0'
version='0.0'
config_dir='./'
log_dir='./'
status_dir='./'
heartbeat_file='NULL.log'
name=handler_env['name']
seqNo=handler_env['seqNo']
version=str(handler_env['version'])
config_dir=handler_env['handlerEnvironment']['configFolder']
log_dir=handler_env['handlerEnvironment']['logFolder']
status_dir=handler_env['handlerEnvironment']['statusFolder']
heartbeat_file=handler_env['handlerEnvironment']['heartbeatFile']
# always get the newest settings file
code,settings_file=waagent.RunGetOutput('ls -rt ' + config_dir + '/*.settings | tail -1')
if code != 0:
waagent.Error("Unable to locate a .settings file!")
sys.exit(1)
settings_file=settings_file[:-1]
# get our incarnation # from the number of the .settings file
incarnation=os.path.splitext(os.path.basename(settings_file))[0]
waagent.Log('Incarnation is ' + incarnation)
status_file=status_dir+'/'+incarnation+'.status'
waagent.Log("setting file path is" + settings_file)
ctxt=None
ctxt=waagent.GetFileContents(settings_file)
if ctxt == None :
waagent.Error('Unable to read ' + settings_file + '. ')
doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'error','1', operation+' Failed', 'Read .settings', 'error', '1','Unable to read ' + settings_file + '. ','NotReady','1','Exiting')
waagent.Log("Read: " + ctxt)
# parse json
config = None
try:
config=json.loads(ctxt)
except:
waagent.Error('JSON exception decoding ' + ctxt)
if config == None:
waagent.Error("JSON error processing " + settings_file)
return (name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config)
# doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'error','1', operation + ' Failed', 'Parse Config', 'error', '1', 'JSON error processing ' + settings_file,'NotReady','1','Exiting')
# sys.exit(1)
print repr(config)
if config['runtimeSettings'][0]['handlerSettings'].has_key('protectedSettings') == True:
thumb=config['runtimeSettings'][0]['handlerSettings']['protectedSettingsCertThumbprint']
cert=waagent.LibDir+'/'+thumb+'.crt'
pkey=waagent.LibDir+'/'+thumb+'.prv'
waagent.SetFileContents('/tmp/kk',config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
cleartxt=None
cleartxt=waagent.RunGetOutput("base64 -d /tmp/kk | openssl smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey )[1]
if cleartxt == None:
waagent.Error("OpenSSh decode error using thumbprint " + thumb )
doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'errior','1', operation + ' Failed', 'Parse Config', 'error', '1', 'OpenSsh decode error using thumbprint ' + thumb,'NotReady','1','Exiting')
sys.exit(1)
jctxt=''
try:
jctxt=json.loads(cleartxt)
except:
waagent.Error('JSON exception decoding ' + cleartxt)
config['runtimeSettings'][0]['handlerSettings']['protectedSettings']=jctxt
waagent.Log('Config decoded correctly.')
return (name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config)
def doStatusReport(name,seqNo,version,stat_file,current_utc, started_at_utc, workload_name, operation_name, status, status_code, status_message, sub_workload_name, sub_status, sub_status_code, sub_status_message):
#'{"handlerName":"Chef.Bootstrap.WindowsAzure.ChefClient","handlerVersion":"11.12.0.0","status":"NotReady","code":1,"formattedMessage":{"lang":"en-US","message":"Enable command of plugin (name: Chef.Bootstrap.WindowsAzure.ChefClient, version 11.12.0.0) failed with exception Command C:/Packages/Plugins/Chef.Bootstrap.WindowsAzure.ChefClient/11.12.0.0/enable.cmd of Chef.Bootstrap.WindowsAzure.ChefClient has exited with Exit code: 1"}},{"handlerName":"Microsoft.Compute.BGInfo","handlerVersion":"1.1","status":"Ready","formattedMessage":{"lang":"en-US","message":"plugin (name: Microsoft.Compute.BGInfo, version: 1.1) enabled successfully."}}'
stat_rept='{"handlerName":"' + name + '","handlerVersion":"'+version+ '","status":"' +status + '","code":' + status_code + ',"formattedMessage":{"lang":"en-US","message":"' + status_message + '"}}'
cur_file=stat_file+'_current'
with open(cur_file,'w+') as f:
f.write(stat_rept)
# if inc.status exists, rename the inc.status to inc.status_sent
if os.path.exists(stat_file) == True:
os.rename(stat_file,stat_file+'_sent')
# rename inc.status_current to inc.status
os.rename(cur_file,stat_file)
# remove inc.status_sent
if os.path.exists(stat_file+'_sent') == True:
os.unlink(stat_file+'_sent')
def doHealthReport(heartbeat_file,status,code,message):
# heartbeat
health_report='[{"version":"1.0","heartbeat":{"status":"' + status+ '","code":"'+ code + '","Message":"' + message + '"}}]'
if waagent.SetFileContents(heartbeat_file,health_report) == None :
waagent.Error('Unable to write heartbeat info to ' + heartbeat_file)
def doExit(name,seqNo,version,exit_code,status_file,heartbeat_file,operation,status,code,message,sub_operation,sub_status,sub_code,sub_message,health_state,health_code,health_message):
doStatusReport(name,seqNo,version,status_file,time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),name,
operation,status,code,message,sub_operation,sub_status,sub_code,sub_message)
doHealthReport(heartbeat_file,'NotReady','1','Exiting')
sys.exit(exit_code)
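# Usage sketch (illustrative only): a typical extension entry point calls
# doParse() to read HandlerEnvironment.json plus the newest *.settings file,
# performs its work using the returned config, then reports via doExit().
# The 'Enable' operation name and the waagent.LoggerInit() call are
# assumptions for illustration, not requirements of this module.
#
# def main():
#     waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
#     (name, seqNo, version, config_dir, log_dir, settings_file,
#      status_file, heartbeat_file, config) = doParse(waagent.Log, 'Enable')
#     # ... handler-specific work driven by config goes here ...
#     doExit(name, seqNo, version, 0, status_file, heartbeat_file, 'Enable',
#            'success', '0', 'Enable succeeded', 'Enable', 'success', '0',
#            'Enable succeeded', 'Ready', '0', 'Enable succeeded')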
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-15 23:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0041_auto_20171021_2116'),
]
operations = [
migrations.RemoveField(
model_name='citation',
name='publication',
),
migrations.RemoveField(
model_name='commequivalents',
name='property',
),
migrations.RemoveField(
model_name='comminvariance',
name='example',
),
migrations.RemoveField(
model_name='comminvariance',
name='metaproperty',
),
migrations.RemoveField(
model_name='comminvariance',
name='property',
),
migrations.RemoveField(
model_name='comminvariance',
name='theorem',
),
migrations.RemoveField(
model_name='commlogic',
name='theorem',
),
migrations.RemoveField(
model_name='commringproperty',
name='property',
),
migrations.RemoveField(
model_name='commringproperty',
name='ring',
),
migrations.RemoveField(
model_name='equivalents',
name='property',
),
migrations.DeleteModel(
name='FAQ',
),
migrations.RemoveField(
model_name='glossary',
name='reference',
),
migrations.RemoveField(
model_name='invariance',
name='example',
),
migrations.RemoveField(
model_name='invariance',
name='metaproperty',
),
migrations.RemoveField(
model_name='invariance',
name='property',
),
migrations.RemoveField(
model_name='invariance',
name='theorem',
),
migrations.RemoveField(
model_name='logic',
name='theorem',
),
migrations.RemoveField(
model_name='property',
name='comm_version',
),
migrations.RemoveField(
model_name='ring',
name='keywords',
),
migrations.RemoveField(
model_name='ring',
name='reference',
),
migrations.RemoveField(
model_name='ring',
name='user',
),
migrations.RemoveField(
model_name='ringproperty',
name='property',
),
migrations.RemoveField(
model_name='ringproperty',
name='ring',
),
migrations.RemoveField(
model_name='test_ringproperty',
name='property',
),
migrations.RemoveField(
model_name='test_ringproperty',
name='ring',
),
migrations.RemoveField(
model_name='theorem',
name='characterizes',
),
migrations.RemoveField(
model_name='theorem',
name='comm_characterizes',
),
migrations.RemoveField(
model_name='theorem',
name='reference',
),
migrations.DeleteModel(
name='CommRing',
),
migrations.AlterModelOptions(
name='keyword',
options={'ordering': ('name',)},
),
migrations.AlterField(
model_name='keyword',
name='description',
field=models.TextField(blank=True, max_length=400, null=True),
),
migrations.AlterField(
model_name='keyword',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='news',
name='content',
field=models.TextField(blank=True, max_length=400, null=True),
),
migrations.AlterField(
model_name='suggestion',
name='citation',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='suggestion',
name='description',
field=models.TextField(blank=True, max_length=400, null=True),
),
migrations.AlterField(
model_name='suggestion',
name='response',
field=models.TextField(blank=True, max_length=200, null=True),
),
migrations.DeleteModel(
name='Citation',
),
migrations.DeleteModel(
name='CommEquivalents',
),
migrations.DeleteModel(
name='CommInvariance',
),
migrations.DeleteModel(
name='CommLogic',
),
migrations.DeleteModel(
name='CommProperty',
),
migrations.DeleteModel(
name='CommRingProperty',
),
migrations.DeleteModel(
name='Equivalents',
),
migrations.DeleteModel(
name='Glossary',
),
migrations.DeleteModel(
name='Invariance',
),
migrations.DeleteModel(
name='Logic',
),
migrations.DeleteModel(
name='Metaproperty',
),
migrations.DeleteModel(
name='Property',
),
migrations.DeleteModel(
name='Publication',
),
migrations.DeleteModel(
name='Ring',
),
migrations.DeleteModel(
name='RingProperty',
),
migrations.DeleteModel(
name='test_Ring',
),
migrations.DeleteModel(
name='test_RingProperty',
),
migrations.DeleteModel(
name='Theorem',
),
]
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common import importutils
from nova.openstack.common import periodic_task
from nova.openstack.common import timeutils
cell_manager_opts = [
cfg.StrOpt('driver',
default='nova.cells.rpc_driver.CellsRPCDriver',
help='Cells communication driver to use'),
cfg.IntOpt("instance_updated_at_threshold",
default=3600,
help="Number of seconds after an instance was updated "
"or deleted to continue to update cells"),
cfg.IntOpt("instance_update_num_instances",
default=1,
help="Number of instances to update per periodic task run")
]
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_manager_opts, group='cells')
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the messaging module. The
MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.12'
def __init__(self, *args, **kwargs):
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(service_name='cells',
*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
Also ask our child cells for their capacities and capabilities so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop consumers cleanly.
self.driver.start_consumers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
@periodic_task.periodic_task
def _update_our_parents(self, ctxt):
"""Update our parent cells with our capabilities and capacity
if we're at the bottom of the tree.
"""
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
@periodic_task.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
If CONF.cells.instance_updated_at_threshold is set, only attempt
to sync instances that have been updated recently. The CONF
setting defines the maximum number of seconds old the updated_at
can be. I.e., a threshold of 3600 means to only update instances
that have been modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def build_instances(self, ctxt, build_inst_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s) and
forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
def get_cell_info_for_neighbors(self, _ctxt):
"""Return cell information for our neighbor cells."""
return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
self.msg_runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
self.msg_runner.sync_instances(ctxt, project_id, updated_since,
deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def service_update(self, ctxt, host_name, binary, params_to_update):
"""
Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running on
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
:returns: the service reference
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_update(
ctxt, cell_name, host_name, binary, params_to_update)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
"""Proxy an RPC message as-is to a manager."""
compute_topic = CONF.compute_topic
cell_and_host = topic[len(compute_topic) + 1:]
cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
host_name, topic, rpc_message, call, timeout)
return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'host' is not None, host will be of the format 'cell!name@host',
with '@host' being optional. The query will be directed to the
appropriate cell and return all task logs, or task logs matching
the host if specified.
'state' also may be None. If it's not, filter by the state as well.
"""
if host is None:
cell_name = None
else:
cell_name, host = cells_utils.split_cell_and_item(host)
# If no cell name was given, assume that the host name is the
# cell_name and that the target is all hosts
if cell_name is None:
cell_name, host = host, cell_name
responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
task_name, period_beginning, period_ending,
host=host, state=state)
# 1 response per cell. Each response is a list of task log
# entries.
ret_task_logs = []
for response in responses:
task_logs = response.value_or_raise()
for task_log in task_logs:
cells_utils.add_cell_to_task_log(task_log,
response.cell_name)
ret_task_logs.append(task_log)
return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cell_name, compute_id = cells_utils.split_cell_and_item(
compute_id)
response = self.msg_runner.compute_node_get(ctxt, cell_name,
compute_id)
node = response.value_or_raise()
cells_utils.add_cell_to_compute_node(node, cell_name)
return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells."""
responses = self.msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
# 1 response per cell. Each response is a list of compute_node
# entries.
ret_nodes = []
for response in responses:
nodes = response.value_or_raise()
for node in nodes:
cells_utils.add_cell_to_compute_node(node,
response.cell_name)
ret_nodes.append(node)
return ret_nodes
def compute_node_stats(self, ctxt):
"""Return compute node stats totals from all cells."""
responses = self.msg_runner.compute_node_stats(ctxt)
totals = {}
for response in responses:
data = response.value_or_raise()
for key, val in data.iteritems():
totals.setdefault(key, 0)
totals[key] += val
return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
return response.value_or_raise()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
instance_uuid,
request_id)
return response.value_or_raise()
def action_events_get(self, ctxt, cell_name, action_id):
response = self.msg_runner.action_events_get(ctxt, cell_name,
action_id)
return response.value_or_raise()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
response = self.msg_runner.validate_console_port(ctxt,
instance['cell_name'], instance_uuid, console_port,
console_type)
return response.value_or_raise()
def get_capacities(self, ctxt, cell_name):
return self.state_manager.get_capacities(cell_name)
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""BDM was created/updated in this cell. Tell the API cells."""
self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""BDM was destroyed for instance in this cell. Tell the API cells."""
self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
device_name=device_name,
volume_id=volume_id)
def get_migrations(self, ctxt, filters):
"""Fetch migrations applying the filters."""
target_cell = None
if "cell_name" in filters:
_path_cell_sep = cells_utils._PATH_CELL_SEP
target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
filters['cell_name'])
responses = self.msg_runner.get_migrations(ctxt, target_cell,
False, filters)
migrations = []
for response in responses:
migrations += response.value_or_raise()
return migrations
def start_instance(self, ctxt, instance):
"""Start an instance in its cell."""
self.msg_runner.start_instance(ctxt, instance)
def stop_instance(self, ctxt, instance, do_cast=True):
"""Stop an instance in its cell."""
response = self.msg_runner.stop_instance(ctxt, instance,
do_cast=do_cast)
if not do_cast:
return response.value_or_raise()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2016, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for gdi32.dll in ctypes.
"""
from defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Helpers ------------------------------------------------------------------
#--- Types --------------------------------------------------------------------
#--- Constants ----------------------------------------------------------------
# GDI object types
OBJ_PEN = 1
OBJ_BRUSH = 2
OBJ_DC = 3
OBJ_METADC = 4
OBJ_PAL = 5
OBJ_FONT = 6
OBJ_BITMAP = 7
OBJ_REGION = 8
OBJ_METAFILE = 9
OBJ_MEMDC = 10
OBJ_EXTPEN = 11
OBJ_ENHMETADC = 12
OBJ_ENHMETAFILE = 13
OBJ_COLORSPACE = 14
GDI_OBJ_LAST = OBJ_COLORSPACE
# Ternary raster operations
SRCCOPY = 0x00CC0020 # dest = source
SRCPAINT = 0x00EE0086 # dest = source OR dest
SRCAND = 0x008800C6 # dest = source AND dest
SRCINVERT = 0x00660046 # dest = source XOR dest
SRCERASE = 0x00440328 # dest = source AND (NOT dest)
NOTSRCCOPY = 0x00330008 # dest = (NOT source)
NOTSRCERASE = 0x001100A6 # dest = (NOT src) AND (NOT dest)
MERGECOPY = 0x00C000CA # dest = (source AND pattern)
MERGEPAINT = 0x00BB0226 # dest = (NOT source) OR dest
PATCOPY = 0x00F00021 # dest = pattern
PATPAINT = 0x00FB0A09 # dest = DPSnoo
PATINVERT = 0x005A0049 # dest = pattern XOR dest
DSTINVERT = 0x00550009 # dest = (NOT dest)
BLACKNESS = 0x00000042 # dest = BLACK
WHITENESS = 0x00FF0062 # dest = WHITE
NOMIRRORBITMAP = 0x80000000 # Do not Mirror the bitmap in this call
CAPTUREBLT = 0x40000000 # Include layered windows
# Region flags
ERROR = 0
NULLREGION = 1
SIMPLEREGION = 2
COMPLEXREGION = 3
RGN_ERROR = ERROR
# CombineRgn() styles
RGN_AND = 1
RGN_OR = 2
RGN_XOR = 3
RGN_DIFF = 4
RGN_COPY = 5
RGN_MIN = RGN_AND
RGN_MAX = RGN_COPY
# StretchBlt() modes
BLACKONWHITE = 1
WHITEONBLACK = 2
COLORONCOLOR = 3
HALFTONE = 4
MAXSTRETCHBLTMODE = 4
STRETCH_ANDSCANS = BLACKONWHITE
STRETCH_ORSCANS = WHITEONBLACK
STRETCH_DELETESCANS = COLORONCOLOR
STRETCH_HALFTONE = HALFTONE
# PolyFill() modes
ALTERNATE = 1
WINDING = 2
POLYFILL_LAST = 2
# Layout orientation options
LAYOUT_RTL = 0x00000001 # Right to left
LAYOUT_BTT = 0x00000002 # Bottom to top
LAYOUT_VBH = 0x00000004 # Vertical before horizontal
LAYOUT_ORIENTATIONMASK = LAYOUT_RTL + LAYOUT_BTT + LAYOUT_VBH
LAYOUT_BITMAPORIENTATIONPRESERVED = 0x00000008
# Stock objects
WHITE_BRUSH = 0
LTGRAY_BRUSH = 1
GRAY_BRUSH = 2
DKGRAY_BRUSH = 3
BLACK_BRUSH = 4
NULL_BRUSH = 5
HOLLOW_BRUSH = NULL_BRUSH
WHITE_PEN = 6
BLACK_PEN = 7
NULL_PEN = 8
OEM_FIXED_FONT = 10
ANSI_FIXED_FONT = 11
ANSI_VAR_FONT = 12
SYSTEM_FONT = 13
DEVICE_DEFAULT_FONT = 14
DEFAULT_PALETTE = 15
SYSTEM_FIXED_FONT = 16
# Metafile functions
META_SETBKCOLOR = 0x0201
META_SETBKMODE = 0x0102
META_SETMAPMODE = 0x0103
META_SETROP2 = 0x0104
META_SETRELABS = 0x0105
META_SETPOLYFILLMODE = 0x0106
META_SETSTRETCHBLTMODE = 0x0107
META_SETTEXTCHAREXTRA = 0x0108
META_SETTEXTCOLOR = 0x0209
META_SETTEXTJUSTIFICATION = 0x020A
META_SETWINDOWORG = 0x020B
META_SETWINDOWEXT = 0x020C
META_SETVIEWPORTORG = 0x020D
META_SETVIEWPORTEXT = 0x020E
META_OFFSETWINDOWORG = 0x020F
META_SCALEWINDOWEXT = 0x0410
META_OFFSETVIEWPORTORG = 0x0211
META_SCALEVIEWPORTEXT = 0x0412
META_LINETO = 0x0213
META_MOVETO = 0x0214
META_EXCLUDECLIPRECT = 0x0415
META_INTERSECTCLIPRECT = 0x0416
META_ARC = 0x0817
META_ELLIPSE = 0x0418
META_FLOODFILL = 0x0419
META_PIE = 0x081A
META_RECTANGLE = 0x041B
META_ROUNDRECT = 0x061C
META_PATBLT = 0x061D
META_SAVEDC = 0x001E
META_SETPIXEL = 0x041F
META_OFFSETCLIPRGN = 0x0220
META_TEXTOUT = 0x0521
META_BITBLT = 0x0922
META_STRETCHBLT = 0x0B23
META_POLYGON = 0x0324
META_POLYLINE = 0x0325
META_ESCAPE = 0x0626
META_RESTOREDC = 0x0127
META_FILLREGION = 0x0228
META_FRAMEREGION = 0x0429
META_INVERTREGION = 0x012A
META_PAINTREGION = 0x012B
META_SELECTCLIPREGION = 0x012C
META_SELECTOBJECT = 0x012D
META_SETTEXTALIGN = 0x012E
META_CHORD = 0x0830
META_SETMAPPERFLAGS = 0x0231
META_EXTTEXTOUT = 0x0a32
META_SETDIBTODEV = 0x0d33
META_SELECTPALETTE = 0x0234
META_REALIZEPALETTE = 0x0035
META_ANIMATEPALETTE = 0x0436
META_SETPALENTRIES = 0x0037
META_POLYPOLYGON = 0x0538
META_RESIZEPALETTE = 0x0139
META_DIBBITBLT = 0x0940
META_DIBSTRETCHBLT = 0x0b41
META_DIBCREATEPATTERNBRUSH = 0x0142
META_STRETCHDIB = 0x0f43
META_EXTFLOODFILL = 0x0548
META_SETLAYOUT = 0x0149
META_DELETEOBJECT = 0x01f0
META_CREATEPALETTE = 0x00f7
META_CREATEPATTERNBRUSH = 0x01F9
META_CREATEPENINDIRECT = 0x02FA
META_CREATEFONTINDIRECT = 0x02FB
META_CREATEBRUSHINDIRECT = 0x02FC
META_CREATEREGION = 0x06FF
# Metafile escape codes
NEWFRAME = 1
ABORTDOC = 2
NEXTBAND = 3
SETCOLORTABLE = 4
GETCOLORTABLE = 5
FLUSHOUTPUT = 6
DRAFTMODE = 7
QUERYESCSUPPORT = 8
SETABORTPROC = 9
STARTDOC = 10
ENDDOC = 11
GETPHYSPAGESIZE = 12
GETPRINTINGOFFSET = 13
GETSCALINGFACTOR = 14
MFCOMMENT = 15
GETPENWIDTH = 16
SETCOPYCOUNT = 17
SELECTPAPERSOURCE = 18
DEVICEDATA = 19
PASSTHROUGH = 19
GETTECHNOLGY = 20
GETTECHNOLOGY = 20
SETLINECAP = 21
SETLINEJOIN = 22
SETMITERLIMIT = 23
BANDINFO = 24
DRAWPATTERNRECT = 25
GETVECTORPENSIZE = 26
GETVECTORBRUSHSIZE = 27
ENABLEDUPLEX = 28
GETSETPAPERBINS = 29
GETSETPRINTORIENT = 30
ENUMPAPERBINS = 31
SETDIBSCALING = 32
EPSPRINTING = 33
ENUMPAPERMETRICS = 34
GETSETPAPERMETRICS = 35
POSTSCRIPT_DATA = 37
POSTSCRIPT_IGNORE = 38
MOUSETRAILS = 39
GETDEVICEUNITS = 42
GETEXTENDEDTEXTMETRICS = 256
GETEXTENTTABLE = 257
GETPAIRKERNTABLE = 258
GETTRACKKERNTABLE = 259
EXTTEXTOUT = 512
GETFACENAME = 513
DOWNLOADFACE = 514
ENABLERELATIVEWIDTHS = 768
ENABLEPAIRKERNING = 769
SETKERNTRACK = 770
SETALLJUSTVALUES = 771
SETCHARSET = 772
STRETCHBLT = 2048
METAFILE_DRIVER = 2049
GETSETSCREENPARAMS = 3072
QUERYDIBSUPPORT = 3073
BEGIN_PATH = 4096
CLIP_TO_PATH = 4097
END_PATH = 4098
EXT_DEVICE_CAPS = 4099
RESTORE_CTM = 4100
SAVE_CTM = 4101
SET_ARC_DIRECTION = 4102
SET_BACKGROUND_COLOR = 4103
SET_POLY_MODE = 4104
SET_SCREEN_ANGLE = 4105
SET_SPREAD = 4106
TRANSFORM_CTM = 4107
SET_CLIP_BOX = 4108
SET_BOUNDS = 4109
SET_MIRROR_MODE = 4110
OPENCHANNEL = 4110
DOWNLOADHEADER = 4111
CLOSECHANNEL = 4112
POSTSCRIPT_PASSTHROUGH = 4115
ENCAPSULATED_POSTSCRIPT = 4116
POSTSCRIPT_IDENTIFY = 4117
POSTSCRIPT_INJECTION = 4118
CHECKJPEGFORMAT = 4119
CHECKPNGFORMAT = 4120
GET_PS_FEATURESETTING = 4121
GDIPLUS_TS_QUERYVER = 4122
GDIPLUS_TS_RECORD = 4123
SPCLPASSTHROUGH2 = 4568
#--- Structures ---------------------------------------------------------------
# typedef struct _RECT {
# LONG left;
# LONG top;
# LONG right;
# LONG bottom;
# }RECT, *PRECT;
class RECT(Structure):
_fields_ = [
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
PRECT = POINTER(RECT)
LPRECT = PRECT
# typedef struct tagPOINT {
# LONG x;
# LONG y;
# } POINT;
class POINT(Structure):
_fields_ = [
('x', LONG),
('y', LONG),
]
PPOINT = POINTER(POINT)
LPPOINT = PPOINT
# typedef struct tagBITMAP {
# LONG bmType;
# LONG bmWidth;
# LONG bmHeight;
# LONG bmWidthBytes;
# WORD bmPlanes;
# WORD bmBitsPixel;
# LPVOID bmBits;
# } BITMAP, *PBITMAP;
class BITMAP(Structure):
_fields_ = [
("bmType", LONG),
("bmWidth", LONG),
("bmHeight", LONG),
("bmWidthBytes", LONG),
("bmPlanes", WORD),
("bmBitsPixel", WORD),
("bmBits", LPVOID),
]
PBITMAP = POINTER(BITMAP)
LPBITMAP = PBITMAP
#--- High level classes -------------------------------------------------------
#--- gdi32.dll ----------------------------------------------------------------
# HGDIOBJ SelectObject(
# __in HDC hdc,
# __in HGDIOBJ hgdiobj
# );
def SelectObject(hdc, hgdiobj):
_SelectObject = windll.gdi32.SelectObject
_SelectObject.argtypes = [HDC, HGDIOBJ]
_SelectObject.restype = HGDIOBJ
_SelectObject.errcheck = RaiseIfZero
return _SelectObject(hdc, hgdiobj)
# HGDIOBJ GetStockObject(
# __in int fnObject
# );
def GetStockObject(fnObject):
_GetStockObject = windll.gdi32.GetStockObject
_GetStockObject.argtypes = [ctypes.c_int]
_GetStockObject.restype = HGDIOBJ
_GetStockObject.errcheck = RaiseIfZero
return _GetStockObject(fnObject)
# DWORD GetObjectType(
# __in HGDIOBJ h
# );
def GetObjectType(h):
_GetObjectType = windll.gdi32.GetObjectType
_GetObjectType.argtypes = [HGDIOBJ]
_GetObjectType.restype = DWORD
_GetObjectType.errcheck = RaiseIfZero
return _GetObjectType(h)
# int GetObject(
# __in HGDIOBJ hgdiobj,
# __in int cbBuffer,
# __out LPVOID lpvObject
# );
def GetObject(hgdiobj, cbBuffer = None, lpvObject = None):
_GetObject = windll.gdi32.GetObject
_GetObject.argtypes = [HGDIOBJ, ctypes.c_int, LPVOID]
_GetObject.restype = ctypes.c_int
_GetObject.errcheck = RaiseIfZero
# Both cbBuffer and lpvObject can be omitted, the correct
# size and structure to return are automatically deduced.
# If lpvObject is given it must be a ctypes object, not a pointer.
# Always returns a ctypes object.
if cbBuffer is not None:
if lpvObject is None:
lpvObject = ctypes.create_string_buffer("", cbBuffer)
elif lpvObject is not None:
cbBuffer = sizeof(lpvObject)
else: # most likely case, both are None
t = GetObjectType(hgdiobj)
if t == OBJ_PEN:
cbBuffer = sizeof(LOGPEN)
lpvObject = LOGPEN()
elif t == OBJ_BRUSH:
cbBuffer = sizeof(LOGBRUSH)
lpvObject = LOGBRUSH()
elif t == OBJ_PAL:
cbBuffer = _GetObject(hgdiobj, 0, None)
lpvObject = (WORD * (cbBuffer // sizeof(WORD)))()
elif t == OBJ_FONT:
cbBuffer = sizeof(LOGFONT)
lpvObject = LOGFONT()
elif t == OBJ_BITMAP: # try the two possible types of bitmap
cbBuffer = sizeof(DIBSECTION)
lpvObject = DIBSECTION()
try:
_GetObject(hgdiobj, cbBuffer, byref(lpvObject))
return lpvObject
except WindowsError:
cbBuffer = sizeof(BITMAP)
lpvObject = BITMAP()
elif t == OBJ_EXTPEN:
cbBuffer = sizeof(LOGEXTPEN)
lpvObject = LOGEXTPEN()
else:
cbBuffer = _GetObject(hgdiobj, 0, None)
lpvObject = ctypes.create_string_buffer("", cbBuffer)
_GetObject(hgdiobj, cbBuffer, byref(lpvObject))
return lpvObject
# LONG GetBitmapBits(
# __in HBITMAP hbmp,
# __in LONG cbBuffer,
# __out LPVOID lpvBits
# );
def GetBitmapBits(hbmp):
_GetBitmapBits = windll.gdi32.GetBitmapBits
_GetBitmapBits.argtypes = [HBITMAP, LONG, LPVOID]
_GetBitmapBits.restype = LONG
_GetBitmapBits.errcheck = RaiseIfZero
bitmap = GetObject(hbmp, lpvObject = BITMAP())
cbBuffer = bitmap.bmWidthBytes * bitmap.bmHeight
lpvBits = ctypes.create_string_buffer("", cbBuffer)
_GetBitmapBits(hbmp, cbBuffer, byref(lpvBits))
return lpvBits.raw
# HBITMAP CreateBitmapIndirect(
# __in const BITMAP *lpbm
# );
def CreateBitmapIndirect(lpbm):
_CreateBitmapIndirect = windll.gdi32.CreateBitmapIndirect
_CreateBitmapIndirect.argtypes = [PBITMAP]
_CreateBitmapIndirect.restype = HBITMAP
_CreateBitmapIndirect.errcheck = RaiseIfZero
return _CreateBitmapIndirect(lpbm)
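# Usage sketch (illustrative only): 'hbmp' below is an assumption, an HBITMAP
# handle obtained elsewhere (e.g. from a device context). GetObject() deduces
# the structure to return from GetObjectType(), and GetBitmapBits() uses that
# BITMAP header to size its output buffer.
#
#   kind = GetObjectType(hbmp)   # e.g. OBJ_BITMAP
#   info = GetObject(hbmp)       # BITMAP (or DIBSECTION) instance
#   raw  = GetBitmapBits(hbmp)   # bmWidthBytes * bmHeight raw pixel bytes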
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quota service definition and implementation.
Contains message and service definitions for a simple quota service. The
service maintains a set of quotas for users that can be deducted from in
a single transaction. The requests to do this can be configured so that if
one quota check fails, none of the quota changes will take effect.
The service is configured using a QuotaConfig object and can be passed an
existing quota state (useful if the service quits unexpectedly and is
being restored from a checkpoint). For this reason it is necessary to use
a factory instead of the default constructor. For example:
quota_config = QuotaConfig(
buckets = [ QuotaBucket('DISK', 1000000),
QuotaBucket('EMAILS', 100, refresh_every=24 * 60 * 60),
])
quota_state = {}
quota_service = QuotaService.new_factory(quota_config, quota_state)
Every on-going request to the quota service shares the same configuration and
state objects.
Individual quota buckets can be specified to refresh to their original amounts
at regular intervals. These intervals are specified in seconds. The example
above specifies that the email quota is refreshed to 100 emails every day.
It is up to the client using the quota service to respond correctly to the
response of the quota service. It does not try to raise an exception on
denial.
"""
import threading
import time
from protorpc import messages
from protorpc import remote
from protorpc import util
class QuotaCheck(messages.Message):
"""Result of checking quota of a single bucket.
Fields:
name: Name of quota bucket to check.
tokens: Number of tokens to check for quota or deduct. A negative value
can be used to credit quota buckets.
mode: Quota check-mode. See Mode enumeration class for more details.
"""
class Mode(messages.Enum):
"""Mode for individual bucket quota check.
Values:
ALL: All tokens must be available for consumption or else quota check
fails and all deductions/credits are ignored.
SOME: At least some tokens must be available for consumption. This check
will only fail if the remaining tokens in the bucket are already at
zero.
CHECK_ALL: All tokens must be available in bucket or else quota check
fails and all other deductions/credits are ignored. This will not cause
a deduction to occur for the indicated bucket.
CHECK_SOME: At least some tokens must be available in bucket or else quota
check fails and all other deductions/credits are ignored. This will
not cause a deduction to occur for the indicated bucket.
"""
ALL = 1
SOME = 2
CHECK_ALL = 3
CHECK_SOME = 4
name = messages.StringField(1, required=True)
tokens = messages.IntegerField(2, required=True)
mode = messages.EnumField(Mode, 3, default=Mode.ALL)
class QuotaRequest(messages.Message):
"""A request to check or deduct tokens from a users bucket.
Fields:
user: User to check or deduct quota for.
quotas: Quotas to check or deduct against.
"""
user = messages.StringField(1, required=True)
quotas = messages.MessageField(QuotaCheck, 2, repeated=True)
class CheckResult(messages.Message):
"""Quota check results.
Fields:
status: Status of quota check for bucket. See Status enum for details.
available: Number of actual tokens available or consumed. Will be
less than the number of requested tokens when bucket has fewer
tokens than requested.
"""
class Status(messages.Enum):
"""Status of check result.
Values:
OK: All requested tokens are available or were deducted.
SOME: Some requested tokens are available or were deducted. This will
cause any deductions to fail if the request mode is ALL or CHECK_ALL.
NONE: No tokens were available. Quota check is considered to have failed.
"""
OK = 1
SOME = 2
NONE = 3
status = messages.EnumField(Status, 1, required=True)
available = messages.IntegerField(2, required=True)
class QuotaResponse(messages.Message):
""" Response to QuotaRequest.
Fields:
all_status: Overall status of quota request. If no quota tokens were
available at all, this will be NONE. If some tokens were available, even
if some buckets had no tokens, this will be SOME. If all tokens were
available this will be OK.
denied: If true, it means that some required quota check has failed. Any
deductions in the request will be ignored, even if those individual
buckets had adequate tokens.
results: Specific results of quota check for each requested bucket. The
names are not included as they will have a one to one correspondence with
buckets indicated in the request.
"""
all_status = messages.EnumField(CheckResult.Status, 1, required=True)
denied = messages.BooleanField(2, required=True)
results = messages.MessageField(CheckResult, 3, repeated=True)
class QuotaConfig(messages.Message):
"""Quota configuration.
Structure used for configuring quota server. This message is not used
directly in the service definition, but is used to configure the
implementation.
Fields:
buckets: Individual bucket configurations. Bucket configurations are
specified per server and are configured for any user that is requested.
"""
class Bucket(messages.Message):
"""Individual bucket configuration.
Fields:
name: Bucket name.
initial_tokens: Number of tokens initially configured for this bucket.
refresh_every: Number of seconds after which initial tokens are restored.
If this value is None, tokens are never restored once used, unless
credited by the application.
"""
name = messages.StringField(1, required=True)
initial_tokens = messages.IntegerField(2, required=True)
refresh_every = messages.IntegerField(4)
buckets = messages.MessageField(Bucket, 1, repeated=True)
class QuotaStateRequest(messages.Message):
"""Request state of all quota buckets for a single user.
Used for determining how many tokens remain in all of the user's quota buckets.
Fields:
user: The user to get buckets for.
"""
user = messages.StringField(1, required=True)
class BucketState(messages.Message):
"""State of an individual quota bucket.
Fields:
name: Name of bucket.
remaining_tokens: Number of tokens that remain in bucket.
"""
name = messages.StringField(1, required=True)
remaining_tokens = messages.IntegerField(2, required=True)
class QuotaStateResponse(messages.Message):
"""Response to QuotaStateRequest containing set of bucket states for user."""
bucket_states = messages.MessageField(BucketState, 1, repeated=True)
class QuotaState(object):
"""Quota state class, used by implementation of service.
This class is responsible for managing all the bucket states for a user.
Quota checks and deductions must be done in the context of a transaction. If
a transaction fails, it can be rolled back so that the values of the
individual buckets are preserved, even if previous checks and deductions
succeeded.
"""
@util.positional(3)
def __init__(self, state, buckets):
"""Constructor.
Args:
state: A dictionary that is used to contain the state, mapping buckets to
tuples (remaining_tokens, next_refresh):
remaining_tokens: Number of tokens remaining in the bucket.
next_refresh: Time when bucket needs to be refilled with initial
tokens.
buckets: A dictionary that maps bucket names to QuotaConfig.Bucket objects.
"""
self.__state = state
self.__buckets = buckets
self.__lock = threading.Lock() # Used at transaction commit time.
self.__transaction = threading.local()
self.__transaction.changes = None # Dictionary bucket -> token deduction.
# Can be negative indicating credit.
self.__transaction.time = None # Time at which transaction began.
def in_transaction(self):
return self.__transaction.changes is not None
def begin_transaction(self):
"""Begin quota transaction."""
assert not self.in_transaction()
self.__transaction.changes = {}
self.__transaction.time = int(time.time())
self.__lock.acquire()
def commit_transaction(self):
"""Commit deductions of quota transaction."""
assert self.in_transaction()
for name, change in self.__transaction.changes.iteritems():
remaining_tokens, next_refresh = self.__state[name]
new_tokens = max(0, remaining_tokens + change)
self.__state[name] = new_tokens, next_refresh
self.__transaction.changes = None
self.__lock.release()
def abort_transaction(self):
"""Roll back transaction ignoring quota changes."""
assert self.in_transaction()
self.__transaction.changes = None
self.__lock.release()
def get_remaining_tokens(self, name):
"""Get remaining tokens for a bucket.
This function must be called within a transaction.
Args:
name: Bucket name.
Returns:
Integer of remaining tokens in the user's quota bucket.
"""
assert self.in_transaction()
changes = self.__transaction.changes.get(name, 0)
remaining_tokens, next_refresh = self.__state.get(name, (None, None))
if remaining_tokens is not None and (
next_refresh is None or
next_refresh >= self.__transaction.time):
return remaining_tokens + changes
bucket = self.__buckets.get(name, None)
if bucket is None:
return None
if bucket.refresh_every:
next_refresh = self.__transaction.time + bucket.refresh_every
else:
next_refresh = None
self.__state[name] = bucket.initial_tokens, next_refresh
return bucket.initial_tokens + changes
def check_quota(self, name, tokens):
"""Check to determine if there are enough quotas in a bucket.
Args:
name: Name of bucket to check.
tokens: Number of tokens to check for availability. Can be negative.
Returns:
The count of requested tokens or if insufficient, the number of tokens
available.
"""
assert self.in_transaction()
assert name not in self.__transaction.changes
remaining_tokens = self.get_remaining_tokens(name)
if remaining_tokens is None:
return None
return min(tokens, remaining_tokens)
def deduct_quota(self, name, tokens):
"""Add a quota deduction to the transaction.
Args:
name: Name of bucket to deduct from.
tokens: Number of tokens to request.
Returns:
The count of requested tokens or if insufficient, the number of tokens
available that will be deducted upon transaction commit.
"""
available_tokens = self.check_quota(name, tokens)
if available_tokens is None:
return None
diff = max(0, tokens - available_tokens)
self.__transaction.changes[name] = -(tokens - diff)
return available_tokens
class QuotaService(remote.Service):
"""Quota service."""
__state_lock = threading.Lock()
def __init__(self, config, states):
"""Constructor.
NOTE: This constructor requires parameters which means a factory function
must be used for instantiating the QuotaService.
Args:
config: An instance of QuotaConfig.
states: Dictionary mapping user -> QuotaState objects.
"""
self.__states = states
self.__config = config
self.__buckets = {}
for bucket in self.__config.buckets:
self.__buckets[bucket.name] = bucket
def __get_state(self, user):
"""Get the state of a user.
    If no state exists for the user, this function creates one and stores
    it for later access.
    Args:
      user: User string to get quota state for.
    Returns:
      The QuotaState instance for the user.
"""
state = self.__states.get(user, None)
if state is None:
state = QuotaState({}, self.__buckets)
# TODO: Potentially problematic bottleneck.
self.__state_lock.acquire()
try:
self.__states[user] = state
finally:
self.__state_lock.release()
return state
@remote.method(QuotaRequest, QuotaResponse)
def check_quota(self, request):
"""Perform a quota check for a user."""
state = self.__get_state(request.user)
response = QuotaResponse(all_status=CheckResult.Status.OK)
response.denied = False
state.begin_transaction()
try:
for quota in request.quotas:
if quota.mode in (QuotaCheck.Mode.CHECK_ALL,
QuotaCheck.Mode.CHECK_SOME):
func = state.check_quota
else:
func = state.deduct_quota
available = func(quota.name, quota.tokens)
if available is None:
raise remote.ApplicationError(
'Unknown quota %s requested' % quota.name)
result = CheckResult(available=available)
response.results.append(result)
if available == quota.tokens:
result.status = CheckResult.Status.OK
if response.all_status == CheckResult.Status.NONE:
result.status = CheckResult.Status.SOME
elif available == 0:
result.status = CheckResult.Status.NONE
if response.all_status == CheckResult.Status.OK:
response.all_status = CheckResult.Status.NONE
response.denied = True
else:
result.status = CheckResult.Status.SOME
response.all_status = CheckResult.Status.SOME
if quota.mode in (QuotaCheck.Mode.ALL, QuotaCheck.Mode.CHECK_ALL):
response.denied = True
if response.denied:
state.abort_transaction()
else:
state.commit_transaction()
except:
state.abort_transaction()
raise
return response
@remote.method(QuotaStateRequest, QuotaStateResponse)
def get_quota_state(self, request):
"""Get current state of users quota buckets."""
state = self.__get_state(request.user)
state.begin_transaction()
try:
response = QuotaStateResponse()
for name in sorted(self.__buckets.keys()):
bucket_state = BucketState(
name=name,
remaining_tokens=state.get_remaining_tokens(name))
response.bucket_states.append(bucket_state)
return response
finally:
state.abort_transaction()
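# Illustrative usage sketch (not part of the original module): how the
# transactional QuotaState API above is typically driven.  The bucket name
# 'emails' and the BucketConfig arguments are assumptions for the example;
# only the begin/deduct/commit/abort calls mirror the code above.
#
#   state = QuotaState({}, {'emails': BucketConfig(...)})
#   state.begin_transaction()
#   try:
#     granted = state.deduct_quota('emails', 5)
#     if granted == 5:
#       state.commit_transaction()    # applies the deduction to the bucket
#     else:
#       state.abort_transaction()     # not enough tokens; discard changes
#   except Exception:
#     state.abort_transaction()
#     raise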
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
from pyMKL import pardisoinit, pardiso, mkl_get_version
from ctypes import POINTER, byref, c_longlong, c_int
import numpy as np
import scipy.sparse as sp
from numpy import ctypeslib
"""
mtype options
1 -> real and structurally symmetric
2 -> real and symmetric positive definite
-2 -> real and symmetric indefinite
3 -> complex and structurally symmetric
4 -> complex and Hermitian positive definite
-4 -> complex and Hermitian indefinite
6 -> complex and symmetric
11 -> real and nonsymmetric
13 -> complex and nonsymmetric
phase options
11 -> Analysis
12 -> Analysis, numerical factorization
13 -> Analysis, numerical factorization, solve, iterative refinement
22 -> Numerical factorization
23 -> Numerical factorization, solve, iterative refinement
33 -> Solve, iterative refinement
331 -> like phase=33, but only forward substitution
332 -> like phase=33, but only diagonal substitution (if available)
333 -> like phase=33, but only backward substitution
0 -> Release internal memory for L and U matrix number mnum
-1 -> Release all internal memory for all matrices
"""
class pardisoSolver(object):
"""Wrapper class for Intel MKL Pardiso solver. """
def __init__(self, A, mtype=11, verbose=False, singularity_check=True):
'''
Parameters
----------
A : scipy.sparse.csr.csr_matrix
sparse matrix in csr format.
mtype : int, optional
flag specifying the matrix type. The possible types are:
- 1 : real and structurally symmetric (not supported)
- 2 : real and symmetric positive definite
- -2 : real and symmetric indefinite
- 3 : complex and structurally symmetric (not supported)
- 4 : complex and Hermitian positive definite
- -4 : complex and Hermitian indefinite
- 6 : complex and symmetric
- 11 : real and nonsymmetric (default)
- 13 : complex and nonsymmetric
        verbose : bool, optional
            flag for verbose output. Default is False.
        singularity_check : bool, optional
            if True, raise a RuntimeError when Pardiso reports perturbed
            pivot elements (iparm[13] > 0), which may indicate a singular
            matrix. Default is True.
Returns
-------
None
'''
self.mtype = mtype
if mtype in [1, 3]:
msg = "mtype = 1/3 - structurally symmetric matrices not supported"
raise NotImplementedError(msg)
elif mtype in [2, -2, 4, -4, 6, 11, 13]:
pass
else:
msg = "Invalid mtype: mtype={}".format(mtype)
raise ValueError(msg)
self.singularity_check = singularity_check
self.n = A.shape[0]
if mtype in [4, -4, 6, 13]:
# Complex matrix
self.dtype = np.complex128
elif mtype in [2, -2, 11]:
# Real matrix
self.dtype = np.float64
self.ctypes_dtype = ctypeslib.ndpointer(self.dtype)
# If A is symmetric, store only the upper triangular portion
if mtype in [2, -2, 4, -4, 6]:
A = sp.triu(A, format='csr')
elif mtype in [11, 13]:
A = A.tocsr()
if not A.has_sorted_indices:
A.sort_indices()
self.a = A.data
self.ia = A.indptr
self.ja = A.indices
self._MKL_a = self.a.ctypes.data_as(self.ctypes_dtype)
self._MKL_ia = self.ia.ctypes.data_as(POINTER(c_int))
self._MKL_ja = self.ja.ctypes.data_as(POINTER(c_int))
# Hardcode some parameters for now...
self.maxfct = 1
self.mnum = 1
self.perm = 0
if verbose:
self.msglvl = 1
else:
self.msglvl = 0
# Initialize handle to data structure
self.pt = np.zeros(64, np.int64)
self._MKL_pt = self.pt.ctypes.data_as(POINTER(c_longlong))
# Initialize parameters
self.iparm = np.zeros(64, dtype=np.int32)
self._MKL_iparm = self.iparm.ctypes.data_as(POINTER(c_int))
# Initialize pardiso
pardisoinit(self._MKL_pt, byref(c_int(self.mtype)), self._MKL_iparm)
verstring = mkl_get_version()
# Set iparm
if '11.3.3' in verstring:
self.iparm[1] = 0
else:
self.iparm[1] = 3 # Use parallel nested dissection for reordering
self.iparm[23] = 1 # Use parallel factorization
self.iparm[34] = 1 # Zero base indexing
self.error = 0
def clear(self):
'''
        Clear the memory allocated by the solver.
'''
self.run_pardiso(phase=-1)
def factor(self):
out = self.run_pardiso(phase=12)
def solve(self, rhs):
x = self.run_pardiso(phase=33, rhs=rhs)
return x
def run_pardiso(self, phase, rhs=None):
'''
Run specified phase of the Pardiso solver.
Parameters
----------
phase : int
Flag setting the analysis type of the solver:
- 11 : Analysis
- 12 : Analysis, numerical factorization
- 13 : Analysis, numerical factorization, solve, iterative refinement
- 22 : Numerical factorization
- 23 : Numerical factorization, solve, iterative refinement
- 33 : Solve, iterative refinement
- 331 : like phase=33, but only forward substitution
- 332 : like phase=33, but only diagonal substitution (if available)
- 333 : like phase=33, but only backward substitution
- 0 : Release internal memory for L and U matrix number mnum
- -1 : Release all internal memory for all matrices
rhs : ndarray, optional
Right hand side of the equation `A x = rhs`. Can either be a vector
(array of dimension 1) or a matrix (array of dimension 2). Default
is None.
Returns
-------
x : ndarray
Solution of the system `A x = rhs`, if `rhs` is provided. Is either
a vector or a column matrix.
'''
if rhs is None:
nrhs = 0
x = np.zeros(1)
rhs = np.zeros(1)
else:
if rhs.ndim == 1:
nrhs = 1
elif rhs.ndim == 2:
nrhs = rhs.shape[1]
else:
msg = "Right hand side must either be a 1 or 2 dimensional "+\
"array. Higher order right hand sides are not supported."
raise NotImplementedError(msg)
rhs = rhs.astype(self.dtype).flatten(order='f')
x = np.zeros(nrhs*self.n, dtype=self.dtype)
MKL_rhs = rhs.ctypes.data_as(self.ctypes_dtype)
MKL_x = x.ctypes.data_as(self.ctypes_dtype)
MKL_err = c_int(0)
pardiso(self._MKL_pt, # pt
byref(c_int(self.maxfct)), # maxfct
byref(c_int(self.mnum)), # mnum
byref(c_int(self.mtype)), # mtype
byref(c_int(phase)), # phase
byref(c_int(self.n)), # n
self._MKL_a, # a
self._MKL_ia, # ia
self._MKL_ja, # ja
byref(c_int(self.perm)), # perm
byref(c_int(nrhs)), # nrhs
self._MKL_iparm, # iparm
byref(c_int(self.msglvl)), # msglvl
MKL_rhs, # b
MKL_x, # x
byref(MKL_err)) # error
self.error = MKL_err.value
if self.singularity_check and self.iparm[13] > 0:
raise RuntimeError("Pardiso - Number of perturbed pivot elements = " + repr(self.iparm[13]) + ". This could mean that the matrix is singular.")
if nrhs > 1:
x = x.reshape((self.n, nrhs), order='f')
return x
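# Minimal usage sketch, kept out of library import paths by the __main__
# guard.  It assumes MKL and pyMKL are installed; the tridiagonal test
# system below is made up purely for illustration.
if __name__ == "__main__":
    n = 5
    # Symmetric positive definite tridiagonal matrix -> mtype=2.
    A = sp.diags([-1.0, 2.0, -1.0], offsets=[-1, 0, 1], shape=(n, n),
                 format='csr')
    b = np.ones(n)
    solver = pardisoSolver(A, mtype=2)
    solver.factor()                 # phase 12: analysis + factorization
    x = solver.solve(b)             # phase 33: solve + iterative refinement
    print("residual norm:", np.linalg.norm(A.dot(x) - b))
    solver.clear()                  # phase -1: release internal MKL memory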
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Formats and displays profiling information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import numpy as np
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import profiling
from tensorflow.python.debug.lib import source_utils
RL = debugger_cli_common.RichLine
SORT_OPS_BY_OP_NAME = "node"
SORT_OPS_BY_OP_TYPE = "op_type"
SORT_OPS_BY_OP_TIME = "op_time"
SORT_OPS_BY_EXEC_TIME = "exec_time"
SORT_OPS_BY_START_TIME = "start_time"
SORT_OPS_BY_LINE = "line"
_DEVICE_NAME_FILTER_FLAG = "device_name_filter"
_NODE_NAME_FILTER_FLAG = "node_name_filter"
_OP_TYPE_FILTER_FLAG = "op_type_filter"
class ProfileDataTableView(object):
"""Table View of profiling data."""
def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):
"""Constructor.
Args:
profile_datum_list: List of `ProfileDatum` objects.
time_unit: must be in cli_shared.TIME_UNITS.
"""
self._profile_datum_list = profile_datum_list
self.formatted_start_time = [
datum.start_time for datum in profile_datum_list]
self.formatted_op_time = [
cli_shared.time_to_readable_str(datum.op_time,
force_time_unit=time_unit)
for datum in profile_datum_list]
self.formatted_exec_time = [
cli_shared.time_to_readable_str(
datum.node_exec_stats.all_end_rel_micros,
force_time_unit=time_unit)
for datum in profile_datum_list]
self._column_names = ["Node",
"Op Type",
"Start Time (us)",
"Op Time (%s)" % time_unit,
"Exec Time (%s)" % time_unit,
"Filename:Lineno(function)"]
self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]
def value(self,
row,
col,
device_name_filter=None,
node_name_filter=None,
op_type_filter=None):
"""Get the content of a cell of the table.
Args:
row: (int) row index.
col: (int) column index.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
Returns:
      A debugger_cli_common.RichLine object representing the content of the
      cell, potentially with a clickable MenuItem.
Raises:
IndexError: if row index is out of range.
"""
menu_item = None
if col == 0:
text = self._profile_datum_list[row].node_exec_stats.node_name
elif col == 1:
text = self._profile_datum_list[row].op_type
elif col == 2:
text = str(self.formatted_start_time[row])
elif col == 3:
text = str(self.formatted_op_time[row])
elif col == 4:
text = str(self.formatted_exec_time[row])
elif col == 5:
command = "ps"
if device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
device_name_filter)
if node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG, node_name_filter)
if op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG, op_type_filter)
command += " %s --init_line %d" % (
self._profile_datum_list[row].file_path,
self._profile_datum_list[row].line_number)
menu_item = debugger_cli_common.MenuItem(None, command)
text = self._profile_datum_list[row].file_line_func
else:
raise IndexError("Invalid column index %d." % col)
return RL(text, font_attr=menu_item)
def row_count(self):
return len(self._profile_datum_list)
def column_count(self):
return len(self._column_names)
def column_names(self):
return self._column_names
def column_sort_id(self, col):
return self._column_sort_ids[col]
def _list_profile_filter(
profile_datum,
node_name_regex,
file_path_regex,
op_type_regex,
op_time_interval,
exec_time_interval,
min_lineno=-1,
max_lineno=-1):
"""Filter function for list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
node_name_regex: Regular expression pattern object to filter by name.
file_path_regex: Regular expression pattern object to filter by file path.
op_type_regex: Regular expression pattern object to filter by op type.
op_time_interval: `Interval` for filtering op time.
exec_time_interval: `Interval` for filtering exec time.
min_lineno: Lower bound for 1-based line number, inclusive.
If <= 0, has no effect.
max_lineno: Upper bound for 1-based line number, exclusive.
If <= 0, has no effect.
# TODO(cais): Maybe filter by function name.
Returns:
True iff profile_datum should be included.
"""
if node_name_regex and not node_name_regex.match(
profile_datum.node_exec_stats.node_name):
return False
if file_path_regex:
if (not profile_datum.file_path or
not file_path_regex.match(profile_datum.file_path)):
return False
if (min_lineno > 0 and profile_datum.line_number and
profile_datum.line_number < min_lineno):
return False
if (max_lineno > 0 and profile_datum.line_number and
profile_datum.line_number >= max_lineno):
return False
if (profile_datum.op_type is not None and op_type_regex and
not op_type_regex.match(profile_datum.op_type)):
return False
if op_time_interval is not None and not op_time_interval.contains(
profile_datum.op_time):
return False
if exec_time_interval and not exec_time_interval.contains(
profile_datum.node_exec_stats.all_end_rel_micros):
return False
return True
def _list_profile_sort_key(profile_datum, sort_by):
"""Get a profile_datum property to sort by in list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
    sort_by: (string) indicates a value to sort by.
      Must be one of the SORT_OPS_BY_* constants defined in this module.
Returns:
profile_datum property to sort by.
"""
if sort_by == SORT_OPS_BY_OP_NAME:
return profile_datum.node_exec_stats.node_name
elif sort_by == SORT_OPS_BY_OP_TYPE:
return profile_datum.op_type
elif sort_by == SORT_OPS_BY_LINE:
return profile_datum.file_line_func
elif sort_by == SORT_OPS_BY_OP_TIME:
return profile_datum.op_time
elif sort_by == SORT_OPS_BY_EXEC_TIME:
return profile_datum.node_exec_stats.all_end_rel_micros
else: # sort by start time
return profile_datum.node_exec_stats.all_start_micros
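# Example (illustrative only; `data` is assumed to be a list of ProfileDatum
# objects): the two module-level helpers above compose as
#   kept = [d for d in data
#           if _list_profile_filter(d, None, None, None, None, None)]
#   kept.sort(key=lambda d: _list_profile_sort_key(d, SORT_OPS_BY_OP_TIME),
#             reverse=True)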
class ProfileAnalyzer(object):
"""Analyzer for profiling data."""
def __init__(self, graph, run_metadata):
"""ProfileAnalyzer constructor.
Args:
graph: (tf.Graph) Python graph object.
run_metadata: A `RunMetadata` protobuf object.
Raises:
ValueError: If run_metadata is None.
"""
self._graph = graph
if not run_metadata:
raise ValueError("No RunMetadata passed for profile analysis.")
self._run_metadata = run_metadata
self._arg_parsers = {}
ap = argparse.ArgumentParser(
description="List nodes profile information.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="filter op type by regex.")
# TODO(annarev): allow file filtering at non-stack top position.
ap.add_argument(
"-f",
"--file_path_filter",
dest="file_path_filter",
type=str,
default="",
help="filter by file name at the top position of node's creation "
"stack that does not belong to TensorFlow library.")
ap.add_argument(
"--min_lineno",
dest="min_lineno",
type=int,
default=-1,
help="(Inclusive) lower bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"--max_lineno",
dest="max_lineno",
type=int,
default=-1,
help="(Exclusive) upper bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"-e",
"--execution_time",
dest="execution_time",
type=str,
default="",
help="Filter by execution time interval "
"(includes compute plus pre- and post -processing time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-o",
"--op_time",
dest="op_time",
type=str,
default="",
help="Filter by op time interval (only includes compute time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_OPS_BY_START_TIME,
help=("the field to sort the data by: (%s)" %
" | ".join([SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE])))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
self._arg_parsers["list_profile"] = ap
ap = argparse.ArgumentParser(
description="Print a Python source file with line-level profile "
"information",
usage=argparse.SUPPRESS)
ap.add_argument(
"source_file_path",
type=str,
help="Path to the source_file_path")
ap.add_argument(
"--cost_type",
type=str,
choices=["exec_time", "op_time"],
default="exec_time",
help="Type of cost to display")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="Filter op type by regex.")
ap.add_argument(
"--init_line",
dest="init_line",
type=int,
default=0,
help="The 1-based line number to scroll to initially.")
self._arg_parsers["print_source"] = ap
def list_profile(self, args, screen_info=None):
"""Command handler for list_profile.
List per-operation profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
screen_cols = 80
if screen_info and "cols" in screen_info:
screen_cols = screen_info["cols"]
parsed = self._arg_parsers["list_profile"].parse_args(args)
op_time_interval = (command_parser.parse_time_interval(parsed.op_time)
if parsed.op_time else None)
exec_time_interval = (
command_parser.parse_time_interval(parsed.execution_time)
if parsed.execution_time else None)
node_name_regex = (re.compile(parsed.node_name_filter)
if parsed.node_name_filter else None)
file_path_regex = (re.compile(parsed.file_path_filter)
if parsed.file_path_filter else None)
op_type_regex = (re.compile(parsed.op_type_filter)
if parsed.op_type_filter else None)
output = debugger_cli_common.RichTextLines([""])
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if not device_name_regex or device_name_regex.match(device_stats.device):
profile_data = [
datum for datum in data_generator(device_stats)
if _list_profile_filter(
datum, node_name_regex, file_path_regex, op_type_regex,
op_time_interval, exec_time_interval,
min_lineno=parsed.min_lineno, max_lineno=parsed.max_lineno)]
profile_data = sorted(
profile_data,
key=lambda datum: _list_profile_sort_key(datum, parsed.sort_by),
reverse=parsed.reverse)
output.extend(
self._get_list_profile_lines(
device_stats.device, index, device_count,
profile_data, parsed.sort_by, parsed.reverse, parsed.time_unit,
device_name_filter=parsed.device_name_filter,
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter,
screen_cols=screen_cols))
return output
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_file_path = {}
node_to_line_number = {}
node_to_func_name = {}
node_to_op_type = {}
for op in self._graph.get_operations():
for trace_entry in reversed(op.traceback):
file_path = trace_entry[0]
line_num = trace_entry[1]
func_name = trace_entry[2]
if not source_utils.guess_is_tensorflow_py_library(file_path):
break
node_to_file_path[op.name] = file_path
node_to_line_number[op.name] = line_num
node_to_func_name[op.name] = func_name
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == "_SOURCE" or node_stats.node_name == "_SINK":
continue
yield profiling.ProfileDatum(
device_step_stats.device,
node_stats,
node_to_file_path.get(node_stats.node_name, ""),
node_to_line_number.get(node_stats.node_name, 0),
node_to_func_name.get(node_stats.node_name, ""),
node_to_op_type.get(node_stats.node_name, ""))
return profile_data_generator
def _get_list_profile_lines(
self, device_name, device_index, device_count,
profile_datum_list, sort_by, sort_reverse, time_unit,
device_name_filter=None, node_name_filter=None, op_type_filter=None,
screen_cols=80):
"""Get `RichTextLines` object for list_profile command for a given device.
Args:
device_name: (string) Device name.
device_index: (int) Device index.
device_count: (int) Number of devices.
profile_datum_list: List of `ProfileDatum` objects.
      sort_by: (string) Identifier of column to sort. Sort identifier
        must be one of SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
        SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME
        or SORT_OPS_BY_LINE.
sort_reverse: (bool) Whether to sort in descending instead of default
(ascending) order.
time_unit: time unit, must be in cli_shared.TIME_UNITS.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
screen_cols: (int) Number of columns available on the screen (i.e.,
available screen width).
Returns:
`RichTextLines` object containing a table that displays profiling
information for each op.
"""
profile_data = ProfileDataTableView(profile_datum_list, time_unit=time_unit)
# Calculate total time early to calculate column widths.
total_op_time = sum(datum.op_time for datum in profile_datum_list)
total_exec_time = sum(datum.node_exec_stats.all_end_rel_micros
for datum in profile_datum_list)
device_total_row = [
"Device Total", "",
cli_shared.time_to_readable_str(total_op_time,
force_time_unit=time_unit),
cli_shared.time_to_readable_str(total_exec_time,
force_time_unit=time_unit)]
# Calculate column widths.
column_widths = [
len(column_name) for column_name in profile_data.column_names()]
for col in range(len(device_total_row)):
column_widths[col] = max(column_widths[col], len(device_total_row[col]))
for col in range(len(column_widths)):
for row in range(profile_data.row_count()):
column_widths[col] = max(
column_widths[col], len(profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)))
column_widths[col] += 2 # add margin between columns
# Add device name.
output = [RL("-" * screen_cols)]
device_row = "Device %d of %d: %s" % (
device_index + 1, device_count, device_name)
output.append(RL(device_row))
output.append(RL())
# Add headers.
base_command = "list_profile"
row = RL()
for col in range(profile_data.column_count()):
column_name = profile_data.column_names()[col]
sort_id = profile_data.column_sort_id(col)
command = "%s -s %s" % (base_command, sort_id)
if sort_by == sort_id and not sort_reverse:
command += " -r"
head_menu_item = debugger_cli_common.MenuItem(None, command)
row += RL(column_name, font_attr=[head_menu_item, "bold"])
row += RL(" " * (column_widths[col] - len(column_name)))
output.append(row)
# Add data rows.
for row in range(profile_data.row_count()):
new_row = RL()
for col in range(profile_data.column_count()):
new_cell = profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)
new_row += new_cell
new_row += RL(" " * (column_widths[col] - len(new_cell)))
output.append(new_row)
# Add stat totals.
row_str = ""
for col in range(len(device_total_row)):
row_str += ("{:<%d}" % column_widths[col]).format(device_total_row[col])
output.append(RL())
output.append(RL(row_str))
return debugger_cli_common.rich_text_lines_from_rich_line_list(output)
def _measure_list_profile_column_widths(self, profile_data):
"""Determine the maximum column widths for each data list.
Args:
profile_data: list of ProfileDatum objects.
Returns:
List of column widths in the same order as columns in data.
"""
num_columns = len(profile_data.column_names())
widths = [len(column_name) for column_name in profile_data.column_names()]
for row in range(profile_data.row_count()):
for col in range(num_columns):
        widths[col] = max(
            widths[col], len(profile_data.value(row, col)) + 2)
return widths
_LINE_COST_ATTR = cli_shared.COLOR_CYAN
_LINE_NUM_ATTR = cli_shared.COLOR_YELLOW
_NUM_NODES_HEAD = "#nodes"
_NUM_EXECS_SUB_HEAD = "(#execs)"
_LINENO_HEAD = "lineno"
_SOURCE_HEAD = "source"
def print_source(self, args, screen_info=None):
"""Print a Python source file with line-level profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
del screen_info
parsed = self._arg_parsers["print_source"].parse_args(args)
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
profile_data = []
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if device_name_regex and not device_name_regex.match(device_stats.device):
continue
profile_data.extend([datum for datum in data_generator(device_stats)])
source_annotation = source_utils.annotate_source_against_profile(
profile_data,
os.path.expanduser(parsed.source_file_path),
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter)
if not source_annotation:
return debugger_cli_common.RichTextLines(
["The source file %s does not contain any profile information for "
"the previous Session run under the following "
"filters:" % parsed.source_file_path,
" --%s: %s" % (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter),
" --%s: %s" % (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter),
" --%s: %s" % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)])
max_total_cost = 0
for line_index in source_annotation:
total_cost = self._get_total_cost(source_annotation[line_index],
parsed.cost_type)
max_total_cost = max(max_total_cost, total_cost)
source_lines, line_num_width = source_utils.load_source(
parsed.source_file_path)
cost_bar_max_length = 10
total_cost_head = parsed.cost_type
column_widths = {
"cost_bar": cost_bar_max_length + 3,
"total_cost": len(total_cost_head) + 3,
"num_nodes_execs": len(self._NUM_EXECS_SUB_HEAD) + 1,
"line_number": line_num_width,
}
head = RL(
" " * column_widths["cost_bar"] +
total_cost_head +
" " * (column_widths["total_cost"] - len(total_cost_head)) +
self._NUM_NODES_HEAD +
" " * (column_widths["num_nodes_execs"] - len(self._NUM_NODES_HEAD)),
font_attr=self._LINE_COST_ATTR)
head += RL(self._LINENO_HEAD, font_attr=self._LINE_NUM_ATTR)
sub_head = RL(
" " * (column_widths["cost_bar"] +
column_widths["total_cost"]) +
self._NUM_EXECS_SUB_HEAD +
" " * (column_widths["num_nodes_execs"] -
len(self._NUM_EXECS_SUB_HEAD)) +
" " * column_widths["line_number"],
font_attr=self._LINE_COST_ATTR)
sub_head += RL(self._SOURCE_HEAD, font_attr="bold")
lines = [head, sub_head]
output_annotations = {}
for i, line in enumerate(source_lines):
lineno = i + 1
if lineno in source_annotation:
annotation = source_annotation[lineno]
cost_bar = self._render_normalized_cost_bar(
self._get_total_cost(annotation, parsed.cost_type), max_total_cost,
cost_bar_max_length)
annotated_line = cost_bar
annotated_line += " " * (column_widths["cost_bar"] - len(cost_bar))
total_cost = RL(cli_shared.time_to_readable_str(
self._get_total_cost(annotation, parsed.cost_type),
force_time_unit=parsed.time_unit),
font_attr=self._LINE_COST_ATTR)
total_cost += " " * (column_widths["total_cost"] - len(total_cost))
annotated_line += total_cost
file_path_filter = re.escape(parsed.source_file_path) + "$"
command = "lp --file_path_filter %s --min_lineno %d --max_lineno %d" % (
file_path_filter, lineno, lineno + 1)
if parsed.device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
parsed.device_name_filter)
if parsed.node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG,
parsed.node_name_filter)
if parsed.op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG,
parsed.op_type_filter)
menu_item = debugger_cli_common.MenuItem(None, command)
num_nodes_execs = RL("%d(%d)" % (annotation.node_count,
annotation.node_exec_count),
font_attr=[self._LINE_COST_ATTR, menu_item])
num_nodes_execs += " " * (
column_widths["num_nodes_execs"] - len(num_nodes_execs))
annotated_line += num_nodes_execs
else:
annotated_line = RL(
" " * sum(column_widths[col_name] for col_name in column_widths
if col_name != "line_number"))
line_num_column = RL(" L%d" % (lineno), self._LINE_NUM_ATTR)
line_num_column += " " * (
column_widths["line_number"] - len(line_num_column))
annotated_line += line_num_column
annotated_line += line
lines.append(annotated_line)
if parsed.init_line == lineno:
output_annotations[
debugger_cli_common.INIT_SCROLL_POS_KEY] = len(lines) - 1
return debugger_cli_common.rich_text_lines_from_rich_line_list(
lines, annotations=output_annotations)
def _get_total_cost(self, aggregated_profile, cost_type):
if cost_type == "exec_time":
return aggregated_profile.total_exec_time
elif cost_type == "op_time":
return aggregated_profile.total_op_time
else:
raise ValueError("Unsupported cost type: %s" % cost_type)
def _render_normalized_cost_bar(self, cost, max_cost, length):
"""Render a text bar representing a normalized cost.
Args:
cost: the absolute value of the cost.
max_cost: the maximum cost value to normalize the absolute cost with.
length: (int) length of the cost bar, in number of characters, excluding
the brackets on the two ends.
Returns:
      An instance of debugger_cli_common.RichLine.
"""
num_ticks = int(np.ceil(float(cost) / max_cost * length))
num_ticks = num_ticks or 1 # Minimum is 1 tick.
output = RL("[", font_attr=self._LINE_COST_ATTR)
output += RL("|" * num_ticks + " " * (length - num_ticks),
font_attr=["bold", self._LINE_COST_ATTR])
output += RL("]", font_attr=self._LINE_COST_ATTR)
return output
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def create_profiler_ui(graph,
run_metadata,
ui_type="curses",
on_ui_exit=None,
config=None):
"""Create an instance of CursesUI based on a `tf.Graph` and `RunMetadata`.
Args:
graph: Python `Graph` object.
run_metadata: A `RunMetadata` protobuf object.
ui_type: (str) requested UI type, e.g., "curses", "readline".
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig`.
Returns:
(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
commands and tab-completions registered.
"""
del config # Currently unused.
analyzer = ProfileAnalyzer(graph, run_metadata)
cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)
cli.register_command_handler(
"list_profile",
analyzer.list_profile,
analyzer.get_help("list_profile"),
prefix_aliases=["lp"])
cli.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
return cli
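# Typical use, sketched (the Session `sess`, its fetches, and the config_pb2
# import are assumptions of this example, not part of the module):
#   from tensorflow.core.protobuf import config_pb2
#   run_options = config_pb2.RunOptions(
#       trace_level=config_pb2.RunOptions.FULL_TRACE)
#   run_metadata = config_pb2.RunMetadata()
#   sess.run(fetches, options=run_options, run_metadata=run_metadata)
#   create_profiler_ui(sess.graph, run_metadata).run_ui()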
|
|
"""SCons.Executor
A module for executing actions with specific lists of target and source
Nodes.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import string
from SCons.Debug import logInstanceCreation
import SCons.Memoize
class Executor:
"""A class for controlling instances of executing an action.
This largely exists to hold a single association of an action,
environment, list of environment override dictionaries, targets
and sources for later processing as needed.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action, env=None, overridelist=[{}],
targets=[], sources=[], builder_kw={}):
if __debug__: logInstanceCreation(self, 'Executor.Executor')
self.set_action_list(action)
self.pre_actions = []
self.post_actions = []
self.env = env
self.overridelist = overridelist
self.targets = targets
self.sources = sources[:]
self.builder_kw = builder_kw
self._memo = {}
def set_action_list(self, action):
import SCons.Util
if not SCons.Util.is_List(action):
if not action:
import SCons.Errors
raise SCons.Errors.UserError, "Executor must have an action."
action = [action]
self.action_list = action
def get_action_list(self):
return self.pre_actions + self.action_list + self.post_actions
def get_build_env(self):
"""Fetch or create the appropriate build Environment
for this Executor.
"""
# Create the build environment instance with appropriate
# overrides. These get evaluated against the current
# environment's construction variables so that users can
# add to existing values by referencing the variable in
# the expansion.
overrides = {}
for odict in self.overridelist:
overrides.update(odict)
import SCons.Defaults
env = self.env or SCons.Defaults.DefaultEnvironment()
build_env = env.Override(overrides)
return build_env
def get_build_scanner_path(self, scanner):
"""Fetch the scanner path for this executor's targets
and sources.
"""
env = self.get_build_env()
try:
cwd = self.targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd, self.targets, self.sources)
def get_kw(self, kw={}):
result = self.builder_kw.copy()
result.update(kw)
return result
def do_nothing(self, target, exitstatfunc, kw):
pass
def do_execute(self, target, exitstatfunc, kw):
"""Actually execute the action list."""
env = self.get_build_env()
kw = self.get_kw(kw)
for act in self.get_action_list():
apply(act,
(self.targets, self.sources, env, exitstatfunc),
kw)
# use extra indirection because with new-style objects (Python 2.2
# and above) we can't override special methods, and nullify() needs
# to be able to do this.
def __call__(self, target, exitstatfunc, **kw):
self.do_execute(target, exitstatfunc, kw)
def cleanup(self):
self._memo = {}
def add_sources(self, sources):
"""Add source files to this Executor's list. This is necessary
for "multi" Builders that can be called repeatedly to build up
a source file list for a given target."""
slist = filter(lambda x, s=self.sources: x not in s, sources)
self.sources.extend(slist)
def add_pre_action(self, action):
self.pre_actions.append(action)
def add_post_action(self, action):
self.post_actions.append(action)
# another extra indirection for new-style objects and nullify...
def my_str(self):
env = self.get_build_env()
get = lambda action, t=self.targets, s=self.sources, e=env: \
action.genstring(t, s, e)
return string.join(map(get, self.get_action_list()), "\n")
def __str__(self):
return self.my_str()
def nullify(self):
self.cleanup()
self.do_execute = self.do_nothing
self.my_str = lambda S=self: ''
memoizer_counters.append(SCons.Memoize.CountValue('get_contents'))
def get_contents(self):
"""Fetch the signature contents. This is the main reason this
class exists, so we can compute this once and cache it regardless
of how many target or source Nodes there are.
"""
try:
return self._memo['get_contents']
except KeyError:
pass
env = self.get_build_env()
get = lambda action, t=self.targets, s=self.sources, e=env: \
action.get_contents(t, s, e)
result = string.join(map(get, self.get_action_list()), "")
self._memo['get_contents'] = result
return result
def get_timestamp(self):
"""Fetch a time stamp for this Executor. We don't have one, of
course (only files do), but this is the interface used by the
timestamp module.
"""
return 0
def scan_targets(self, scanner):
self.scan(scanner, self.targets)
def scan_sources(self, scanner):
if self.sources:
self.scan(scanner, self.sources)
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
map(lambda N: N.disambiguate(), node_list)
env = self.get_build_env()
select_specific_scanner = lambda t: (t[0], t[1].select(t[0]))
remove_null_scanners = lambda t: not t[1] is None
add_scanner_path = lambda t, s=self: \
(t[0], t[1], s.get_build_scanner_path(t[1]))
if scanner:
scanner_list = map(lambda n, s=scanner: (n, s), node_list)
else:
kw = self.get_kw()
get_initial_scanners = lambda n, e=env, kw=kw: \
(n, n.get_env_scanner(e, kw))
scanner_list = map(get_initial_scanners, node_list)
scanner_list = filter(remove_null_scanners, scanner_list)
scanner_list = map(select_specific_scanner, scanner_list)
scanner_list = filter(remove_null_scanners, scanner_list)
scanner_path_list = map(add_scanner_path, scanner_list)
deps = []
for node, scanner, path in scanner_path_list:
deps.extend(node.get_implicit_deps(env, scanner, path))
for tgt in self.targets:
tgt.add_to_implicit(deps)
def get_missing_sources(self):
"""
"""
return filter(lambda s: s.missing(), self.sources)
def _get_unignored_sources_key(self, ignore=()):
return tuple(ignore)
memoizer_counters.append(SCons.Memoize.CountDict('get_unignored_sources', _get_unignored_sources_key))
def get_unignored_sources(self, ignore=()):
ignore = tuple(ignore)
try:
memo_dict = self._memo['get_unignored_sources']
except KeyError:
memo_dict = {}
self._memo['get_unignored_sources'] = memo_dict
else:
try:
return memo_dict[ignore]
except KeyError:
pass
sourcelist = self.sources
if ignore:
sourcelist = filter(lambda s, i=ignore: not s in i, sourcelist)
memo_dict[ignore] = sourcelist
return sourcelist
def _process_sources_key(self, func, ignore=()):
return (func, tuple(ignore))
memoizer_counters.append(SCons.Memoize.CountDict('process_sources', _process_sources_key))
def process_sources(self, func, ignore=()):
memo_key = (func, tuple(ignore))
try:
memo_dict = self._memo['process_sources']
except KeyError:
memo_dict = {}
self._memo['process_sources'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
result = map(func, self.get_unignored_sources(ignore))
memo_dict[memo_key] = result
return result
_Executor = Executor
class Null(_Executor):
"""A null Executor, with a null build Environment, that does
nothing when the rest of the methods call it.
    This might be able to disappear when we refactor things to
disassociate Builders from Nodes entirely, so we're not
going to worry about unit tests for this--at least for now.
"""
def __init__(self, *args, **kw):
if __debug__: logInstanceCreation(self, 'Executor.Null')
kw['action'] = []
apply(_Executor.__init__, (self,), kw)
def get_build_env(self):
class NullEnvironment:
def get_scanner(self, key):
return None
return NullEnvironment()
def get_build_scanner_path(self):
return None
def cleanup(self):
pass
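# Rough usage sketch (illustrative; in practice Executors are created and
# driven by SCons Builders and the Taskmaster, and the action/env/Node
# objects below are assumptions of the example):
#   executor = Executor(action, env, [{}], [target_node], [source_node])
#   executor.add_post_action(post_action)
#   contents = executor.get_contents()      # memoized signature string
#   executor(target_node, exitstatfunc)     # runs pre + main + post actions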
|