input
stringlengths 0
2.17k
| instruction
stringlengths 18
2.94k
| output
stringlengths 47
3.36k
|
---|---|---|
from openedoo_project import db
from openedoo.core.libs import Blueprint
from .controllers.employee import EmployeeLogin, EmployeeLogout, AddEmployee, \
AssignEmployeeAsTeacher, EmployeeDashboard, EditEmployee, DeleteEmployee, \
SearchEmployee, AddSubject
module_employee = Blueprint('module_employee', __name__,
template_folder='templates',
static_folder='static')
module_employee.add_url_rule('/admin/dashboard',
view_func=EmployeeDashboard.as_view('dashboard'))
module_employee.add_url_rule('/admin/login',
view_func=EmployeeLogin.as_view('login'))
module_employee.add_url_rule('/admin/logout',
view_func=EmployeeLogout.as_view('logout'))
module_employee.add_url_rule('/admin/add',
view_func=AddEmployee.as_view('add'))
module_employee.add_url_rule('/admin/edit',
view_func=EditEmployee.as_view('edit'))
assignEmployeeAsTeacherView = AssignEmployeeAsTeacher.as_view('assign')
module_employee.add_url_rule('/admin/assign',
view_func=assignEmployeeAsTeacherView)
module_employee.add_url_rule('/admin/delete',
view_func=DeleteEmployee.as_view('delete'))
module_employee.add_url_rule('/search',
view_func=SearchEmployee.as_view('search'))
module_employee.add_url_rule('/admin/subject/add',
view_func=AddSubject.as_view('add_subject'))
| Make dashboard route become admin's default
| from openedoo_project import db
from openedoo.core.libs import Blueprint
from .controllers.employee import EmployeeLogin, EmployeeLogout, AddEmployee, \
AssignEmployeeAsTeacher, EmployeeDashboard, EditEmployee, DeleteEmployee, \
SearchEmployee, AddSubject
module_employee = Blueprint('module_employee', __name__,
template_folder='templates',
static_folder='static')
module_employee.add_url_rule('/admin',
view_func=EmployeeDashboard.as_view('dashboard'))
module_employee.add_url_rule('/admin/login',
view_func=EmployeeLogin.as_view('login'))
module_employee.add_url_rule('/admin/logout',
view_func=EmployeeLogout.as_view('logout'))
module_employee.add_url_rule('/admin/add',
view_func=AddEmployee.as_view('add'))
module_employee.add_url_rule('/admin/edit',
view_func=EditEmployee.as_view('edit'))
assignEmployeeAsTeacherView = AssignEmployeeAsTeacher.as_view('assign')
module_employee.add_url_rule('/admin/assign',
view_func=assignEmployeeAsTeacherView)
module_employee.add_url_rule('/admin/delete',
view_func=DeleteEmployee.as_view('delete'))
module_employee.add_url_rule('/search',
view_func=SearchEmployee.as_view('search'))
module_employee.add_url_rule('/admin/subject/add',
view_func=AddSubject.as_view('add_subject'))
|
"""
WSGI config for oabutton project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oabutton.settings")
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Enable Django secure mode in WSGI module
| """
WSGI config for oabutton project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oabutton.settings")
# Enable Django secure mode (see http://security.stackexchange.com/a/8970)
os.environ["HTTPS"] = "on"
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
from .mdp import MDP
from .game_mdp import GameMDP
from ..utils import utility
class FixedGameMDP(GameMDP):
    """Game MDP in which the opponent's moves are supplied by a fixed player.

    From the agent's point of view the opponent is part of the environment:
    every transition folds the opponent's reply into the successor state.
    """

    def __init__(self, game, opp_player, opp_idx):
        '''
        opp_player: the opponent player
        opp_idx: the idx of the opponent player in the game
        '''
        # Delegate game storage to the base class instead of duplicating
        # its bookkeeping (previously self._game / self._states were set
        # here without ever calling super().__init__).
        super(FixedGameMDP, self).__init__(game)
        self._opp_player = opp_player
        self._opp_idx = opp_idx
        # Two-player game: the agent sits in the other seat.
        self._agent_idx = opp_idx ^ 1

    def reward(self, game, move, next_game):
        # Terminal states yield the utility from the agent's perspective;
        # all intermediate transitions are reward-free.
        return utility(next_game, self._agent_idx) if next_game.is_over() else 0

    def start_state(self):
        new_game = self._game.copy()
        # If the opponent moves first, play their move now so the start
        # state always has the agent to move (or is already terminal).
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        return new_game

    def transitions(self, game, move):
        if game.is_over():
            return []
        # NOTE(review): relies on make_move returning the game instance for
        # chaining here, while start_state ignores its return value — confirm.
        new_game = game.copy().make_move(move)
        # Fold the opponent's reply into the transition.
        if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
            chosen_move = self._opp_player.choose_move(new_game)
            new_game.make_move(chosen_move)
        # Deterministic MDP: exactly one successor, probability 1.
        return [(new_game, 1.0)]
| Call super __init__ in GameMDP
| from .mdp import MDP
from .game_mdp import GameMDP
from ..utils import utility
class FixedGameMDP(GameMDP):
def __init__(self, game, opp_player, opp_idx):
'''
opp_player: the opponent player
opp_idx: the idx of the opponent player in the game
'''
super(FixedGameMDP, self).__init__(game)
self._opp_player = opp_player
self._opp_idx = opp_idx
self._agent_idx = opp_idx ^ 1
def reward(self, game, move, next_game):
return utility(next_game, self._agent_idx) if next_game.is_over() else 0
def start_state(self):
new_game = self._game.copy()
if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
chosen_move = self._opp_player.choose_move(new_game)
new_game.make_move(chosen_move)
return new_game
def transitions(self, game, move):
if game.is_over():
return []
new_game = game.copy().make_move(move)
if not new_game.is_over() and new_game.cur_player() == self._opp_idx:
chosen_move = self._opp_player.choose_move(new_game)
new_game.make_move(chosen_move)
return [(new_game, 1.0)]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Thomas Bechtold <tbechtold@suse.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List # noqa: F401, pylint: disable=unused-import
import tarfile
import zipfile
def _get_archive_filelist(filename):
# type: (str) -> List[str]
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise Exception("Can not get filenames from '%s'. "
"Not a tar or zip file" % filename)
if "./" in names:
names.remove("./")
return names
| Raise a ValueError from _get_archive_filelist instead of Exception
Raising the Exception base class is considered bad style, as the more
specialized child classes carry more information about the kind of error that
occurred. And often no-one actually tries to catch the Exception class.
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Thomas Bechtold <tbechtold@suse.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List # noqa: F401, pylint: disable=unused-import
import tarfile
import zipfile
def _get_archive_filelist(filename):
# type: (str) -> List[str]
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names
|
import sys
from labonneboite.importer import util as import_util
from labonneboite.importer import settings
if __name__ == "__main__":
filename = import_util.detect_runnable_file("etablissements")
if filename:
with open(settings.JENKINS_ETAB_PROPERTIES_FILENAME, "w") as f:
f.write("LBB_ETABLISSEMENT_INPUT_FILE=%s\n" % filename)
sys.exit(0)
else:
sys.exit(-1)
| Add a run method for the entry point
| import sys
from labonneboite.importer import util as import_util
from labonneboite.importer import settings
def run():
    """Detect the runnable "etablissements" input file and expose it to Jenkins.

    Writes a properties file naming the detected input file, then exits
    with status 0; exits with status -1 when no runnable file is found.
    """
    filename = import_util.detect_runnable_file("etablissements")
    if filename:
        # Jenkins reads this properties file to learn the input file path.
        with open(settings.JENKINS_ETAB_PROPERTIES_FILENAME, "w") as f:
            f.write("LBB_ETABLISSEMENT_INPUT_FILE=%s\n" % filename)
        sys.exit(0)
    else:
        # No candidate file detected: signal failure to the calling job.
        sys.exit(-1)
if __name__ == '__main__':
run() |
#!/usr/bin/python2
from Crypto.Cipher import AES
import os, sys
flag = open("flag", "r").read()
key = open("key", "r").read().strip()
welcome = """
{{welcome_message}}
"""
def encrypt():
cipher = AES.new(key.decode('hex'), AES.MODE_ECB)
return cipher.encrypt(flag).encode("hex")
# flush output immediately
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
print welcome
print "KEY: " + key
print "MESSAGE: " + encrypt()
| Fix bug in problem caused by flag file having new lines
| #!/usr/bin/python2
from Crypto.Cipher import AES
import os, sys
flag = open("flag", "r").read().strip()
key = open("key", "r").read().strip()
welcome = """
{{welcome_message}}
"""
def encrypt():
cipher = AES.new(key.decode('hex'), AES.MODE_ECB)
return cipher.encrypt(flag).encode("hex")
# flush output immediately
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
print welcome
print "KEY: " + key
print "MESSAGE: " + encrypt()
|
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.

"""
Tweepy.asynchronous

Asynchronous interfaces with the Twitter API
"""

try:
    import aiohttp
    import oauthlib
except ModuleNotFoundError:
    # The asynchronous subpackage has optional dependencies; fail fast
    # with a clear install hint when they are missing.
    from tweepy.errors import TweepyException
    raise TweepyException(
        "tweepy.asynchronous requires aiohttp and oauthlib to be installed"
    )

from tweepy.asynchronous.streaming import AsyncStream
from tweepy.asynchronous.client import AsyncClient
| Check for async_lru when importing asynchronous subpackage
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.

"""
Tweepy.asynchronous

Asynchronous interfaces with the Twitter API
"""

try:
    import aiohttp
    import async_lru
    import oauthlib
except ModuleNotFoundError:
    # The asynchronous subpackage has optional dependencies; fail fast
    # with a clear install hint when they are missing.
    from tweepy.errors import TweepyException
    raise TweepyException(
        "tweepy.asynchronous requires aiohttp, async_lru, and oauthlib to be "
        "installed"
    )

from tweepy.asynchronous.streaming import AsyncStream
from tweepy.asynchronous.client import AsyncClient
|
import time
import os
from tweepy import API
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from credentials import *
class listener(StreamListener):
def __init__(self, api, start_time, time_limit=60):
self.time = start_time
self.limit = time_limit
self.tweet_data = []
self.api = api
def on_error(self, error):
print("Returned error code %s" % error)
return False
def on_status(self, status):
print(status.text)
if __name__ == "__main__":
start_time = time.time() # grabs the system time
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth)
twitterStream = Stream(auth, listener(api, start_time, time_limit=20)) # initialize Stream object with a time out limit
twitterStream.filter(follow=['25073877'],async=True)
| Remove unused code + follow only specific user status
|
from time import ctime
from tweepy import API
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from credentials import *
from tweepy.utils import import_simplejson
json = import_simplejson()
class listener(StreamListener):
def __init__(self, api, followed_user):
self.tweet_data = []
self.api = api
self.followed_user = followed_user
def on_error(self, error):
print("Returned error code %s" % error)
return False
def on_status(self, status):
if status.user.id == self.followed_user:
print("Tweeting at %s" % ctime())
if __name__ == "__main__":
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth)
followed_user = 25073877
twitterStream = Stream(auth, listener(api, followed_user))
twitterStream.filter(follow=[str(followed_user)], async=True)
|
"""Geometry classes and factories
"""
from geo import box, shape, asShape, mapping
from point import Point, asPoint
from linestring import LineString, asLineString
from polygon import Polygon, asPolygon
from multipoint import MultiPoint, asMultiPoint
from multilinestring import MultiLineString, asMultiLineString
from multipolygon import MultiPolygon, asMultiPolygon
from collection import GeometryCollection
__all__ = [
'box', 'shape', 'asShape', 'Point', 'asPoint', 'LineString', 'asLineString',
'Polygon', 'asPolygon', 'MultiPoint', 'asMultiPoint',
'MultiLineString', 'asMultiLineString', 'MultiPolygon', 'asMultiPolygon',
'GeometryCollection', 'mapping'
]
| Add missing cap and join style imports
| """Geometry classes and factories
"""
from base import CAP_STYLE, JOIN_STYLE
from geo import box, shape, asShape, mapping
from point import Point, asPoint
from linestring import LineString, asLineString
from polygon import Polygon, asPolygon
from multipoint import MultiPoint, asMultiPoint
from multilinestring import MultiLineString, asMultiLineString
from multipolygon import MultiPolygon, asMultiPolygon
from collection import GeometryCollection
__all__ = [
'box', 'shape', 'asShape', 'Point', 'asPoint', 'LineString', 'asLineString',
'Polygon', 'asPolygon', 'MultiPoint', 'asMultiPoint',
'MultiLineString', 'asMultiLineString', 'MultiPolygon', 'asMultiPolygon',
'GeometryCollection', 'mapping', 'CAP_STYLE', 'JOIN_STYLE'
]
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
    _inherit = 'res.partner'

    # Main industry of the partner.
    industry_id = fields.Many2one(string='Main Industry')
    # Additional industries; the domain keeps the main one out of the picker.
    secondary_industry_ids = fields.Many2many(
        comodel_name='res.partner.industry', string="Secondary Industries",
        domain="[('id', '!=', industry_id)]")

    @api.constrains('industry_id', 'secondary_industry_ids')
    def _check_industries(self):
        """Ensure the main industry is never listed as a secondary one.

        Constraint methods receive a recordset that may contain several
        records (e.g. creating a company together with two contacts), so
        each partner must be checked individually rather than assuming
        a singleton ``self``.
        """
        for partner in self:
            if partner.industry_id in partner.secondary_industry_ids:
                raise exceptions.ValidationError(
                    _('The main industry must be different '
                      'from the secondary industries.'))
| partner_industry_Secondary: Make api constrains multi to avoid error when create a company with 2 contacts
| # Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
for partner in self:
if partner.industry_id in partner.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
|
import os
import unittest
from carbonate.list import listMetrics
class ListTest(unittest.TestCase):
    """Tests for carbonate.list.listMetrics over a temporary metric tree."""

    # Filesystem layout: bare names are directories, *.wsp are whisper files.
    metrics_tree = ["foo",
                    "foo/sprockets.wsp",
                    "foo/widgets.wsp",
                    "ham",
                    "ham/bones.wsp",
                    "ham/hocks.wsp"]

    # Expected dotted metric names, in sorted order.
    expected_metrics = ["foo.sprockets",
                        "foo.widgets",
                        "ham.bones",
                        "ham.hocks"]

    rootdir = os.path.join(os.curdir, 'test_storage')

    @classmethod
    def setUpClass(cls):
        # Build a fresh storage tree from scratch for every test run.
        os.system("rm -rf %s" % cls.rootdir)
        os.mkdir(cls.rootdir)
        for f in cls.metrics_tree:
            if f.endswith('wsp'):
                open(os.path.join(cls.rootdir, f), 'w').close()
            else:
                os.mkdir(os.path.join(cls.rootdir, f))

    def test_list(self):
        # Sort the results before comparing: listMetrics yields entries in
        # filesystem walk order, which is not guaranteed to be sorted.
        res = sorted(listMetrics(self.rootdir))
        self.assertEqual(res, self.expected_metrics)

    def test_list_with_trailing_slash(self):
        res = sorted(listMetrics(self.rootdir + '/'))
        self.assertEqual(res, self.expected_metrics)

    @classmethod
    def tearDownClass(cls):
        os.system("rm -rf %s" % cls.rootdir)
| Make sure we're sorting results
| import os
import unittest
from carbonate.list import listMetrics
class ListTest(unittest.TestCase):
metrics_tree = ["foo",
"foo/sprockets.wsp",
"foo/widgets.wsp",
"ham",
"ham/bones.wsp",
"ham/hocks.wsp"]
expected_metrics = ["foo.sprockets",
"foo.widgets",
"ham.bones",
"ham.hocks"]
rootdir = os.path.join(os.curdir, 'test_storage')
@classmethod
def setUpClass(cls):
os.system("rm -rf %s" % cls.rootdir)
os.mkdir(cls.rootdir)
for f in cls.metrics_tree:
if f.endswith('wsp'):
open(os.path.join(cls.rootdir, f), 'w').close()
else:
os.mkdir(os.path.join(cls.rootdir, f))
def test_list(self):
res = sorted(list(listMetrics(self.rootdir)))
self.assertEqual(res, self.expected_metrics)
def test_list_with_trailing_slash(self):
res = sorted(list(listMetrics(self.rootdir + '/')))
self.assertEqual(res, self.expected_metrics)
@classmethod
def tearDownClass(cls):
os.system("rm -rf %s" % cls.rootdir)
|
#using the pymssql driver
import pymssql
#Connect to your database.
#Replace server name, username, password, and database name with your credentials
conn = pymssql.connect(server='yourserver.database.windows.net',
user='yourusername@yourserver', password='yourpassword',
database='AdventureWorks')
cursor = conn.cursor()
#Execute a simple select statement.
#Replace schema name and table name with your own
cursor.execute('SELECT c.CustomerID, c.CompanyName,COUNT(soh.SalesOrderID) AS OrderCount FROM SalesLT.Customer AS c LEFT OUTER JOIN SalesLT.SalesOrderHeader AS soh ON c.CustomerID = soh.CustomerID GROUP BY c.CustomerID, c.CompanyName ORDER BY OrderCount DESC;')
row = cursor.fetchone()
#Print results from select statement.
while row:
print str(row[0]) + " " + str(row[1]) + " " + str(row[2])
row = cursor.fetchone()
#INSERT
#Execute an insert statement
cursor.execute("INSERT SalesLT.Product (Name, ProductNumber, StandardCost, ListPrice, SellStartDate) OUTPUT INSERTED.ProductID VALUES ('SQL Server Express', 'SQLEXPRESS', 0, 0, CURRENT_TIMESTAMP)")
row = cursor.fetchone()
#Print the ID of the inserted row.
while row:
print "Inserted Product ID : " +str(row[0])
row = cursor.fetchone() | Fix white space in python example.
| #using the pymssql driver
import pymssql
#Connect to your database.
#Replace server name, username, password, and database name with your credentials
conn = pymssql.connect(server='yourserver.database.windows.net',
user='yourusername@yourserver', password='yourpassword',
database='AdventureWorks')
cursor = conn.cursor()
#Execute a simple select statement.
#Replace schema name and table name with your own
cursor.execute('SELECT c.CustomerID, c.CompanyName,COUNT(soh.SalesOrderID) AS OrderCount FROM SalesLT.Customer AS c LEFT OUTER JOIN SalesLT.SalesOrderHeader AS soh ON c.CustomerID = soh.CustomerID GROUP BY c.CustomerID, c.CompanyName ORDER BY OrderCount DESC;')
row = cursor.fetchone()
#Print results from select statement.
while row:
print str(row[0]) + " " + str(row[1]) + " " + str(row[2])
row = cursor.fetchone()
#INSERT
#Execute an insert statement
cursor.execute("INSERT SalesLT.Product (Name, ProductNumber, StandardCost, ListPrice, SellStartDate) OUTPUT INSERTED.ProductID VALUES ('SQL Server Express', 'SQLEXPRESS', 0, 0, CURRENT_TIMESTAMP)")
row = cursor.fetchone()
#Print the ID of the inserted row.
while row:
print "Inserted Product ID : " +str(row[0])
row = cursor.fetchone()
|
# -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_companyweb
| [ADD] Add checks on init file
| # -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_companyweb
checks = [
test_companyweb,
]
|
"""Test functions for util.mrbump_util"""
import cPickle
import os
import unittest
from ample.constants import AMPLE_PKL, SHARE_DIR
from ample.util import mrbump_util
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath( os.path.dirname( __file__ ) )
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share,'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl): return
with open(pkl) as f: d = cPickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf),3)
self.assertIn('info',topf[2])
if __name__ == "__main__":
unittest.main()
| Update unit test for changes to topf
| """Test functions for util.mrbump_util"""
import cPickle
import os
import unittest
from ample.constants import AMPLE_PKL, SHARE_DIR
from ample.util import mrbump_util
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath( os.path.dirname( __file__ ) )
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share,'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl): return
with open(pkl) as f: d = cPickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf),3)
self.assertEqual(topf[2]['source'],'SHELXE trace of MR result')
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
from io import BytesIO
from aspen import renderers
class Renderer(renderers.Renderer):
def render_content(self, context):
context['response'].headers['Content-Type'] = 'text/plain'
rows = eval(self.compiled, globals(), context)
if not rows:
return ''
f = BytesIO()
w = csv.writer(f)
if hasattr(rows[0], '_fields'):
w.writerow(rows[0]._fields)
w.writerows(rows)
f.seek(0)
return f.read()
class Factory(renderers.Factory):
Renderer = Renderer
| Remove line that sets content type text/plain
| from __future__ import absolute_import, division, print_function, unicode_literals
import csv
from io import BytesIO
from aspen import renderers
class Renderer(renderers.Renderer):
def render_content(self, context):
rows = eval(self.compiled, globals(), context)
if not rows:
return ''
f = BytesIO()
w = csv.writer(f)
if hasattr(rows[0], '_fields'):
w.writerow(rows[0]._fields)
w.writerows(rows)
f.seek(0)
return f.read()
class Factory(renderers.Factory):
Renderer = Renderer
|
#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
__all__ = ["parse"]
from jasy.env.State import session
from jasy.core import Console
from bs4 import BeautifulSoup
def parse(filename):
""" HTML parser class for Konstrukteur """
page = {}
parsedContent = BeautifulSoup(open(filename, "rt").read())
page["content"] = "".join([str(tag) for tag in parsedContent.find("body").contents])
page["title"] = parsedContent.title.string
for meta in parsedContent.find_all("meta"):
page[meta["name"].lower()] = meta["contents"]
return page | Add summary to html parser
| #
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
__all__ = ["parse"]
from jasy.env.State import session
from jasy.core import Console
from bs4 import BeautifulSoup
def parse(filename):
""" HTML parser class for Konstrukteur """
page = {}
parsedContent = BeautifulSoup(open(filename, "rt").read())
body = parsedContent.find("body")
page["content"] = "".join([str(tag) for tag in body.contents])
page["title"] = parsedContent.title.string
page["summary"] = body.p.get_text()
for meta in parsedContent.find_all("meta"):
page[meta["name"].lower()] = meta["contents"]
return page |
from fabric.api import *
def update():
require('code_root')
git_pull()
restart_web_server()
def git_pull():
run('cd %s; git stash; git pull' % (env.code_root))
def restart_web_server():
"Restart the web server"
run('%s/apache2/bin/restart' % env.code_root_parent)
def migrate():
run('cd %s; python manage.py migrate --settings=%s' % (env.code_root, env.settings_file))
def collect_static():
run('cd %s; python manage.py collectstatic --settings=%s --noinput' % (env.code_root, env.settings_file))
def pip_install():
run('cd %s; pip install -r requirements/frozen.txt' % (env.code_root))
def publish_changes():
update()
pip_install()
migrate()
collect_static()
restart_web_server() | Add some info in the fab tasks
| from fabric.api import *
def update():
"""Requires code_root env variable. Does a git pull and restarts the web server"""
require('code_root')
git_pull()
restart_web_server()
def git_pull():
"""Does a git stash then a git pull on the project"""
run('cd %s; git stash; git pull' % (env.code_root))
def restart_web_server():
"""Restart the web server"""
run('%s/apache2/bin/restart' % env.code_root_parent)
def migrate():
"""Runs python manage.py migrate"""
run('cd %s; python manage.py migrate --settings=%s' % (env.code_root, env.settings_file))
def collect_static():
"""Runs python manage.py collect_static --noinput"""
run('cd %s; python manage.py collectstatic --settings=%s --noinput' % (env.code_root, env.settings_file))
def pip_install():
"""Runs pip install -r requirements/frozen.txt (for example site)"""
run('cd %s; pip install -r requirements/frozen.txt' % (env.code_root))
def publish_changes():
"""Runs these functions in order (git_pull, pip_install, migrate, collect_static, restart_web_server)"""
git_pull()
pip_install()
migrate()
collect_static()
restart_web_server() |
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from openstack_dashboard import api
def get_interfaces_data(self):
    """Return the ports attached to the router identified in the URL kwargs.

    The management-network port is filtered out. On any API failure an
    empty list is returned and the error is reported to the user.
    """
    try:
        router_id = self.kwargs['router_id']
        router = api.quantum.router_get(self.request, router_id)
        # Note(rods): Filter off the port on the mgt network
        # Wrap with the neutron Port class — the previous api.quantum
        # reference was wrong (the wrapper lives in api.neutron).
        ports = [api.neutron.Port(p) for p in router.ports
                 if p['device_owner'] != 'network:router_management']
    except Exception:
        ports = []
        msg = _(
            'Port list can not be retrieved for router ID %s' %
            self.kwargs.get('router_id')
        )
        exceptions.handle(self.request, msg)
    for p in ports:
        p.set_id_as_name_if_empty()
    return ports
| Remove wrong reference to quantum
Change-Id: Ic3d8b26e061e85c1d128a79b115fd2da4412e705
Signed-off-by: Rosario Di Somma <73b2fe5f91895aea2b4d0e8942a5edf9f18fa897@dreamhost.com>
| from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from openstack_dashboard import api
def get_interfaces_data(self):
try:
router_id = self.kwargs['router_id']
router = api.quantum.router_get(self.request, router_id)
# Note(rods): Filter off the port on the mgt network
ports = [api.neutron.Port(p) for p in router.ports
if p['device_owner'] != 'network:router_management']
except Exception:
ports = []
msg = _(
'Port list can not be retrieved for router ID %s' %
self.kwargs.get('router_id')
)
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
|
from django.conf import settings
# Redis connection settings for the session backend, overridable from the
# project's Django settings.
SESSION_REDIS_HOST = getattr(settings, 'SESSION_REDIS_HOST', 'localhost')
SESSION_REDIS_PORT = getattr(settings, 'SESSION_REDIS_PORT', 6379)
SESSION_REDIS_DB = getattr(settings, 'SESSION_REDIS_DB', 0)
# Default to an empty prefix so keys written before a prefix was
# configured are still found (a non-empty default broke existing sessions).
SESSION_REDIS_PREFIX = getattr(settings, 'SESSION_REDIS_PREFIX', '')
SESSION_REDIS_PASSWORD = getattr(settings, 'SESSION_REDIS_PASSWORD', None)
SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = getattr(
    settings, 'SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH', None
)
| Fix redis prefix for existing sessions
| from django.conf import settings
SESSION_REDIS_HOST = getattr(settings, 'SESSION_REDIS_HOST', 'localhost')
SESSION_REDIS_PORT = getattr(settings, 'SESSION_REDIS_PORT', 6379)
SESSION_REDIS_DB = getattr(settings, 'SESSION_REDIS_DB', 0)
SESSION_REDIS_PREFIX = getattr(settings, 'SESSION_REDIS_PREFIX', '')
SESSION_REDIS_PASSWORD = getattr(settings, 'SESSION_REDIS_PASSWORD', None)
SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = getattr(
settings, 'SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH', None
)
|
'''
Module for running arbitrary tests
'''
import time
def echo(text):
    '''
    Return a string - used for testing the connection
    CLI Example:
    salt '*' test.echo 'foo bar baz quo qux'
    '''
    # Python 2 print statement: emits a marker so the call is visible
    # in the minion's output; the argument is returned unchanged.
    print 'Echo got called!'
    return text
def ping():
    '''
    Confirm that the minion is up and responding.
    Always returns True; merely reaching this code proves liveness.
    CLI Example:
    salt '*' test.ping
    '''
    return True
def fib(num):
    '''
    Return the Fibonacci sequence of values below num, together with the
    time in seconds it took to compute. Used for performance tests.
    CLI Example:
    salt '*' test.fib 3
    '''
    started = time.time()
    sequence = [0]
    current, upcoming = 0, 1
    while upcoming < num:
        sequence.append(upcoming)
        current, upcoming = upcoming, current + upcoming
    return sequence, time.time() - started
def collatz(start):
    '''
    Execute the Collatz conjecture from the passed starting number and
    return the sequence together with the time it took to compute.
    Used for performance tests.
    CLI Example:
    salt '*' test.collatz 3
    '''
    # Bug fix: the timer was previously assigned to `start`, clobbering the
    # argument, so the loop iterated on a timestamp instead of the input.
    begin = time.time()
    steps = []
    while start != 1:
        steps.append(start)
        if start > 1:
            if start % 2 == 0:
                start = start / 2
            else:
                start = start * 3 + 1
    return steps, time.time() - begin
| Fix assignment issue in collatz
| '''
Module for running arbitrairy tests
'''
import time
def echo(text):
    '''
    Return a string - used for testing the connection
    CLI Example:
    salt '*' test.echo 'foo bar baz quo qux'
    '''
    # Python 2 print statement: marker output for visibility on the minion.
    print 'Echo got called!'
    return text
def ping():
    '''
    Just used to make sure the minion is up and responding
    Return True
    CLI Example:
    salt '*' test.ping
    '''
    # Reaching this code at all proves the minion answered.
    return True
def fib(num):
    '''
    Return a Fibonacci sequence up to the passed number, and the time it took
    to compute in seconds. Used for performance tests
    CLI Example:
    salt '*' test.fib 3
    '''
    start = time.time()
    a, b = 0, 1
    ret = [0]
    # Classic pairwise step; stops once the next value reaches num.
    while b < num:
        ret.append(b)
        a, b = b, a + b
    return ret, time.time() - start
def collatz(start):
    '''
    Execute the Collatz conjecture from the passed starting number and
    return the sequence plus the time it took to compute.
    Used for performance tests.
    CLI Example:
    salt '*' test.collatz 3
    '''
    timer_start = time.time()
    sequence = []
    current = start
    while current != 1:
        sequence.append(current)
        if current > 1:
            current = current / 2 if current % 2 == 0 else current * 3 + 1
    return sequence, time.time() - timer_start
|
from flask import Flask, request
app = Flask(__name__)
@app.route('/retrieve', methods=['POST'])
def get():
public_key = request.form['PUBLIC_KEY']
enc_index = request.form['ENC_INDEX']
return "/retrieve index '{index}' with key '{key}'".format(index=enc_index, key=public_key)
@app.route('/set', methods=['POST'])
def put():
enc_index = request.form['ENC_INDEX']
enc_data = request.form['ENC_DATA']
return "/set '{index}' to '{data}'".format(data=enc_data, index=enc_index)
if __name__ == '__main__':
app.run(debug=True)
| Add switch (-d or --debug) for debug mode
| import argparse
from flask import Flask, request
parser = argparse.ArgumentParser(description="Start a Blindstore server.")
parser.add_argument('-d', '--debug', action='store_true',
help="enable Flask debug mode. DO NOT use in production.")
args = parser.parse_args()
app = Flask(__name__)
@app.route('/retrieve', methods=['POST'])
def get():
public_key = request.form['PUBLIC_KEY']
enc_index = request.form['ENC_INDEX']
return "/retrieve index '{index}' with key '{key}'".format(index=enc_index, key=public_key)
@app.route('/set', methods=['POST'])
def put():
enc_index = request.form['ENC_INDEX']
enc_data = request.form['ENC_DATA']
return "/set '{index}' to '{data}'".format(data=enc_data, index=enc_index)
if __name__ == '__main__':
app.run(debug=args.debug)
|
from .tabulate import _text_type
def pad(field, total, char=u" "):
    """Right-pad *field* with *char* until it is *total* characters long.

    Fields already at or beyond *total* are returned unchanged (negative
    repeat counts produce an empty fill string).
    """
    fill_count = total - len(field)
    return field + char * fill_count
def get_separator(num, header_len, data_len):
    """Build the '-[ RECORD n ]----+----' separator for expanded output.

    The dash fill stretches to the header edge (ending in '+'), then out
    to the full table width (headers + widest data cell + 1).
    """
    full_width = header_len + data_len + 1
    sep = u"-[ RECORD {0} ]".format(num)
    if len(sep) < header_len:
        # pad() inlined: dashes up to one short of the header edge, then '+'.
        sep += u"-" * (header_len - 1 - len(sep)) + u"+"
    if len(sep) < full_width:
        sep += u"-" * (full_width - len(sep))
    return sep + u"\n"
def expanded_table(rows, headers):
    """Render rows in expanded (\\G-style) layout.

    Each record becomes one 'header | value' line per column, preceded by
    a separator banner from get_separator().
    """
    header_len = max([len(x) for x in headers])
    max_row_len = 0
    results = []
    # Pad headers to a common width and append the column separator.
    padded_headers = [pad(x, header_len) + u" |" for x in headers]
    header_len += 2
    for row in rows:
        # Track the widest cell overall so separators span the whole table.
        row_len = max([len(_text_type(x)) for x in row])
        row_result = []
        if row_len > max_row_len:
            max_row_len = row_len
        for header, value in zip(padded_headers, row):
            row_result.append(u"%s %s" % (header, value))
        results.append('\n'.join(row_result))
    output = []
    for i, result in enumerate(results):
        output.append(get_separator(i, header_len, max_row_len))
        output.append(result)
        output.append('\n')
    return ''.join(output)
| Fix formatting issue for \G.
Closes #49
| from .tabulate import _text_type
def pad(field, total, char=u" "):
return field + (char * (total - len(field)))
def get_separator(num, header_len, data_len):
    """Return a MySQL-style '[ n. row ]' banner; the width args are ignored."""
    return u"***************************[ %d. row ]***************************\n" % (num + 1)
def expanded_table(rows, headers):
header_len = max([len(x) for x in headers])
max_row_len = 0
results = []
padded_headers = [pad(x, header_len) + u" |" for x in headers]
header_len += 2
for row in rows:
row_len = max([len(_text_type(x)) for x in row])
row_result = []
if row_len > max_row_len:
max_row_len = row_len
for header, value in zip(padded_headers, row):
row_result.append(u"%s %s" % (header, value))
results.append('\n'.join(row_result))
output = []
for i, result in enumerate(results):
output.append(get_separator(i, header_len, max_row_len))
output.append(result)
output.append('\n')
return ''.join(output)
|
from JsonStats.FetchStats.Plugins import *
from . import TestCase
import JsonStats.FetchStats.Plugins
from JsonStats.FetchStats import Fetcher
class TestPluginMount(TestCase):
def setUp(self):
# Do stuff that has to happen on every test in this instance
self.fetcher = Fetcher
def test_get_plugins(self):
"""
Verify that after loading plugins we can see them attached to
the Mount.
"""
discovered = len(self.fetcher.get_plugins())
expected = len(JsonStats.FetchStats.Plugins.__all__)
self.assertEqual(discovered, expected)
| Fix the plugin mount text. And make it way more intelligent.
| from . import TestCase
import JsonStats.FetchStats.Plugins
from JsonStats.FetchStats import Fetcher
class TestPluginMount(TestCase):
def setUp(self):
# Do stuff that has to happen on every test in this instance
self.fetcher = Fetcher
class _example_plugin(Fetcher):
def __init__(self):
self.context = 'testplugin'
self._load_data()
def _load_data(self):
self._loaded(True)
def dump(self):
return {}
def dump_json(self):
return self.json.dumps(self.dump())
self.example_plugin = _example_plugin
def test_get_plugins(self):
"""
Verify that after loading plugins we can see them attached to
the Mount.
"""
example_plugin = self.example_plugin()
discovered = len(self.fetcher.get_plugins())
assert discovered == 1
|
import os
from cffitsio._cfitsio import ffi, lib
def test_create_file(tmpdir):
filename = str(tmpdir.join('test.fits'))
f = ffi.new('fitsfile **')
status = ffi.new('int *')
lib.fits_create_file(f, filename, status)
assert status[0] == 0
assert os.path.isfile(filename)
| Add test for open file
| import os
from cffitsio._cfitsio import ffi, lib
def test_create_file(tmpdir):
filename = str(tmpdir.join('test.fits'))
f = ffi.new('fitsfile **')
status = ffi.new('int *')
lib.fits_create_file(f, filename, status)
assert status[0] == 0
assert os.path.isfile(filename)
def test_open_file(test_file):
f = ffi.new('fitsfile **')
status = ffi.new('int *')
lib.fits_open_file(f, test_file, 0, status)
assert status[0] == 0
|
from __future__ import unicode_literals
from ..base import Resource
class Document(Resource):
_resource_name = 'documents'
_is_listable = False
_as_is_fields = ['id', 'href', 'mime_type', 'content', 'type']
_date_time_fields_utc = ['date_created']
_resource_fields = [
('booking', 'Booking'),
]
class Invoice(Resource):
_resource_name = 'invoices'
_is_listable = False
_as_is_fields = ['id', 'href', 'audience']
_date_time_fields_utc = ['date_created']
_resource_fields = [
('document', Document),
('booking', 'Booking'),
]
| Add audience field to Document resource
- 'audience' field is displayed on list of invoices (bookings/<booking_id>/invoices) and now, also on list of documents (bookings/<booking_id>/documents) to match what is being returned in the API | from __future__ import unicode_literals
from ..base import Resource
class Document(Resource):
_resource_name = 'documents'
_is_listable = False
_as_is_fields = ['id', 'href', 'mime_type', 'content', 'type', 'audience']
_date_time_fields_utc = ['date_created']
_resource_fields = [
('booking', 'Booking'),
]
class Invoice(Resource):
_resource_name = 'invoices'
_is_listable = False
_as_is_fields = ['id', 'href', 'audience']
_date_time_fields_utc = ['date_created']
_resource_fields = [
('document', Document),
('booking', 'Booking'),
]
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
long_description = open(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read()
setup(
name='pymux',
author='Jonathan Slenders',
version='0.3',
license='LICENSE',
url='https://github.com/jonathanslenders/',
description='Pure Python terminal multiplexer.',
long_description=long_description,
packages=find_packages('.'),
install_requires = [
'prompt_toolkit==0.56',
'pyte>=0.4.10',
'six>=1.9.0',
'docopt>=0.6.2',
],
entry_points={
'console_scripts': [
'pymux = pymux.entry_points.run_pymux:run',
]
},
)
| Upgrade to prompt_toolkit 0.57. (Should give much better performance.)
| #!/usr/bin/env python
import os
from setuptools import setup, find_packages
long_description = open(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read()
setup(
name='pymux',
author='Jonathan Slenders',
version='0.3',
license='LICENSE',
url='https://github.com/jonathanslenders/',
description='Pure Python terminal multiplexer.',
long_description=long_description,
packages=find_packages('.'),
install_requires = [
'prompt_toolkit==0.57',
'pyte>=0.4.10',
'six>=1.9.0',
'docopt>=0.6.2',
],
entry_points={
'console_scripts': [
'pymux = pymux.entry_points.run_pymux:run',
]
},
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='geodjango-timezones',
version='0.1',
description='Models to store and scripts to load timezone shapefiles to be usable inside a GeoDjango application.',
author='Adam Fast',
author_email='',
url='https://github.com/adamfast/geodjango_timezones',
packages=find_packages(),
package_data={
},
include_package_data=True,
install_requires=['pytz'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| Increment version - go ahead and call it 1.0, even. | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='geodjango-timezones',
version='1.0',
description='Models to store and scripts to load timezone shapefiles to be usable inside a GeoDjango application.',
author='Adam Fast',
author_email='',
url='https://github.com/adamfast/geodjango_timezones',
packages=find_packages(),
package_data={
},
include_package_data=True,
install_requires=['pytz'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
|
"""
Pick a word from /usr/share/dict/words
"""
import subprocess
from sys import exit
import random
def choose(difficulty):
    """Return a random word whose length lies within *difficulty*.

    difficulty is a (min, max) pair of inclusive word lengths; note these
    names shadow the min/max builtins inside this function.
    """
    (min, max) = difficulty
    # grep the system word list for words of the requested length.
    # NOTE(review): assumes /usr/share/dict/words exists — not portable.
    cmd = "/usr/bin/grep -E '^.{{{},{}}}$' /usr/share/dict/words".format(min, max)
    obj = subprocess.run(cmd,
                         shell=True,
                         stdout=subprocess.PIPE)
    # One word per line of grep output; pick one at random.
    result = obj.stdout.decode('utf-8').strip().split("\n")
    return random.choice(result)
return random.choice(result) | Add a main guard for testing
| """
Pick a word from /usr/share/dict/words
"""
import subprocess
from sys import exit
import random
def choose(difficulty):
    """Return a random word whose length lies within *difficulty*.

    difficulty is a (min_len, max_len) pair of inclusive word lengths.
    NOTE(review): relies on /usr/share/dict/words existing — not portable.
    """
    # Renamed from (min, max): the originals shadowed the builtins.
    min_len, max_len = difficulty
    cmd = "/usr/bin/grep -E '^.{{{},{}}}$' /usr/share/dict/words".format(min_len, max_len)
    # shell=True is required for the quoted brace quantifier; the command
    # is assembled from trusted local arguments only.
    obj = subprocess.run(cmd,
                         shell=True,
                         stdout=subprocess.PIPE)
    # One word per line of grep output; pick one at random.
    words = obj.stdout.decode('utf-8').strip().split("\n")
    return random.choice(words)
if __name__ == "__main__":
choose((3, 8)) |
from django.shortcuts import render
from news.models import Article, Event
from door.models import DoorStatus
from datetime import datetime
from itertools import chain
def index(request):
number_of_news = 4
# Sorts the news to show the events nearest in future and then fill in with the newest articles
event_list = Event.objects.filter(time_end__gte=datetime.now())[0:number_of_news:-1]
article_list = Article.objects.order_by('-pub_date')[0:number_of_news - len(event_list)]
news_list = list(chain(event_list, article_list))
try:
door_status = DoorStatus.objects.get(name='hackerspace').status
except DoorStatus.DoesNotExist:
door_status = True
context = {
'news_list': news_list,
'door_status': door_status,
}
return render(request, 'index.html', context)
def test404(request):
return render(request, '404.html')
| Change number of news on frontpage
| from django.shortcuts import render
from news.models import Article, Event
from door.models import DoorStatus
from datetime import datetime
from itertools import chain
def index(request):
number_of_news = 3
# Sorts the news to show the events nearest in future and then fill in with the newest articles
event_list = Event.objects.filter(time_end__gte=datetime.now())[0:number_of_news:-1]
article_list = Article.objects.order_by('-pub_date')[0:number_of_news - len(event_list)]
news_list = list(chain(event_list, article_list))
try:
door_status = DoorStatus.objects.get(name='hackerspace').status
except DoorStatus.DoesNotExist:
door_status = True
context = {
'news_list': news_list,
'door_status': door_status,
}
return render(request, 'index.html', context)
def test404(request):
return render(request, '404.html')
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 02:37:25 2013
@author: Jan
"""
import unittest
class PlotStyleTest(unittest.TestCase):
def test_default(self):
from opengrid.library.plotting import plot_style
plt = plot_style()
class CarpetTest(unittest.TestCase):
def test_default(self):
import numpy as np
import pandas as pd
from opengrid.library import plotting
index = pd.date_range('2015-1-1', '2015-12-31', freq='h')
ser = pd.Series(np.random.normal(size=len(index)), index=index, name='abc')
plotting.carpet(ser)
if __name__ == '__main__':
unittest.main()
| [TST] Resolve RuntimeError: Invalid DISPLAY variable
| # -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 02:37:25 2013
@author: Jan
"""
import matplotlib
import unittest
matplotlib.use('Agg')
class PlotStyleTest(unittest.TestCase):
def test_default(self):
from opengrid.library.plotting import plot_style
plt = plot_style()
class CarpetTest(unittest.TestCase):
def test_default(self):
import numpy as np
import pandas as pd
from opengrid.library import plotting
index = pd.date_range('2015-1-1', '2015-12-31', freq='h')
ser = pd.Series(np.random.normal(size=len(index)), index=index, name='abc')
plotting.carpet(ser)
if __name__ == '__main__':
unittest.main()
|
import sublime_plugin
from editorconfig import get_properties, EditorConfigError
LINE_ENDINGS = {
'lf': 'Unix',
'crlf': 'Windows',
'cr': 'CR'
}
class EditorConfig(sublime_plugin.EventListener):
def on_load(self, view):
try:
config = get_properties(view.file_name())
except EditorConfigError:
print 'Error occurred while getting EditorConfig properties'
else:
if config:
settings = view.settings()
# EOL
view.set_line_endings(LINE_ENDINGS[config['end_of_line']])
# Indent type
settings.set('translate_tabs_to_spaces', config['indent_style'] == 'space')
# Indent size
settings.set('tab_size', int(config['indent_size']))
else:
print 'There seems to be an error with your .editorconfig file' | Fix plugin not taking into account opening of unsaved buffers and some refactoring
| import sublime_plugin
from editorconfig import get_properties, EditorConfigError
LINE_ENDINGS = {
'lf': 'Unix',
'crlf': 'Windows',
'cr': 'CR'
}
class EditorConfig(sublime_plugin.EventListener):
def on_load(self, view):
path = view.file_name()
if not path:
return
try:
config = get_properties(path)
except EditorConfigError:
print 'Error occurred while getting EditorConfig properties'
else:
if config:
settings = view.settings()
window = view.window()
end_of_line = config.get('end_of_line')
indent_style = config.get('indent_style')
indent_size = config.get('indent_size')
# Indent type
if indent_style == 'tab':
window.run_command('unexpand_tabs', {'set_translate_tabs': False})
if indent_style == 'space':
window.run_command('expand_tabs', {'set_translate_tabs': True})
# Indent size
if indent_size:
settings.set('tab_size', int(indent_size))
# EOL
if end_of_line:
view.set_line_endings(LINE_ENDINGS[end_of_line]) |
import six
from .base import MultiSelectField
class ValuesListField(MultiSelectField):
field_type = 'Core.Models.Fields.ValuesListField, Core'
supported_types = six.string_types
def __init__(self, *args, **kwargs):
"""Map names to IDs for use in field rehydration"""
super(ValuesListField, self).__init__(*args, **kwargs)
self.selection_to_id_map = {f['name']: f['id'] for f in self.field_definition['values']}
def validate_value(self, value):
"""Validate provided value is one of the valid options"""
super(ValuesListField, self).validate_value(value)
if value is not None:
if value not in self.selection_to_id_map:
raise ValueError('Field "{}" invalid value "{}". Valid options: {}'.format(
self.name,
value,
', '.join(self.selection_to_id_map.keys())
))
def cast_to_python(self, value):
"""Store actual value as internal representation"""
if value is not None:
value = value['value']
return value
def cast_to_swimlane(self, value):
"""Rehydrate value back as full JSON representation"""
if value is None:
return value
return {
'$type': 'Core.Models.Record.ValueSelection, Core',
'id': self.selection_to_id_map[value],
'value': value
}
| Convert ValueError -> ValidationError in ValuesListField
| import six
from swimlane.exceptions import ValidationError
from .base import MultiSelectField
class ValuesListField(MultiSelectField):
field_type = 'Core.Models.Fields.ValuesListField, Core'
supported_types = six.string_types
def __init__(self, *args, **kwargs):
"""Map names to IDs for use in field rehydration"""
super(ValuesListField, self).__init__(*args, **kwargs)
self.selection_to_id_map = {f['name']: f['id'] for f in self.field_definition['values']}
def validate_value(self, value):
"""Validate provided value is one of the valid options"""
super(ValuesListField, self).validate_value(value)
if value is not None:
if value not in self.selection_to_id_map:
raise ValidationError(
self.record,
'Field "{}" invalid value "{}". Valid options: {}'.format(
self.name,
value,
', '.join(self.selection_to_id_map.keys())
)
)
def cast_to_python(self, value):
"""Store actual value as internal representation"""
if value is not None:
value = value['value']
return value
def cast_to_swimlane(self, value):
"""Rehydrate value back as full JSON representation"""
if value is None:
return value
return {
'$type': 'Core.Models.Record.ValueSelection, Core',
'id': self.selection_to_id_map[value],
'value': value
}
|
"""Dashboard template filters"""
from django import template
import littlechef
from kitchen.settings import REPO
register = template.Library()
@register.filter(name='get_role_list')
def get_role_list(run_list):
"""Returns the role sublist from the given run_list"""
if run_list:
all_roles = littlechef.lib.get_roles_in_node(
{'run_list': run_list})
role_list = []
for role in all_roles:
if not role.startswith(REPO['EXCLUDE_ROLE_PREFIX']):
# Only add if it doesn't start with excluded role prefixes
role_list.append(role)
return role_list
else:
return []
@register.filter(name='get_recipe_list')
def get_recipe_list(run_list):
    """Returns the recipe sublist from the given run_list."""
    # Bug fix: `{'run_list': run_list} or []` always evaluated to the dict
    # (a non-empty dict is truthy), so a None run_list leaked through.
    # The empty-list default must apply to run_list itself.
    return littlechef.lib.get_recipes_in_node({'run_list': run_list or []})
| Fix recipe filter return statement
| """Dashboard template filters"""
from django import template
import littlechef
from kitchen.settings import REPO
register = template.Library()
@register.filter(name='get_role_list')
def get_role_list(run_list):
"""Returns the role sublist from the given run_list"""
if run_list:
all_roles = littlechef.lib.get_roles_in_node(
{'run_list': run_list})
role_list = []
for role in all_roles:
if not role.startswith(REPO['EXCLUDE_ROLE_PREFIX']):
# Only add if it doesn't start with excluded role prefixes
role_list.append(role)
return role_list
else:
return []
@register.filter(name='get_recipe_list')
def get_recipe_list(run_list):
"""Returns the recipe sublist from the given run_list"""
return littlechef.lib.get_recipes_in_node({'run_list': run_list or []})
|
import logging
from commander.deploy import task
from deploy_base import * # noqa
log = logging.getLogger(__name__)
base_update_assets = update_assets
base_database = database
@task
def database(ctx):
# only ever run this one on demo and dev.
management_cmd(ctx, 'bedrock_truncate_database --yes-i-am-sure')
base_database()
management_cmd(ctx, 'rnasync')
management_cmd(ctx, 'update_security_advisories --force --quiet', use_src_dir=True)
management_cmd(ctx, 'cron update_ical_feeds')
management_cmd(ctx, 'cron update_tweets')
management_cmd(ctx, 'runscript update_firefox_os_feeds')
@task
def update_assets(ctx):
"""Compile/compress static assets and fetch external data."""
base_update_assets()
# can't do this in `database` because it needs to run before
# the file sync from SRC -> WWW.
management_cmd(ctx, 'update_product_details', use_src_dir=True)
| Stop truncating the DB for dev/demo pushes.
| import logging
from commander.deploy import task
from deploy_base import * # noqa
log = logging.getLogger(__name__)
base_update_assets = update_assets
base_database = database
@task
def database(ctx):
# only ever run this one on demo and dev.
base_database()
management_cmd(ctx, 'rnasync')
management_cmd(ctx, 'update_security_advisories --quiet', use_src_dir=True)
management_cmd(ctx, 'cron update_ical_feeds')
management_cmd(ctx, 'cron update_tweets')
management_cmd(ctx, 'runscript update_firefox_os_feeds')
@task
def update_assets(ctx):
"""Compile/compress static assets and fetch external data."""
base_update_assets()
# can't do this in `database` because it needs to run before
# the file sync from SRC -> WWW.
management_cmd(ctx, 'update_product_details', use_src_dir=True)
|
import unittest
from ..framework import cli_test_suite
from .util import DEFAULT_MANAGEMENT_KEY
@cli_test_suite
def additional_tests(ykman_cli):
class Misc(unittest.TestCase):
def setUp(self):
ykman_cli('piv', 'reset', '-f')
def test_info(self):
output = ykman_cli('piv', 'info')
self.assertIn('PIV version:', output)
def test_reset(self):
output = ykman_cli('piv', 'reset', '-f')
self.assertIn('Success!', output)
def test_write_read_object(self):
data = 'test data'
for i in range(0, 3):
ykman_cli(
'piv', 'write-object',
'-m', DEFAULT_MANAGEMENT_KEY, '0x5f0001',
'-', input=data)
data = ykman_cli('piv', 'read-object', '0x5f0001')
self.assertEqual(data, 'test data')
return [Misc]
| Test that piv read-object preserves ANSI escape codes
Objects written might (accidentally?) contain such codes, so they
should be preserved when read back out. For example, there's a 281 in
10^12 chance that any six random bytes happen to make the escape code for
red text colour.
| import unittest
from ..framework import cli_test_suite
from .util import DEFAULT_MANAGEMENT_KEY
@cli_test_suite
def additional_tests(ykman_cli):
class Misc(unittest.TestCase):
def setUp(self):
ykman_cli('piv', 'reset', '-f')
def test_info(self):
output = ykman_cli('piv', 'info')
self.assertIn('PIV version:', output)
def test_reset(self):
output = ykman_cli('piv', 'reset', '-f')
self.assertIn('Success!', output)
def test_write_read_object(self):
data = 'test data'
for i in range(0, 3):
ykman_cli(
'piv', 'write-object',
'-m', DEFAULT_MANAGEMENT_KEY, '0x5f0001',
'-', input=data)
data = ykman_cli('piv', 'read-object', '0x5f0001')
self.assertEqual(data, 'test data')
def test_write_read_preserves_ansi_escapes(self):
red = b'\x00\x1b[31m'
blue = b'\x00\x1b[34m'
reset = b'\x00\x1b[0m'
data = (b'Hello, ' + red + b'red' + reset + b' and ' + blue
+ b'blue' + reset + b' world!')
ykman_cli(
'piv', 'write-object',
'-m', DEFAULT_MANAGEMENT_KEY, '0x5f0001',
'-', input=data)
output_data = ykman_cli.with_bytes_output(
'piv', 'read-object', '0x5f0001')
self.assertEqual(data, output_data)
return [Misc]
|
"""
byceps.services.image.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import BinaryIO, FrozenSet, Iterable, Set
from ...util.image import read_dimensions
from ...util.image.models import Dimensions, ImageType
from ...util.image.typeguess import guess_type
ALL_IMAGE_TYPES = frozenset(ImageType) # type: FrozenSet[ImageType]
class ImageTypeProhibited(ValueError):
pass
def get_all_image_types() -> FrozenSet[ImageType]:
"""Return all known image types."""
return ALL_IMAGE_TYPES
def get_image_type_names(types: Iterable[ImageType]) -> FrozenSet[str]:
"""Return the names of the image types."""
return frozenset(t.name.upper() for t in types)
def determine_image_type(stream: BinaryIO, allowed_types: Set[ImageType]) \
-> ImageType:
"""Extract image type from stream."""
image_type = guess_type(stream)
if image_type not in allowed_types:
allowed_type_names = get_image_type_names(allowed_types)
allowed_type_names_string = ', '.join(sorted(allowed_type_names))
raise ImageTypeProhibited(
'Image is not one of the allowed types ({}).'
.format(allowed_type_names_string))
stream.seek(0)
return image_type
def determine_dimensions(stream: BinaryIO) -> Dimensions:
"""Extract image dimensions from stream."""
dimensions = read_dimensions(stream)
stream.seek(0)
return dimensions
| Remove constant and function that list all existing image types
This allows having additional image types for (temporarily) internal
purposes without accidentally exposing them.
| """
byceps.services.image.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import BinaryIO, FrozenSet, Iterable, Set
from ...util.image import read_dimensions
from ...util.image.models import Dimensions, ImageType
from ...util.image.typeguess import guess_type
class ImageTypeProhibited(ValueError):
pass
def get_image_type_names(types: Iterable[ImageType]) -> FrozenSet[str]:
"""Return the names of the image types."""
return frozenset(t.name.upper() for t in types)
def determine_image_type(stream: BinaryIO, allowed_types: Set[ImageType]) \
-> ImageType:
"""Extract image type from stream."""
image_type = guess_type(stream)
if image_type not in allowed_types:
allowed_type_names = get_image_type_names(allowed_types)
allowed_type_names_string = ', '.join(sorted(allowed_type_names))
raise ImageTypeProhibited(
'Image is not one of the allowed types ({}).'
.format(allowed_type_names_string))
stream.seek(0)
return image_type
def determine_dimensions(stream: BinaryIO) -> Dimensions:
"""Extract image dimensions from stream."""
dimensions = read_dimensions(stream)
stream.seek(0)
return dimensions
|
# -*- coding: utf-8 -*-
from django import forms
from django.template import Context, loader
from django.http import HttpResponse
from epiweb.apps.survey import utils
from epiweb.apps.survey.data import example
def create_field(item):
if item['type'] == 'yes-no':
field = forms.ChoiceField(widget=forms.RadioSelect,
choices=[('yes', _('Yes')), ('no', _('No'))])
elif item['type'] == 'option-multiple':
field = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
choices=zip(range(0, len(item['options'])), item['options']))
elif item['type'] == 'option-single':
field = forms.ChoiceField(widget=forms.RadioSelect,
choices=zip(range(0, len(item['options'])), item['options']))
elif item['type'] == 'date':
field = forms.DateField(input_formats='%m/%d/%y')
else:
field = forms.CharField()
field.label = item.get('label', None)
field.required = False
return field
def create_form(data, values=None):
if values:
f = forms.Form(values)
else:
f = forms.Form()
for item in data:
f.fields[item['id']] = create_field(item)
return f
def index(request):
if request.method == 'POST':
form = utils.generate_form(example.data.sections[0], request.POST)
else:
form = utils.generate_form(example.data.sections[0])
t = loader.get_template('survey/index.html')
c = Context({
'form': form
})
return HttpResponse(t.render(c))
def survey(request, survey_id, page=None):
html = "survey_id=%s, page=%s" % (survey_id, page)
return HttpResponse(html)
| Remove form generator from the view.
| # -*- coding: utf-8 -*-
from django import forms
from django.template import Context, loader
from django.http import HttpResponse
from epiweb.apps.survey import utils
from epiweb.apps.survey.data import example
def index(request):
if request.method == 'POST':
form = utils.generate_form(example.data.sections[0], request.POST)
else:
form = utils.generate_form(example.data.sections[0])
t = loader.get_template('survey/index.html')
c = Context({
'form': form
})
return HttpResponse(t.render(c))
def survey(request, survey_id, page=None):
html = "survey_id=%s, page=%s" % (survey_id, page)
return HttpResponse(html)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pollingstations', '0006_residentialaddress_slug'),
]
operations = [
migrations.CreateModel(
name='CustomFinder',
fields=[
('area_code', models.CharField(serialize=False, max_length=9, primary_key=True)),
('base_url', models.CharField(max_length=255, blank=True)),
('can_pass_postcode', models.BooleanField(default=False)),
('message', models.TextField(blank=True)),
],
),
]
| Edit migration so it depends on 0008_auto_20160415_1854
Ensure the migrations will apply correctly without conflcit once merged
Merging this branch is now blocked on PR #239
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pollingstations', '0008_auto_20160415_1854'),
]
operations = [
migrations.CreateModel(
name='CustomFinder',
fields=[
('area_code', models.CharField(serialize=False, max_length=9, primary_key=True)),
('base_url', models.CharField(max_length=255, blank=True)),
('can_pass_postcode', models.BooleanField(default=False)),
('message', models.TextField(blank=True)),
],
),
]
|
#!/usr/bin/env python
#
# Syntax: ./run_job <session-id>
#
# It should be run with the current working directory set properly
#
import sys, json
from sci.session import Session
from sci.bootstrap import Bootstrap
data = json.loads(sys.stdin.read())
session_id = sys.argv[1]
session = Session.load(session_id)
run_info = data['run_info']
Bootstrap.run(session, data['build_id'], data['job_server'],
run_info['step_fun'], args = run_info['args'],
kwargs = run_info['kwargs'], env = run_info['env'])
| Support when run_info is not specified
That is the case when starting a build (not running a step)
| #!/usr/bin/env python
#
# Syntax: ./run_job <session-id>
#
# It should be run with the current working directory set properly
#
import sys, json
from sci.session import Session
from sci.bootstrap import Bootstrap
data = json.loads(sys.stdin.read())
session_id = sys.argv[1]
session = Session.load(session_id)
run_info = data['run_info'] or {}
Bootstrap.run(session, data['build_id'], data['job_server'],
run_info.get('step_fun'),
args = run_info.get('args', []),
kwargs = run_info.get('kwargs', {}),
env = run_info.get('env'))
|
import unittest2
from mlabns.util import distance
class DistanceTestCase(unittest2.TestCase):
def testValidSmallDistance(self):
dist = distance.distance(0, 0, 10, 10)
self.assertEqual(1568.5205567985761, dist)
def testValidLargeDistance(self):
dist = distance.distance(20, 20, 100, 100)
self.assertEqual(8009.5721050828461, dist)
def testInvalidInputs(self):
import math
from numbers import Number
dist = 0
try:
dist = distance.distance(-700,1000,999,-5454)
except Exception:
self.fail("distance threw an exception on invalid entry")
self.assertTrue(isinstance(dist, Number))
self.assertFalse(math.isnan(dist))
if __name__ == '__main__':
unittest2.main()
| Update indentation as per style guide
| import unittest2
from mlabns.util import distance
class DistanceTestCase(unittest2.TestCase):
def testValidSmallDistance(self):
dist = distance.distance(0, 0, 10, 10)
self.assertEqual(1568.5205567985761, dist)
def testValidLargeDistance(self):
dist = distance.distance(20, 20, 100, 100)
self.assertEqual(8009.5721050828461, dist)
def testInvalidInputs(self):
import math
from numbers import Number
dist = 0
try:
dist = distance.distance(-700,1000,999,-5454)
except Exception:
self.fail("distance threw an exception on invalid entry")
self.assertTrue(isinstance(dist, Number))
self.assertFalse(math.isnan(dist))
if __name__ == '__main__':
unittest2.main()
|
class ThingObjectBase(object):
def __getitem__(self, item):
return getattr(self, item)
def __contains__(self, item):
return hasattr(self, item)
class ThingObjectOutput(ThingObjectBase):
def __init__(self):
self.data = []
def write(self, *args):
self.data.append(' '.join(str(x) for x in args))
class ThingObjectInput(ThingObjectBase):
def __init__(self):
self.data = []
def get_line(self):
line = input()
self.data.append(line)
return line
| Update Input object to support direct output during get_line operations
| class ThingObjectBase(object):
def __getitem__(self, item):
return getattr(self, item)
def __contains__(self, item):
return hasattr(self, item)
class ThingObjectOutput(ThingObjectBase):
def __init__(self):
self.data = []
def write(self, *args):
self.data.append(' '.join(str(x) for x in args))
class ThingObjectInput(ThingObjectBase):
def __init__(self, heap):
self.data = []
self.heap = heap
def get_line(self, line=None):
if line is not None:
self.heap['Output'].write(line)
line = input()
self.data.append(line)
return line
|
# -*- coding: utf-8 -*-
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class AuditTest(PyxformTestCase):
def test_background_audio(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | |
| | type | name |
| | background-audio | my_recording |
""",
xml__contains=[
'<odk:recordaudio event="odk-instance-load" ref="/data/my_recording"/>',
],
)
| Add ignored test for recordaction validation
| # -*- coding: utf-8 -*-
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
import unittest
class BackgroundAudioTest(PyxformTestCase):
def test_background_audio(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | |
| | type | name |
| | background-audio | my_recording |
""",
xml__contains=[
'<odk:recordaudio event="odk-instance-load" ref="/data/my_recording"/>',
],
)
@unittest.skip("Required update to Validate to work")
def test_background_audio_is_valid(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | |
| | type | name |
| | background-audio | my_recording |
""",
run_odk_validate=True,
)
|
import os
from setuptools import setup
def long_description():
os.system('pandoc --from=markdown --to=rst --output=README.rst README.md')
readme_fn = os.path.join(os.path.dirname(__file__), 'README.rst')
if os.path.exists(readme_fn):
with open(readme_fn) as f:
return f.read()
else:
return 'not available'
setup(
name='boddle',
version=__import__('boddle').__version__,
description="A unit testing tool for Python's bottle library.",
long_description=long_description(),
author='Derek Anderson',
author_email='public@kered.org',
url='https://github.com/keredson/boddle',
packages=[],
py_modules=['boddle'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=[],
)
| Add bottle as a requirement for this package. | import os
from setuptools import setup
def long_description():
os.system('pandoc --from=markdown --to=rst --output=README.rst README.md')
readme_fn = os.path.join(os.path.dirname(__file__), 'README.rst')
if os.path.exists(readme_fn):
with open(readme_fn) as f:
return f.read()
else:
return 'not available'
setup(
name='boddle',
version=__import__('boddle').__version__,
description="A unit testing tool for Python's bottle library.",
long_description=long_description(),
author='Derek Anderson',
author_email='public@kered.org',
url='https://github.com/keredson/boddle',
packages=[],
py_modules=['boddle'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=['bottle'],
)
|
from setuptools import setup
setup(
name='bambou',
version='0.0.1',
url='http://www.nuagenetworks.net/',
author='Christophe Serafin',
author_email='christophe.serafin@alcatel-lucent.com',
packages=['bambou', 'bambou.utils'],
description='REST Library for Nuage Networks',
long_description=open('README.md').read(),
install_requires=[line for line in open('requirements.txt')],
)
| Set Bambou package version to 0.0.2
| from setuptools import setup
setup(
name='bambou',
version='0.0.2',
url='http://www.nuagenetworks.net/',
author='Christophe Serafin',
author_email='christophe.serafin@alcatel-lucent.com',
packages=['bambou', 'bambou.utils'],
description='REST Library for Nuage Networks',
long_description=open('README.md').read(),
install_requires=[line for line in open('requirements.txt')],
)
|
from setuptools import setup, find_packages
from plotbitrate import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='rezun-plotbitrate',
version=__version__,
packages=find_packages(),
description='A simple bitrate plotter for media files',
long_description=long_description,
long_description_content_type="text/markdown",
author='Steve Schmidt',
author_email='azcane@gmail.com',
license='BSD',
url='https://github.com/rezun/plotbitrate',
py_modules=['plotbitrate'],
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
],
keywords='ffprobe bitrate plot',
python_requires='>=3.5',
entry_points={
'console_scripts': [
'plotbitrate = plotbitrate:main'
]
},
install_requires=[
'matplotlib',
'pyqt5'
]
)
| Rename pypi package and change author
| from setuptools import setup, find_packages
from plotbitrate import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='plotbitrate',
version=__version__,
packages=find_packages(),
description='A simple bitrate plotter for media files',
long_description=long_description,
long_description_content_type="text/markdown",
author='Eric Work',
author_email='work.eric@gmail.com',
license='BSD',
url='https://github.com/zeroepoch/plotbitrate',
py_modules=['plotbitrate'],
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
],
keywords='ffprobe bitrate plot',
python_requires='>=3.5',
entry_points={
'console_scripts': [
'plotbitrate = plotbitrate:main'
]
},
install_requires=[
'matplotlib',
'pyqt5'
]
)
|
import re
from setuptools import setup
init_contents = open('random_object_id/__init__.py').read()
version = re.search('"([0-9\.]+)"', init_contents).group(1)
with open('README.rst', 'rb') as f:
long_description = f.read().decode('utf-8')
setup(
name='random-object-id',
packages=['random_object_id'],
entry_points={
'console_scripts': [
'random_object_id=random_object_id.random_object_id:main',
],
},
version=version,
description='Generate a random MongoDB ObjectId.',
long_description=long_description,
author='Max Rozentsveyg',
author_email='maxr@outlook.com',
url='https://github.com/mxr/random-object-id',
)
| Use helper method to read files
| import re
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
init_path = 'random_object_id/__init__.py'
version = re.search('"([0-9\.]+)"', read(init_path)).group(1)
long_description = read('README.rst')
setup(
name='random-object-id',
packages=['random_object_id'],
entry_points={
'console_scripts': [
'random_object_id=random_object_id.random_object_id:main',
],
},
version=version,
description='Generate a random MongoDB ObjectId.',
long_description=long_description,
author='Max Rozentsveyg',
author_email='maxr@outlook.com',
url='https://github.com/mxr/random-object-id',
)
|
from distutils.core import setup
from setuptools import find_packages
setup(
name='katagawa',
version='0.1.0',
packages=find_packages(),
url='https://github.com/SunDwarf/Katagawa',
license='MIT',
author='Laura Dickinson',
author_email='l@veriny.tf',
description='An asyncio ORM for Python 3.5'
)
| Add cached_property as a requirement.
| from distutils.core import setup
from setuptools import find_packages
setup(
name='katagawa',
version='0.1.0',
packages=find_packages(),
url='https://github.com/SunDwarf/Katagawa',
license='MIT',
author='Laura Dickinson',
author_email='l@veriny.tf',
description='An asyncio ORM for Python 3.5',
install_requires=[
"cached_property==1.3.0"
]
)
|
#!/usr/bin/env python
"""
whichpkg
========
Locate the path of a specific python module
"""
from setuptools import setup
setup(
name='whichpkg',
version='0.3.0',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/whichpkg',
description='Locate the path of a specific python module',
long_description=__doc__,
install_requires=[],
scripts=['bin/whichpkg'],
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| Read the __version__ from whichpkg directly
| #!/usr/bin/env python
"""
whichpkg
========
Locate the path of a specific python module
"""
import re
from setuptools import setup
version = re.search("__version__\s*=\s*'(.+)?'", open('bin/whichpkg').read()).groups(1)[0]
setup(
name='whichpkg',
version=version,
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/whichpkg',
description='Locate the path of a specific python module',
long_description=__doc__,
install_requires=[],
scripts=['bin/whichpkg'],
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
__all__ = [
'test',
]
def test():
"""
@desc: This is a convienance method to run all of the tests in `PVGeo`.
@notes:
This can be executed from either the command line of within a standard Python environment:
```bash
$ python -m PVGeo test
```
```py
>>> import PVGeo
>>> PVGeo.test()
```
"""
import unittest
import fnmatch
import os
path = os.path.dirname(__file__) # path to remove
path = path[0:path.rfind('/')]
test_file_strings = []
for root, dirnames, filenames in os.walk(os.path.dirname(__file__)):
for filename in fnmatch.filter(filenames, '__test__.py'):
test_file_strings.append(os.path.join(root, filename).replace(path, ''))
# Remove extensions and change to module import syle
module_strings = [str[1:len(str)-3].replace('/', '.') for str in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(str) for str
in module_strings]
testSuite = unittest.TestSuite(suites)
return unittest.TextTestRunner(verbosity=2).run(testSuite)
if __name__ == '__main__':
import sys
arg = sys.argv[1]
if arg.lower() == 'test':
test()
else:
raise RuntimeError('Unknown argument: %s' % arg)
| Add catch for Travis CI testing.
| __all__ = [
'test',
]
def test(close=False):
"""
@desc: This is a convienance method to run all of the tests in `PVGeo`.
@notes:
This can be executed from either the command line of within a standard Python environment:
```bash
$ python -m PVGeo test
```
```py
>>> import PVGeo
>>> PVGeo.test()
```
"""
import unittest
import fnmatch
import os
path = os.path.dirname(__file__) # path to remove
path = path[0:path.rfind('/')]
test_file_strings = []
for root, dirnames, filenames in os.walk(os.path.dirname(__file__)):
for filename in fnmatch.filter(filenames, '__test__.py'):
test_file_strings.append(os.path.join(root, filename).replace(path, ''))
# Remove extensions and change to module import syle
module_strings = [str[1:len(str)-3].replace('/', '.') for str in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(str) for str
in module_strings]
testSuite = unittest.TestSuite(suites)
run = unittest.TextTestRunner(verbosity=2).run(testSuite)
if close:
exit(len(run.failures) > 0 or len(run.errors) > 0)
return run
if __name__ == '__main__':
import sys
arg = sys.argv[1]
if arg.lower() == 'test':
test(True)
else:
raise RuntimeError('Unknown argument: %s' % arg)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = "tagman",
version = "0.1.8",
author = "ReThought Ltd",
author_email = "matthew@rethought-solutions.com",
url = "https://github.com/Rethought/tagman.git",
packages = find_packages('src'),
package_dir = {'':'src'},
license = "BSD",
keywords = "django, tagging, tagman",
description = "Curated tagging app for Django",
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
| Increment version to reflect change
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = "tagman",
version = "0.1.9",
author = "ReThought Ltd",
author_email = "matthew@rethought-solutions.com",
url = "https://github.com/Rethought/tagman.git",
packages = find_packages('src'),
package_dir = {'':'src'},
license = "BSD",
keywords = "django, tagging, tagman",
description = "Curated tagging app for Django",
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
#!/usr/bin/env python
# encoding: utf-8
"""
tempodb/setup.py
Copyright (c) 2012 TempoDB Inc. All rights reserved.
"""
import os
from setuptools import setup
def get_version(version_tuple):
version = '%s.%s' % (version_tuple[0], version_tuple[1])
if version_tuple[2]:
version = '%s.%s' % (version, version_tuple[2])
return version
# Dirty hack to get version number from tempodb/__init__.py - we can't
# import it as it depends on dateutil, requests, and simplejson which aren't
# installed until this file is read
init = os.path.join(os.path.dirname(__file__), 'tempodb', '__init__.py')
version_line = filter(lambda l: l.startswith('VERSION'), open(init))[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
setup(
name="tempodb",
version=VERSION,
author="TempoDB Inc",
author_email="dev@tempo-db.com",
description="A client for the TempoDB API",
packages=["tempodb"],
long_description="A client for the TempoDB API.",
install_requires=[
'python-dateutil==1.5',
'requests',
'simplejson',
]
)
| Use a custom version of the requests package to default to SSLv3
| #!/usr/bin/env python
# encoding: utf-8
"""
tempodb/setup.py
Copyright (c) 2012 TempoDB Inc. All rights reserved.
"""
import os
from setuptools import setup
def get_version(version_tuple):
version = '%s.%s' % (version_tuple[0], version_tuple[1])
if version_tuple[2]:
version = '%s.%s' % (version, version_tuple[2])
return version
# Dirty hack to get version number from tempodb/__init__.py - we can't
# import it as it depends on dateutil, requests, and simplejson which aren't
# installed until this file is read
init = os.path.join(os.path.dirname(__file__), 'tempodb', '__init__.py')
version_line = filter(lambda l: l.startswith('VERSION'), open(init))[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
setup(
name="tempodb",
version=VERSION,
author="TempoDB Inc",
author_email="dev@tempo-db.com",
description="A client for the TempoDB API",
packages=["tempodb"],
long_description="A client for the TempoDB API.",
dependency_links=[
'http://github.com/tempodb/requests/tarball/development#egg=requests-0.11.1ssl'
],
install_requires=[
'python-dateutil==1.5',
'requests==0.11.1ssl',
'simplejson',
]
)
|
# Copyright 2017 The Chromium Authors.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd.
import os
def GetPackageRelativePath(filename):
"""GetPackageRelativePath returns the path to |filename| relative to the root
of the package as determined by GetSourceRoot()."""
return os.path.relpath(filename, GetSourceRoot(filename)).replace('\\', '/')
def GetSourceRoot(filename):
"""Try to determine the root of the package which contains |filename|.
The current heuristic attempts to determine the root of the Chromium source
tree by searching up the directory hierarchy until we find a directory
containing src/.gn.
"""
# If filename is not absolute, then we are going to assume that it is
# relative to the current directory.
if not os.path.isabs(filename):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
raise IOError('File not found: {}'.format(filename))
source_root = os.path.dirname(filename)
while True:
gnfile = os.path.join(source_root, 'src', '.gn')
if os.path.exists(gnfile):
return source_root
new_package_root = os.path.dirname(source_root)
if new_package_root == source_root:
raise Exception("Can't determine package root")
source_root = new_package_root
| Raise a more specific exception when the source root cannot be found.
| # Copyright 2017 The Chromium Authors.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd.
import os
class NoSourceRootError(Exception):
"""Exception raise when the CodeSearch library can't determine the location
of the local Chromium checkout."""
pass
def GetPackageRelativePath(filename):
"""GetPackageRelativePath returns the path to |filename| relative to the root
of the package as determined by GetSourceRoot()."""
return os.path.relpath(filename, GetSourceRoot(filename)).replace('\\', '/')
def GetSourceRoot(filename):
"""Try to determine the root of the package which contains |filename|.
The current heuristic attempts to determine the root of the Chromium source
tree by searching up the directory hierarchy until we find a directory
containing src/.gn.
"""
# If filename is not absolute, then we are going to assume that it is
# relative to the current directory.
if not os.path.isabs(filename):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
raise NoSourceRootError('File not found: {}'.format(filename))
source_root = os.path.dirname(filename)
while True:
gnfile = os.path.join(source_root, 'src', '.gn')
if os.path.exists(gnfile):
return source_root
new_package_root = os.path.dirname(source_root)
if new_package_root == source_root:
raise NoSourceRootError("Can't determine package root")
source_root = new_package_root
|
import zephyr.util
from zephyr.collector import MeasurementCollector
from zephyr.bioharness import BioHarnessSignalAnalysis, BioHarnessPacketHandler
from zephyr.message import MessagePayloadParser
from zephyr.testing import visualize_measurements, test_data_dir, VirtualSerial
from zephyr.protocol import Protocol
def main():
zephyr.util.DISABLE_CLOCK_DIFFERENCE_ESTIMATION = True
collector = MeasurementCollector()
rr_signal_analysis = BioHarnessSignalAnalysis([], [collector.handle_event])
signal_packet_handlers = [collector.handle_signal, rr_signal_analysis.handle_signal]
signal_packet_handler = BioHarnessPacketHandler(signal_packet_handlers, [collector.handle_event])
payload_parser = MessagePayloadParser([signal_packet_handler.handle_packet])
ser = VirtualSerial(test_data_dir + "/120-second-bt-stream.dat")
protocol = Protocol(ser, payload_parser.handle_message)
try:
protocol.run()
except EOFError:
pass
visualize_measurements(collector)
if __name__ == "__main__":
main()
| Fix test data plotting to use the changed interfaces |
import zephyr.util
from zephyr.collector import MeasurementCollector
from zephyr.bioharness import BioHarnessSignalAnalysis, BioHarnessPacketHandler
from zephyr.message import MessagePayloadParser
from zephyr.testing import visualize_measurements, test_data_dir, VirtualSerial
from zephyr.protocol import Protocol, MessageFrameParser
def main():
zephyr.util.DISABLE_CLOCK_DIFFERENCE_ESTIMATION = True
collector = MeasurementCollector()
rr_signal_analysis = BioHarnessSignalAnalysis([], [collector.handle_event])
signal_packet_handlers = [collector.handle_signal, rr_signal_analysis.handle_signal]
signal_packet_handler = BioHarnessPacketHandler(signal_packet_handlers, [collector.handle_event])
payload_parser = MessagePayloadParser([signal_packet_handler.handle_packet])
message_parser = MessageFrameParser(payload_parser.handle_message)
ser = VirtualSerial(test_data_dir + "/120-second-bt-stream.dat")
protocol = Protocol(ser, [message_parser.parse_data])
try:
protocol.run()
except EOFError:
pass
visualize_measurements(collector)
if __name__ == "__main__":
main()
|
import unittest
import httpretty
from fbmsgbot.http_client import HttpClient
from fbmsgbot.resources.urls import FACEBOOK_MESSAGES_POST_URL
class TestHttpClient(unittest.TestCase):
"""
Test the HttpClient
"""
@httpretty.activate
def test_submit_GET_request(self):
httpretty.register_uri(httpretty.GET,
FACEBOOK_MESSAGES_POST_URL + '/users/123',
body='{ \
"data" : [1,2,3] \
}')
def completion(payload, error):
assert payload['data'] == [1, 2, 3]
assert payload['data'] != [3, 2, 1]
client = HttpClient('123123')
client.submit_request('/users/123', 'GET', None, completion)
@httpretty.activate
def test_submite_POST_request(self):
httpretty.register_uri(httpretty.POST,
FACEBOOK_MESSAGES_POST_URL + 'users/',
body='{ \
"name": "ben", \
"age": 12 \
}', status=201)
def completion(payload, error):
if error is None:
assert payload['name'] == 'ben'
assert payload['age'] == 12
else:
raise
client = HttpClient('123123')
client.submit_request('users/', 'POST', None, completion)
| Update tests to remove completion blocks
| import unittest
import httpretty
from fbmsgbot.http_client import HttpClient
from fbmsgbot.resources.urls import FACEBOOK_MESSAGES_POST_URL
class TestHttpClient(unittest.TestCase):
"""
Test the HttpClient
"""
@httpretty.activate
def test_submit_GET_request(self):
httpretty.register_uri(httpretty.GET,
FACEBOOK_MESSAGES_POST_URL + '/users/123',
body='{ \
"data" : [1,2,3] \
}', status=200)
client = HttpClient('123123')
response, error = client.submit_request('/users/123',
'GET', None)
assert response['data'] == [1, 2, 3]
assert response['data'] != [3, 2, 1]
@httpretty.activate
def test_submite_POST_request(self):
httpretty.register_uri(httpretty.POST,
FACEBOOK_MESSAGES_POST_URL + 'users/',
body='{ \
"name": "ben", \
"age": 12 \
}', status=201)
client = HttpClient('123123')
response, error = client.submit_request('users/',
'POST', None)
if error is None:
assert response['name'] == 'ben'
assert response['age'] == 12
else:
raise
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from morango.models import UUIDField
from kolibri.core.auth.models import AbstractFacilityDataModel
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.permissions.general import IsOwn
class Bookmark(AbstractFacilityDataModel):
content_id = UUIDField(blank=True, null=True)
channel_id = UUIDField(blank=True, null=True)
contentnode_id = UUIDField()
user = models.ForeignKey(FacilityUser, blank=False)
created = models.DateTimeField(default=timezone.now, db_index=True)
morango_model_name = "bookmark"
permissions = IsOwn()
def infer_dataset(self, *args, **kwargs):
if self.user_id:
return self.cached_related_dataset_lookup("user")
elif self.dataset_id:
# confirm that there exists a facility with that dataset_id
try:
return Facility.objects.get(dataset_id=self.dataset_id).dataset_id
except Facility.DoesNotExist:
pass
# if no user or matching facility, infer dataset from the default facility
facility = Facility.get_default_facility()
if not facility:
raise AssertionError(
"Before you can save bookmarks, you must have a facility"
)
return facility.dataset_id
def calculate_partition(self):
return "{dataset_id}:user-rw:{user_id}".format(
dataset_id=self.dataset_id, user_id=self.user.id
)
class Meta:
# Ensures that we do not save duplicates, otherwise raises a
# django.db.utils.IntegrityError
unique_together = (
"user",
"contentnode_id",
)
| Remove unnecessary cruft from Bookmark.infer_dataset
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from morango.models import UUIDField
from kolibri.core.auth.models import AbstractFacilityDataModel
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.permissions.general import IsOwn
class Bookmark(AbstractFacilityDataModel):
content_id = UUIDField(blank=True, null=True)
channel_id = UUIDField(blank=True, null=True)
contentnode_id = UUIDField()
user = models.ForeignKey(FacilityUser, blank=False)
created = models.DateTimeField(default=timezone.now, db_index=True)
morango_model_name = "bookmark"
permissions = IsOwn()
def infer_dataset(self, *args, **kwargs):
return self.cached_related_dataset_lookup("user")
def calculate_partition(self):
return "{dataset_id}:user-rw:{user_id}".format(
dataset_id=self.dataset_id, user_id=self.user.id
)
class Meta:
# Ensures that we do not save duplicates, otherwise raises a
# django.db.utils.IntegrityError
unique_together = (
"user",
"contentnode_id",
)
|
from django.conf import settings
from .models import SiteSetting
def get_site_settings(request):
if not hasattr(request, 'site_settings'):
site_settings_id = getattr(settings, 'SITE_SETTINGS_ID', None)
request.site_settings = get_site_settings_uncached(site_settings_id)
return request.site_settings
def get_site_settings_uncached(site_id=None):
return SiteSetting.objects.get(pk=site_id)
| Define function for getting setting value by key
| from django.conf import settings
from .models import SiteSetting
def get_site_settings(request):
if not hasattr(request, 'site_settings'):
site_settings_id = getattr(settings, 'SITE_SETTINGS_ID', None)
request.site_settings = get_site_settings_uncached(site_settings_id)
return request.site_settings
def get_site_settings_uncached(site_id=None):
return SiteSetting.objects.get(pk=site_id)
def get_setting_value(request, key):
site_settings = get_site_settings(request)
return getattr(site_settings, key, None)
|
import sqlite3
def main():
conn = sqlite3.connect("database")
cursor = conn.cursor()
# I claim this gives the current score. Another formulation is
# select trackid, score, max(scoreid) from scores group by trackid;
# cursor.execute("""select trackid, score from scores
# group by trackid order by scoreid""")
cursor.execute("""select scores.trackid, score, path from scores, tracks
where scores.trackid = tracks.trackid
group by scores.trackid order by scoreid""")
results = cursor.fetchall()
for result in results:
print str(result[1]) + "\t" + result[2]
if __name__ == '__main__':
main()
| Use new column for score.
| import sqlite3
def main():
conn = sqlite3.connect("database")
cursor = conn.cursor()
# I claim this gives the current score. Another formulation is
# select trackid, score, max(scoreid) from scores group by trackid;
# cursor.execute("""select trackid, score from scores
# group by trackid order by scoreid""")
# cursor.execute("""select scores.trackid, score, path from scores, tracks
# where scores.trackid = tracks.trackid
# group by scores.trackid order by scoreid""")
cursor.execute("""select score, path from tracks
where score is not null""")
results = cursor.fetchall()
for result in results:
print str(result[0]) + "\t" + result[1]
if __name__ == '__main__':
main()
|
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
| Make import mock.Mock or unittest.mock.Mock easier
| import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest # NOQA
else:
import unittest # NOQA
if sys.version_info < (3,):
from mock import Mock, patch # NOQA
else:
from unittest.mock import Mock, patch # NOQA
# Frosted doesn't yet support noqa flags, so this hides the imported/unused
# complaints
Mock, patch, unittest
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class AuthorshipFormMixin(object):
"""Set the ``created_by`` and ``updated_by`` fields on a model.
This form requires that a property, ``self.user`` be set to an instance of
:py:class`~django.contrib.auth.models.User` before the ``save()`` method is
called.
"""
def save(self, *args, **kwargs):
self.instance.updated_by = self.user
if not self.instance.pk:
self.instance.created_by = self.user
return super(AuthorshipFormMixin, self).save(*args, **kwargs)
| Set the `self.user` property on the `AuthorshipFormMixin`.
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class AuthorshipFormMixin(object):
"""Set the ``created_by`` and ``updated_by`` fields on a model.
Requires that a ``User`` instance be passed in to the constructor. Views
that inherit from ``AuthorshipViewMixin`` automatically pass this in.
"""
def __init__(self, user, *args, **kwargs):
self.user = user
super(AuthorshipFormMixin, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
self.instance.updated_by = self.user
if not self.instance.pk:
self.instance.created_by = self.user
return super(AuthorshipFormMixin, self).save(*args, **kwargs)
|
import enum
from sqlalchemy import Column, String, Boolean, Integer, DateTime, Enum
from virtool.postgres import Base
class UploadType(enum.Enum):
hmm = "hmm"
reference = "reference"
reads = "reads"
subtraction = "subtraction"
null = None
class Upload(Base):
__tablename__ = "uploads"
id = Column(Integer, primary_key=True)
created_at = Column(DateTime)
name = Column(String)
name_on_disk = Column(String, unique=True)
ready = Column(Boolean)
removed = Column(Boolean)
reserved = Column(Boolean)
size = Column(Integer)
type = Column(Enum(UploadType))
user = Column(String)
uploaded_at = Column(DateTime)
def __repr__(self):
return """<Upload(id= {self.id}, created_at={self.created_at}, name={self.name}, \
name_on_disk={self.name_on_disk}, ready={self.ready}, removed={self.removed}, reserved={self.reserved}, \
size={self.size}, type={self.type}, user={self.user}, uploaded_at={self.uploaded_at}>"""
| Declare subclass of `UploadType` to be `str`
* Fixes issues with JSON serializing
* Revert `__repr__` string format changes as the newlines created large gaps of whitespace | import enum
from sqlalchemy import Column, String, Boolean, Integer, DateTime, Enum
from virtool.postgres import Base
class UploadType(str, enum.Enum):
hmm = "hmm"
reference = "reference"
reads = "reads"
subtraction = "subtraction"
null = None
class Upload(Base):
__tablename__ = "uploads"
id = Column(Integer, primary_key=True)
created_at = Column(DateTime)
name = Column(String)
name_on_disk = Column(String, unique=True)
ready = Column(Boolean)
removed = Column(Boolean)
reserved = Column(Boolean)
size = Column(Integer)
type = Column(Enum(UploadType))
user = Column(String)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<Upload(id={self.id}, created_at={self.created_at}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, ready={self.ready}, removed={self.removed}, " \
f"reserved={self.reserved}, " f"size={self.size}, type={self.type}, user={self.user}, " \
f"uploaded_at={self.uploaded_at}>"
|
# Copyright (C) 2016 University of Zurich. All rights reserved.
#
# This file is part of MSRegistry Backend.
#
# MSRegistry Backend is free software: you can redistribute it and/or
# modify it under the terms of the version 3 of the GNU Affero General
# Public License as published by the Free Software Foundation, or any
# other later version.
#
# MSRegistry Backend is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the version
# 3 of the GNU Affero General Public License for more details.
#
# You should have received a copy of the version 3 of the GNU Affero
# General Public License along with MSRegistry Backend. If not, see
# <http://www.gnu.org/licenses/>.
__author__ = "Filippo Panessa <filippo.panessa@uzh.ch>"
__copyright__ = ("Copyright (c) 2016 S3IT, Zentrale Informatik,"
" University of Zurich")
from flask import jsonify
from . import auth
from app.auth.decorators import requires_auth
@auth.route('/test')
@requires_auth
def authTest():
return jsonify({'status': 200,
'code': 'authorization_success',
'description': "All good. You only get this message if you're authenticated."
})
| Remove code field from API /auth/test response
| # Copyright (C) 2016 University of Zurich. All rights reserved.
#
# This file is part of MSRegistry Backend.
#
# MSRegistry Backend is free software: you can redistribute it and/or
# modify it under the terms of the version 3 of the GNU Affero General
# Public License as published by the Free Software Foundation, or any
# other later version.
#
# MSRegistry Backend is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the version
# 3 of the GNU Affero General Public License for more details.
#
# You should have received a copy of the version 3 of the GNU Affero
# General Public License along with MSRegistry Backend. If not, see
# <http://www.gnu.org/licenses/>.
__author__ = "Filippo Panessa <filippo.panessa@uzh.ch>"
__copyright__ = ("Copyright (c) 2016 S3IT, Zentrale Informatik,"
" University of Zurich")
from flask import jsonify
from . import auth
from app.auth.decorators import requires_auth
@auth.route('/test')
@requires_auth
def authTest():
return jsonify({'code': 'authorization_success',
'description': "All good. You only get this message if you're authenticated."
})
|
from flask import Blueprint
from flask_restplus import Api
from .tools import api as tools
from .project import api as project
from .inputs import api as inputs
from .dashboard import api as dashboard
from .glossary import api as glossary
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(blueprint)
api.add_namespace(tools, path='/tools')
api.add_namespace(project, path='/project')
api.add_namespace(inputs, path='/inputs')
api.add_namespace(dashboard, path='/dashboards')
api.add_namespace(glossary, path='/glossary')
| Add general error handler for unhandled exceptions in api
| from flask import Blueprint
from flask_restplus import Api
from .tools import api as tools
from .project import api as project
from .inputs import api as inputs
from .dashboard import api as dashboard
from .glossary import api as glossary
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(blueprint)
api.add_namespace(tools, path='/tools')
api.add_namespace(project, path='/project')
api.add_namespace(inputs, path='/inputs')
api.add_namespace(dashboard, path='/dashboards')
api.add_namespace(glossary, path='/glossary')
@api.errorhandler
def default_error_handler(error):
"""Default error handler"""
import traceback
trace = traceback.format_exc()
return {'message': error.message, 'trace': trace}, 500
|
import sys, os, subprocess
def run(args, workdir=None):
p = subprocess.Popen(args, close_fds=True, cwd=workdir)
return p.wait()
if sys.platform == "darwin":
shell_open_command = "open"
else:
shell_open_command = "xdg-open"
def shell_open(path, workdir=None):
return run([shell_open_command, path], workdir=workdir) == 0
def get_user_config_dir(name=""):
path = os.environ.get("HOME", "")
if name:
path = os.path.join(path, "." + name)
return os.path.realpath(path)
__all__ = (
"shell_open",
"get_user_config_dir",
)
| Use XDG_CONFIG_HOME for configuration directory.
| import sys, os, subprocess
def run(args, workdir=None):
p = subprocess.Popen(args, close_fds=True, cwd=workdir)
return p.wait()
if sys.platform == "darwin":
shell_open_command = "open"
else:
shell_open_command = "xdg-open"
def shell_open(path, workdir=None):
return run([shell_open_command, path], workdir=workdir) == 0
def get_user_config_dir(name=""):
path = os.environ.get("XDG_CONFIG_HOME")
if not path:
path = os.path.join(os.environ.get("HOME", "/"), ".config")
if name:
path = os.path.join(path, name)
return os.path.realpath(path)
__all__ = (
"shell_open",
"get_user_config_dir",
)
|
import struct
from hsdecomp.types import *
def read_half_word(settings, file_offset):
return struct.unpack(settings.rt.halfword.struct, settings.binary[file_offset:file_offset+settings.rt.halfword.size])[0]
def read_word(settings, file_offset):
return struct.unpack(settings.rt.word.struct, settings.binary[file_offset:file_offset+settings.rt.word.size])[0]
def pointer_offset(settings, pointer, offset):
if isinstance(pointer, Tagged):
offset += pointer.tag
assert isinstance(pointer.untagged, Offset)
return Tagged(untagged = Offset(base = pointer.untagged.base, index = pointer.untagged.index + offset // settings.rt.word.size), tag = offset % settings.rt.word.size)
elif isinstance(pointer, StaticValue):
return StaticValue(value = pointer.value + offset)
elif isinstance(pointer, UnknownValue):
return UnknownValue()
else:
assert False,"bad pointer to offset"
def dereference(settings, parsed, pointer, stack):
if isinstance(pointer, Offset):
if isinstance(pointer.base, HeapPointer):
return parsed['heaps'][pointer.base.heap_segment][pointer.index]
elif isinstance(pointer.base, StackPointer):
return stack[pointer.index]
elif isinstance(pointer, StaticValue):
assert pointer.value % settings.rt.word.size == 0
return Tagged(StaticValue(value = read_word(settings, settings.data_offset + pointer.value)), tag = 0)
| Kill obsolete case in pointer_offset
| import struct
from hsdecomp.types import *
def read_half_word(settings, file_offset):
return struct.unpack(settings.rt.halfword.struct, settings.binary[file_offset:file_offset+settings.rt.halfword.size])[0]
def read_word(settings, file_offset):
return struct.unpack(settings.rt.word.struct, settings.binary[file_offset:file_offset+settings.rt.word.size])[0]
def pointer_offset(settings, pointer, offset):
if isinstance(pointer, Tagged):
offset += pointer.tag
assert isinstance(pointer.untagged, Offset)
return Tagged(untagged = Offset(base = pointer.untagged.base, index = pointer.untagged.index + offset // settings.rt.word.size), tag = offset % settings.rt.word.size)
elif isinstance(pointer, UnknownValue):
return UnknownValue()
else:
assert False,"bad pointer to offset"
def dereference(settings, parsed, pointer, stack):
if isinstance(pointer, Offset):
if isinstance(pointer.base, HeapPointer):
return parsed['heaps'][pointer.base.heap_segment][pointer.index]
elif isinstance(pointer.base, StackPointer):
return stack[pointer.index]
elif isinstance(pointer, StaticValue):
assert pointer.value % settings.rt.word.size == 0
return Tagged(StaticValue(value = read_word(settings, settings.data_offset + pointer.value)), tag = 0)
|
import unittest
import importlib_resources as resources
from . import data01
from . import util
class ContentsTests:
@property
def contents(self):
return sorted(
[el for el in list(resources.contents(self.data)) if el != '__pycache__']
)
class ContentsDiskTests(ContentsTests, unittest.TestCase):
def setUp(self):
self.data = data01
def test_contents(self):
self.assertEqual(
self.contents,
[
'__init__.py',
'binary.file',
'subdirectory',
'utf-16.file',
'utf-8.file',
],
)
class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
def test_contents(self):
self.assertEqual(
self.contents,
[
'__init__.py',
'binary.file',
'subdirectory',
'utf-16.file',
'utf-8.file',
],
)
class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
def test_contents(self):
self.assertEqual(
self.contents,
[
'binary.file',
'utf-16.file',
'utf-8.file',
],
)
| Consolidate some behavior and re-use 'set' comparison for less strict unordered comparisons.
| import unittest
import importlib_resources as resources
from . import data01
from . import util
class ContentsTests:
expected = {
'__init__.py',
'binary.file',
'subdirectory',
'utf-16.file',
'utf-8.file',
}
def test_contents(self):
assert self.expected <= set(resources.contents(self.data))
class ContentsDiskTests(ContentsTests, unittest.TestCase):
def setUp(self):
self.data = data01
class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
pass
class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
expected = {
# no __init__ because of namespace design
# no subdirectory as incidental difference in fixture
'binary.file',
'utf-16.file',
'utf-8.file',
}
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
|
from django.template.loader_tags import register
from django.template import loader, Context, defaultfilters, TemplateDoesNotExist
import markdown
presenters = {
'Speaker': 'presenters/speaker_presenter.html'
}
generic_template = 'presenters/object_presenter.html'
@register.simple_tag(takes_context=True)
def present(context, obj):
model_name = type(obj).__name__
template_name = presenters.get(model_name, generic_template)
t = loader.get_template(template_name)
return t.render(Context({
'model_name': model_name,
'obj': obj,
}))
@register.filter
def noval(data, placeholder):
if data:
return data
return placeholder
@register.simple_tag(takes_context=True)
def include_md(context, template_name):
lang = context['LANGUAGE_CODE'].replace('-', '_')
try:
t = loader.render_to_string('markdown/{}/{}'.format(lang, template_name), context)
except TemplateDoesNotExist:
t = loader.render_to_string('markdown/en_US/{}'.format(template_name), context)
html = markdown.markdown(t)
return defaultfilters.safe(html)
| :bug: Fix a bug in the template tag.
| from django.template.loader_tags import register
from django.template import loader, Context, defaultfilters, TemplateDoesNotExist
import markdown
presenters = {
'Speaker': 'presenters/speaker_presenter.html'
}
generic_template = 'presenters/object_presenter.html'
@register.simple_tag(takes_context=True)
def present(context, obj):
model_name = type(obj).__name__
template_name = presenters.get(model_name, generic_template)
t = loader.get_template(template_name)
return t.render(Context({
'model_name': model_name,
'obj': obj,
}))
@register.filter
def noval(data, placeholder):
if data:
return data
return placeholder
@register.simple_tag(takes_context=True)
def include_md(context, template_name):
lang = context['LANGUAGE_CODE'].replace('-', '_')
try:
t = loader.render_to_string('markdown/{}/{}'.format(lang, template_name), context)
except TemplateDoesNotExist:
t = loader.render_to_string('markdown/en/{}'.format(template_name), context)
html = markdown.markdown(t)
return defaultfilters.safe(html)
|
# Copyright 2013 John Reese
# Licensed under the MIT license
filename_regex = r'(?:[a-z]+_)#(?P<channel>[a-z]+)_(?P<date>\d{8}).log'
channel_regex_group = 1
date_regex_group = 2
date_format = r'%Y%m%d'
| Clean up and document default config values
| # Copyright 2013 John Reese
# Licensed under the MIT license
# the regex to parse data from irc log filenames.
# must contain two named matching groups:
# channel: the name of the channel
# date: the date of the conversation
filename_regex = r'#?(?P<channel>[a-z]+)_(?P<date>\d{8}).log'
# the format of the date content in the matched filename.
# must follow python's datetime.strptime() format, as defined at
# http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
filename_date_format = r'%Y%m%d'
|
from django.conf.urls import patterns, url
urlpatterns = patterns('mailqueue.views',
url(r'^clear$', 'clear_sent_messages', name='clear_sent_messages'),
url(r'^$', 'run_mail_job', name='run_mail_job'),
)
| Remove warning "deprecated" in url.py
version django=1.9.6
RemovedInDjango110Warning: django.conf.urls.patterns() is deprecated and will be removed in Django 1.10. Update your urlpatterns to be a list of django.conf.urls.url() instances instead.
| from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^clear$', views.clear_sent_messages, name='clear_sent_messages'),
url(r'^$', views.run_mail_job, name='run_mail_job'),
]
|
#!/usr/bin/env python3
# coding: utf-8
"""
================================================
Optimization Benchmark: Plot the Sphere Function
================================================
This example show how to plot the *Sphere function*.
"""
###############################################################################
# Import required packages
import numpy as np
import matplotlib.pyplot as plt
from ailib.utils.plot import plot_2d_contour_solution_space, plot_2d_solution_space
from ailib.optimize.functions.unconstrained import sphere
###############################################################################
# Plot the sphere function
plot_2d_solution_space(sphere,
xmin=-2*np.ones(2),
xmax=2*np.ones(2),
xstar=np.zeros(2),
angle_view=(55, 83),
title="Sphere function",
output_file_name="sphere_3d.png")
plt.tight_layout()
plt.show()
###############################################################################
# Plot the contours
plot_2d_contour_solution_space(sphere,
xmin=-10*np.ones(2),
xmax=10*np.ones(2),
xstar=np.zeros(2),
title="Sphere function",
output_file_name="sphere.png")
plt.tight_layout()
plt.show()
| Switch off the output file generation.
| #!/usr/bin/env python3
# coding: utf-8
"""
================================================
Optimization Benchmark: Plot the Sphere Function
================================================
This example show how to plot the *Sphere function*.
"""
###############################################################################
# Import required packages
import numpy as np
import matplotlib.pyplot as plt
from ailib.utils.plot import plot_2d_contour_solution_space, plot_2d_solution_space
from ailib.optimize.functions.unconstrained import sphere
###############################################################################
# Plot the sphere function
plot_2d_solution_space(sphere,
xmin=-2*np.ones(2),
xmax=2*np.ones(2),
xstar=np.zeros(2),
angle_view=(55, 83),
title="Sphere function")
plt.tight_layout()
plt.show()
###############################################################################
# Plot the contours
plot_2d_contour_solution_space(sphere,
xmin=-10*np.ones(2),
xmax=10*np.ones(2),
xstar=np.zeros(2),
title="Sphere function")
plt.tight_layout()
plt.show()
|
from cla_public.config.common import *
DEBUG = os.environ.get('SET_DEBUG', False) == 'True'
SECRET_KEY = os.environ['SECRET_KEY']
SESSION_COOKIE_SECURE = os.environ.get('CLA_ENV', '') in ['prod', 'staging']
HOST_NAME = os.environ.get('HOST_NAME') or os.environ.get('HOSTNAME')
BACKEND_BASE_URI = os.environ['BACKEND_BASE_URI']
LAALAA_API_HOST = os.environ.get(
'LAALAA_API_HOST', 'https://prod.laalaa.dsd.io')
LOGGING['loggers'] = {
'': {
'handlers': ['console'],
'level': os.environ.get('LOG_LEVEL', 'INFO')
}
}
| Enable logstash formatter for console logs
| from cla_public.config.common import *
DEBUG = os.environ.get('SET_DEBUG', False) == 'True'
SECRET_KEY = os.environ['SECRET_KEY']
SESSION_COOKIE_SECURE = os.environ.get('CLA_ENV', '') in ['prod', 'staging']
HOST_NAME = os.environ.get('HOST_NAME') or os.environ.get('HOSTNAME')
BACKEND_BASE_URI = os.environ['BACKEND_BASE_URI']
LAALAA_API_HOST = os.environ.get(
'LAALAA_API_HOST', 'https://prod.laalaa.dsd.io')
LOGGING['handlers']['console']['formatter'] = 'logstash'
LOGGING['loggers'] = {
'': {
'handlers': ['console'],
'level': os.environ.get('LOG_LEVEL', 'INFO')
}
}
|
from .base import Config
__version__ = '1.0a1'
try:
from django.apps import AppConfig # noqa
except ImportError:
config = Config()
else:
default_app_config = 'constance.apps.ConstanceConfig'
| Make the config object lazy for old Djangos.
This should prevent import time side effects from instantiating the config object directly there.
| from .base import Config
from django.utils.functional import SimpleLazyObject
__version__ = '1.0a1'
try:
from django.apps import AppConfig # noqa
except ImportError:
config = SimpleLazyObject(Config)
else:
default_app_config = 'constance.apps.ConstanceConfig'
|
# -*- coding: utf-8 -*-
"""
Postpasses over the LLVM IR.
The signature of each postpass is postpass(env, ee, lmod, lfunc) -> lfunc
"""
from __future__ import print_function, division, absolute_import
from numba.support.math_support import math_support
default_postpasses = {}
def register_default(name):
def dec(f):
default_postpasses[name] = f
return f
return dec
# ______________________________________________________________________
# Postpasses
@register_default('math')
def postpass_link_math(env, ee, lmod, lfunc):
"numba.math.* -> mathcode.*"
math_support.link_llvm_math_intrinsics(ee, lmod, math_support.llvm_library)
return lfunc
| Resolve math functions from LLVM math library
| # -*- coding: utf-8 -*-
"""
Postpasses over the LLVM IR.
The signature of each postpass is postpass(env, ee, lmod, lfunc) -> lfunc
"""
from __future__ import print_function, division, absolute_import
from numba.support.math_support import math_support
default_postpasses = {}
def register_default(name):
def dec(f):
default_postpasses[name] = f
return f
return dec
# ______________________________________________________________________
# Postpasses
@register_default('math')
def postpass_link_math(env, ee, lmod, lfunc):
"numba.math.* -> mathcode.*"
math_support.link_llvm_math_intrinsics(ee, lmod, math_support.math_library,
math_support.link_llvm_asm)
return lfunc
|
""" Tests for test cases directory. """
# TODO: check http://code.google.com/p/unladen-swallow/wiki/Benchmarks
import os
from distutils.version import LooseVersion
import numpy
import unittest
from pythran.tests import TestFromDir
class TestCases(TestFromDir):
""" Class to check all tests in the cases directory. """
path = os.path.join(os.path.dirname(__file__), "cases")
TestCases.populate(TestCases)
if LooseVersion(numpy.__version__) >= '1.20':
del TestCases.test_train_equalizer_norun0
del TestCases.test_train_eq_run0
del TestCases.test_train_eq_run1
if __name__ == '__main__':
unittest.main()
| Disable loopy-jacob test on old gcc version
This one consumes too much memory and fails the validation, but it compiles fine
with a modern gcc or clang, so let's just blacklist it.
| """ Tests for test cases directory. """
# TODO: check http://code.google.com/p/unladen-swallow/wiki/Benchmarks
import os
from distutils.version import LooseVersion
import numpy
import unittest
from pythran.tests import TestFromDir
class TestCases(TestFromDir):
""" Class to check all tests in the cases directory. """
path = os.path.join(os.path.dirname(__file__), "cases")
TestCases.populate(TestCases)
if LooseVersion(numpy.__version__) >= '1.20':
del TestCases.test_train_equalizer_norun0
del TestCases.test_train_eq_run0
del TestCases.test_train_eq_run1
# too template intensive for old g++
if os.environ.get('CXX', None) == 'g++-5':
del TestCases.test_loopy_jacob_run0
if __name__ == '__main__':
unittest.main()
|
import re
from .models import CommCareBuild, CommCareBuildConfig
def get_all_versions(versions=None):
"""
Returns a list of all versions found in the database,
plus those in the optional list parameter.
"""
versions = versions or []
db = CommCareBuild.get_db()
results = db.view('builds/all', group_level=1).all()
versions += [result['key'][0] for result in results]
return sorted(list(set(versions)))
def get_default_build_spec():
return CommCareBuildConfig.fetch().get_default()
def extract_build_info_from_filename(content_disposition):
"""
>>> extract_build_info_from_filename(
... 'attachment; filename=CommCare_CommCare_2.13_32703_artifacts.zip'
... )
('2.13', 32703)
>>> try:
... extract_build_info_from_filename('foo')
... except ValueError as e:
... print e
Could not find filename like 'CommCare_CommCare_([\\\\d\\\\.]+)_(\\\\d+)_artifacts.zip' in 'foo'
"""
pattern = r'CommCare_CommCare_([\d\.]+)_(\d+)_artifacts.zip'
match = re.search(pattern, content_disposition)
if match:
version, number = match.groups()
return version, int(number)
else:
raise ValueError('Could not find filename like {!r} in {!r}'.format(
pattern, content_disposition))
| Remove optional arg that's now always used
| import re
from .models import CommCareBuild, CommCareBuildConfig
def get_all_versions(versions):
"""
Returns a list of all versions found in the database,
plus those in the optional list parameter.
"""
db = CommCareBuild.get_db()
results = db.view('builds/all', group_level=1).all()
versions += [result['key'][0] for result in results]
return sorted(list(set(versions)))
def get_default_build_spec():
return CommCareBuildConfig.fetch().get_default()
def extract_build_info_from_filename(content_disposition):
"""
>>> extract_build_info_from_filename(
... 'attachment; filename=CommCare_CommCare_2.13_32703_artifacts.zip'
... )
('2.13', 32703)
>>> try:
... extract_build_info_from_filename('foo')
... except ValueError as e:
... print e
Could not find filename like 'CommCare_CommCare_([\\\\d\\\\.]+)_(\\\\d+)_artifacts.zip' in 'foo'
"""
pattern = r'CommCare_CommCare_([\d\.]+)_(\d+)_artifacts.zip'
match = re.search(pattern, content_disposition)
if match:
version, number = match.groups()
return version, int(number)
else:
raise ValueError('Could not find filename like {!r} in {!r}'.format(
pattern, content_disposition))
|
from plugins.util import command, get_url
import json
import re
SPOTIFY_URI_REGEX = r"(?<=spotify:)(?:track|album|artist):[a-zA-Z0-9]{22}"
ENDPOINT = "https://api.spotify.com/v1/{0}s/{1}"
@command()
def spotify(m):
spotify_uris = re.findall(SPOTIFY_URI_REGEX, m.body)
for spotify_uri in spotify_uris:
try:
type, id = _parse_spotify_uri(spotify_uri)
req = get_url(m, ENDPOINT.format(type, id))
if req:
blob = json.loads(req)
if type == "track" or type == "album":
m.bot.private_message(m.location,
blob["artists"][0]["name"] + " - " + blob["name"])
else:
m.bot.private_message(m.location, blob["name"])
except ValueError:
m.bot.logger.error("Invalid Spotify URI: " + spotify_uri)
def _parse_spotify_uri(s):
[type, id] = s.split(':')
return type, id
| Change formatting of response, add URL
| from plugins.util import command, get_url
import json
import re
SPOTIFY_URI_REGEX = r"(?<=spotify:)(?:track|album|artist):[a-zA-Z0-9]{22}"
ENDPOINT = "https://api.spotify.com/v1/{0}s/{1}"
@command()
def spotify(m):
spotify_uris = re.findall(SPOTIFY_URI_REGEX, m.body)
for spotify_uri in spotify_uris:
try:
type, id = _parse_spotify_uri(spotify_uri)
except ValueError:
m.bot.logger.error("Invalid Spotify URI: " + spotify_uri)
else:
req = get_url(m, ENDPOINT.format(type, id))
if req:
blob = json.loads(req)
if type == "track" or type == "album":
m.bot.private_message(m.location, '"{0}" by {1} - {2}'
.format(blob["name"], blob["artists"][0]["name"],
blob["external_urls"]["spotify"]))
else:
m.bot.private_message(m.location, "{0} - {1}"
.format(blob["name"], blob["external_urls"]["spotify"]))
def _parse_spotify_uri(s):
[type, id] = s.split(':')
return type, id
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import make_option
from optparse import OptionParser
from scaffolder.core.commands import BaseCommand
from scaffolder.core.template import TemplateManager
class ListCommand(BaseCommand):
def __init__(self, name, help='', aliases=(), stdout=None, stderr=None):
help = 'Template command help entry'
parser = OptionParser(
version=self.get_version(),
option_list=self.get_option_list(),
usage='\n %prog {0} [OPTIONS]'.format(name)
)
aliases = ('tmp',)
BaseCommand.__init__(self, name, parser=parser, help=help, aliases=aliases)
def run(self, *args, **options):
manger = TemplateManager()
manger.list()
def get_default_option(self):
return []
| ListCommand: Remove __init__ method, not needed.
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import make_option
from optparse import OptionParser
from scaffolder.core.commands import BaseCommand
from scaffolder.core.template import TemplateManager
class ListCommand(BaseCommand):
help = 'Template command help entry'
def run(self, *args, **options):
manger = TemplateManager()
manger.list()
def get_default_option(self):
return []
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations, OperationalError, ProgrammingError
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
| Remove OperationalError and ProgrammingError imports
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
"""Sets up network on MicroPython board with Wiznet 5500 ethernet adapter attached via SPI.
This uses the netconfig_ module from my ``micropythonstm-lib``.
To compile the MicroPython ``stm32`` port with support for the Wiznet 5500 adapter,
add the following to ``mpconfigboard.mk`` in your board definition::
MICROPY_PY_WIZNET5K = 5500
MICROPY_PY_LWIP ?= 1
MICROPY_PY_USSL ?= 1
MICROPY_SSL_MBEDTLS ?= 1
and re-compile & upload the firmware::
cd mpy-cross
make
cd ../ports/stm32
make submodules
make BOARD=MYBOARD
# do whatever it takes connect your board in DFU mode
make BOARD=MYBOARD deploy
.. _netconfig: https://github.com/SpotlightKid/micropython-stm-lib/tree/master/netconfig
"""
from netconfig import connect
nic = connect('paddyland-wiznet.json', True)
print(nic.ifconfig())
| Fix minor typo in doc string in test support mopule
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de>
| """Sets up network on MicroPython board with Wiznet 5500 ethernet adapter attached via SPI.
This uses the netconfig_ module from my ``micropython-stm-lib``.
To compile the MicroPython ``stm32`` port with support for the Wiznet 5500 adapter,
add the following to ``mpconfigboard.mk`` in your board definition::
MICROPY_PY_WIZNET5K = 5500
MICROPY_PY_LWIP ?= 1
MICROPY_PY_USSL ?= 1
MICROPY_SSL_MBEDTLS ?= 1
and re-compile & upload the firmware::
cd mpy-cross
make
cd ../ports/stm32
make submodules
make BOARD=MYBOARD
# do whatever it takes connect your board in DFU mode
make BOARD=MYBOARD deploy
.. _netconfig: https://github.com/SpotlightKid/micropython-stm-lib/tree/master/netconfig
"""
from netconfig import connect
nic = connect('paddyland-wiznet.json', True)
print(nic.ifconfig())
|
import re
from django.test.client import RequestFactory
from django.template import RequestContext
from haystack import indexes
from cms import models as cmsmodels
rf = RequestFactory()
HTML_TAG_RE = re.compile(r'<[^>]+>')
def cleanup_content(s):
"""
Removes HTML tags from data and replaces them with spaces.
"""
return HTML_TAG_RE.subn('', s)[0]
class PageIndex(indexes.SearchIndex, indexes.Indexable):
"""
Since for now we only offer this site in one language, we can get around
by not doing any language model hacks.
"""
text = indexes.CharField(document=True)
title = indexes.CharField()
url = indexes.CharField()
def get_model(self):
return cmsmodels.Page
def index_queryset(self, using=None):
return self.get_model().objects.filter(published=True)
def prepare(self, obj):
self.prepared_data = super(PageIndex, self).prepare(obj)
request = rf.get('/')
request.session = {}
text = u""
# Let's extract the title
context = RequestContext(request)
for title in obj.title_set.all():
self.prepared_data['title'] = title.title
for placeholder in obj.placeholders.all():
text += placeholder.render(context, None)
self.prepared_data['text'] = cleanup_content(
self.prepared_data['title'] + text)
self.prepared_data['url'] = obj.get_absolute_url()
return self.prepared_data
| Add whitespace between title and text for search index.
| import re
from django.test.client import RequestFactory
from django.template import RequestContext
from haystack import indexes
from cms import models as cmsmodels
rf = RequestFactory()
HTML_TAG_RE = re.compile(r'<[^>]+>')
def cleanup_content(s):
"""
Removes HTML tags from data and replaces them with spaces.
"""
return HTML_TAG_RE.subn('', s)[0]
class PageIndex(indexes.SearchIndex, indexes.Indexable):
"""
Since for now we only offer this site in one language, we can get around
by not doing any language model hacks.
"""
text = indexes.CharField(document=True)
title = indexes.CharField()
url = indexes.CharField()
def get_model(self):
return cmsmodels.Page
def index_queryset(self, using=None):
return self.get_model().objects.filter(published=True)
def prepare(self, obj):
self.prepared_data = super(PageIndex, self).prepare(obj)
request = rf.get('/')
request.session = {}
text = u""
# Let's extract the title
context = RequestContext(request)
for title in obj.title_set.all():
self.prepared_data['title'] = title.title
for placeholder in obj.placeholders.all():
text += placeholder.render(context, None)
self.prepared_data['text'] = cleanup_content(
self.prepared_data['title'] + u' ' + text)
self.prepared_data['url'] = obj.get_absolute_url()
return self.prepared_data
|
'''
Minion side functions for salt-ftp
'''
import os
def recv(files, dest):
'''
Used with salt-ftp, pass the files dict, and the destination
'''
if not os.path.isdir(dest) or not os.path.isdir(os.path.dirname(dest)):
return 'Destination not available'
ret = {}
for path, data in files.items():
final = ''
if os.path.basename(path) == os.path.basename(dest)\
and not os.path.isdir(dest):
final = dest
elif os.path.isdir(dest):
final = os.path.join(dest, os.path.basename(path))
else:
return 'Destination not available'
try:
open(final, 'w+').write(data)
ret[final] = True
except IOError:
ret[final] = False
return ret
| Make naming the destination file work
| '''
Minion side functions for salt-ftp
'''
import os
def recv(files, dest):
'''
Used with salt-ftp, pass the files dict, and the destination
'''
if not os.path.isdir(dest) or not os.path.isdir(os.path.dirname(dest)):
return 'Destination not available'
ret = {}
for path, data in files.items():
final = ''
if os.path.basename(path) == os.path.basename(dest)\
and not os.path.isdir(dest):
final = dest
elif os.path.isdir(dest):
final = os.path.join(dest, os.path.basename(path))
elif os.path.isdir(os.path.dirname(dest)):
final = dest
else:
return 'Destination not available'
try:
open(final, 'w+').write(data)
ret[final] = True
except IOError:
ret[final] = False
return ret
|
"""pydocstyle support."""
from pydocstyle import PEP257Checker
from pylama.lint import Linter as Abstract
class Linter(Abstract):
"""Check pydocstyle errors."""
@staticmethod
def run(path, code=None, **meta):
"""pydocstyle code checking.
:return list: List of errors.
"""
return [{
'lnum': e.line,
# Remove colon after error code ("D403: ..." => "D403 ...").
'text': (e.message[0:4] + e.message[5:]
if e.message[4] == ':' else e.message),
'type': 'D',
'number': e.code
} for e in PEP257Checker().check_source(code, path)]
| Update for pydocstyle 2.0.0 compatibility
Fix klen/pylama#96
Adding the newer ignore_decorators argument. Thanks to @not-raspberry for the tip!
| """pydocstyle support."""
THIRD_ARG = True
try:
#: Import for pydocstyle 2.0.0 and newer
from pydocstyle import ConventionChecker as PyDocChecker
except ImportError:
#: Backward compatibility for pydocstyle prior to 2.0.0
from pydocstyle import PEP257Checker as PyDocChecker
THIRD_ARG = False
from pylama.lint import Linter as Abstract
class Linter(Abstract):
"""Check pydocstyle errors."""
@staticmethod
def run(path, code=None, **meta):
"""pydocstyle code checking.
:return list: List of errors.
"""
check_source_args = (code, path, None) if THIRD_ARG else (code, path)
return [{
'lnum': e.line,
# Remove colon after error code ("D403: ..." => "D403 ...").
'text': (e.message[0:4] + e.message[5:]
if e.message[4] == ':' else e.message),
'type': 'D',
'number': e.code
} for e in PyDocChecker().check_source(*check_source_args)]
|
# -*- coding: utf-8 -*-
'''module for unit test and task for CI'''
import sys
import unittest
from yatest import testpost, testpage, testutility, testconfig
if __name__ == '__main__':
all_test_suites = []
all_test_suites.append(testpost.get_test_suites())
all_test_suites.append(testpage.get_test_suites())
all_test_suites.append(testutility.get_test_suites())
all_test_suites.append(testconfig.get_test_suites())
alltests = unittest.TestSuite(all_test_suites)
status = not unittest.TextTestRunner(verbosity=2).run(alltests).wasSuccessful()
sys.exit(status)
| Add newly added test cases to travis.
| # -*- coding: utf-8 -*-
'''module for unit test and task for CI'''
import sys
import unittest
from yatest import testpost, testpage, testutility, testconfig, testgenerator, testpostmanager
if __name__ == '__main__':
all_test_suites = []
all_test_suites.append(testpost.get_test_suites())
all_test_suites.append(testpage.get_test_suites())
all_test_suites.append(testutility.get_test_suites())
all_test_suites.append(testconfig.get_test_suites())
all_test_suites.append(testgenerator.get_test_suites())
all_test_suites.append(testpostmanager.get_test_suites())
alltests = unittest.TestSuite(all_test_suites)
status = not unittest.TextTestRunner(verbosity=2).run(alltests).wasSuccessful()
sys.exit(status)
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
| Add basic test for example Pynini FST.
| import unittest
import normalize_breton_lib
class TestStringMethods(unittest.TestCase):
def test_normalize_breton(self):
'Test the output of NormalizeBreton.'
test_cases = [(('a--bc', 'a-bc'), ('ccb--a', 'ccb-a'), ('ba--aa', 'ba-aa'))]
for test in test_cases:
for test_case, expected in test:
test_fst = normalize_breton_lib.NormalizeBreton(test_case)
self.assertEqual(test_fst, expected)
if __name__ == '__main__':
unittest.main()
|
from .project import *
import os
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'gis',
'USER': os.environ['DATABASE_USERNAME'],
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': os.environ['DATABASE_HOST'],
'PORT': 5432,
'TEST_NAME': 'unittests',
}
}
PIPELINE_YUGLIFY_BINARY = '/usr/local/bin/yuglify'
| Comment out yuglify search path | from .project import *
import os
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'gis',
'USER': os.environ['DATABASE_USERNAME'],
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': os.environ['DATABASE_HOST'],
'PORT': 5432,
'TEST_NAME': 'unittests',
}
}
#PIPELINE_YUGLIFY_BINARY = '/usr/local/bin/yuglify'
|
#!/usr/bin/env python
import django_prometheus
import unittest
# TODO(korfuri): Add real tests. For now, this is just a placeholder
# to set up a testing system.
class DjangoPrometheusTest(unittest.TestCase):
def testNothing(self):
self.assertTrue(True)
if __name__ == 'main':
unittest.main()
| Add a test for PowersOf.
| #!/usr/bin/env python
import django_prometheus
from django_prometheus.utils import PowersOf, _INF
import unittest
class DjangoPrometheusTest(unittest.TestCase):
def testPowersOf(self):
"""Tests utils.PowersOf."""
self.assertEqual(
[0, 1, 2, 4, 8, _INF],
PowersOf(2, 4))
self.assertEqual(
[0, 3, 9, 27, 81, 243, _INF],
PowersOf(3, 5, lower=1))
self.assertEqual(
[1, 2, 4, 8, _INF],
PowersOf(2, 4, include_zero=False))
self.assertEqual(
[4, 8, 16, 32, 64, 128, _INF],
PowersOf(2, 6, lower=2, include_zero=False))
if __name__ == 'main':
unittest.main()
|
from .model import Model
from ...api import layerize
from .affine import Affine
class Residual(Model):
def __init__(self, layer):
Model.__init__(self)
self._layers.append(layer)
self.on_data_hooks.append(on_data)
def __call__(self, X):
return X + self._layers[0](X)
def begin_update(self, X, drop=0.):
y, bp_y = self._layers[0].begin_update(X, drop=drop)
output = X+y
def residual_bwd(d_output, sgd=None):
return d_output + bp_y(d_output, sgd)
return output, residual_bwd
def on_data(self, X, y=None):
for layer in self._layers:
for hook in layer.on_data_hooks:
hook(layer, X, y)
if hasattr(layer, 'W'):
layer.W.fill(0)
| Make residual connections work for list-valued inputs
| from .model import Model
from ...api import layerize
from .affine import Affine
class Residual(Model):
def __init__(self, layer):
Model.__init__(self)
self._layers.append(layer)
self.on_data_hooks.append(on_data)
def __call__(self, X):
Y = self._layers[0](X)
if isinstance(X, list) or isinstance(X, tuple):
return [X[i]+Y[i] for i in range(len(X))]
else:
return X + Y
def begin_update(self, X, drop=0.):
y, bp_y = self._layers[0].begin_update(X, drop=drop)
if isinstance(X, list) or isinstance(X, tuple):
output = [X[i]+y[i] for i in range(len(X))]
else:
output = X+y
def residual_bwd(d_output, sgd=None):
dX = bp_y(d_output, sgd)
if isinstance(d_output, list) or isinstance(d_output, tuple):
return [d_output[i]+dX[i] for i in range(len(d_output))]
else:
return d_output + dX
return output, residual_bwd
def on_data(self, X, y=None):
for layer in self._layers:
for hook in layer.on_data_hooks:
hook(layer, X, y)
if hasattr(layer, 'W'):
layer.W.fill(0)
|
import pytest
@pytest.mark.skip(
reason="Broken in 5.0. https://github.com/RocketChat/Rocket.Chat/issues/26520"
)
def test_update_jitsi_timeout(logged_rocket):
update_jitsi_timeout = logged_rocket.update_jitsi_timeout(room_id="GENERAL").json()
assert update_jitsi_timeout.get("success")
| Add todo in the test_update_jitsi_timeout so it's easier to find in the future
| import pytest
# TODO: Go back to this test once the ticket has been answered
@pytest.mark.skip(
reason="Broken in 5.0. https://github.com/RocketChat/Rocket.Chat/issues/26520"
)
def test_update_jitsi_timeout(logged_rocket):
update_jitsi_timeout = logged_rocket.update_jitsi_timeout(room_id="GENERAL").json()
assert update_jitsi_timeout.get("success")
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.admin import FlatPageAdmin as StockFlatPageAdmin
from django.contrib.sites.models import Site
from microcms.conf import settings
from microcms.models import Meta
class MetaAdmin(admin.ModelAdmin):
list_display = ('flatpage',)
list_filter = ('flatpage',)
ordering = ('flatpage',)
search_fields = ('flatpage',)
admin.site.register(Meta, MetaAdmin)
class MetaInline(admin.StackedInline):
model = Meta
class FlatPageAdmin(StockFlatPageAdmin):
inlines = [MetaInline]
class Media:
js = [settings.TINYMCE_URL, settings.TINYMCE_SETUP_URL]
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
| Insert automatically flatpage default site
| # -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.admin import FlatPageAdmin as StockFlatPageAdmin
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from microcms.conf import settings
from microcms.models import Meta
class MetaAdmin(admin.ModelAdmin):
list_display = ('flatpage',)
list_filter = ('flatpage',)
ordering = ('flatpage',)
search_fields = ('flatpage',)
admin.site.register(Meta, MetaAdmin)
class MetaInline(admin.StackedInline):
model = Meta
class FlatPageAdmin(StockFlatPageAdmin):
fieldsets = (
(None, {'fields': ('url', 'title', 'content')}),
(_('Advanced options'),
{'classes': ('collapse closed',),
'fields': ('enable_comments',
'registration_required',
'template_name')
}
),
)
inlines = [MetaInline]
class Media:
js = [settings.TINYMCE_URL, settings.TINYMCE_SETUP_URL]
def save_model(self, request, obj, form, change):
# Get the site with the lower id
site = Site.objects.order_by('id')[0]
obj.save()
obj.sites.add(site)
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
|
from django.core.management.base import BaseCommand
from recommends.tasks import recommends_precompute
from datetime import datetime
import dateutil.relativedelta
from optparse import make_option
import warnings
class Command(BaseCommand):
help = 'Calculate recommendations and similarities based on ratings'
option_list = BaseCommand.option_list + (
make_option('--verbose',
action='store_true',
dest='verbose',
default=False,
help='verbose mode'
),
)
def handle(self, *args, **options):
verbosity = int(options.get('verbosity', 0))
if options['verbose']:
warnings.warn('The `--verbose` option is being deprecated and it will be removed in the next release. Use `--verbosity` instead.', PendingDeprecationWarning)
verbosity = 1
if verbosity == 0:
# avoids allocating the results
recommends_precompute()
else:
self.stdout.write("\nCalculation Started.\n")
start_time = datetime.now()
results = recommends_precompute()
end_time = datetime.now()
if verbosity > 1:
for r in results:
self.stdout.write(
"%d similarities and %d recommendations saved.\n"
% (r['similar_count'], r['recommend_count']))
rd = dateutil.relativedelta.relativedelta(end_time, start_time)
self.stdout.write(
"Calculation finished in %d years, %d months, %d days, %d hours, %d minutes and %d seconds\n"
% (rd.years, rd.months, rd.days, rd.hours, rd.minutes, rd.seconds))
| Change deprecated options_list to add_arguments
| from django.core.management.base import BaseCommand
from recommends.tasks import recommends_precompute
from datetime import datetime
import dateutil.relativedelta
import warnings
class Command(BaseCommand):
help = 'Calculate recommendations and similarities based on ratings'
def add_arguments(self, parser):
parser.add_argument('--verbose',
action='store_true',
dest='verbose',
default=False,
help='verbose mode')
def handle(self, *args, **options):
verbosity = int(options.get('verbosity', 0))
if options['verbose']:
warnings.warn('The `--verbose` option is being deprecated and it will be removed in the next release. Use `--verbosity` instead.', PendingDeprecationWarning)
verbosity = 1
if verbosity == 0:
# avoids allocating the results
recommends_precompute()
else:
self.stdout.write("\nCalculation Started.\n")
start_time = datetime.now()
results = recommends_precompute()
end_time = datetime.now()
if verbosity > 1:
for r in results:
self.stdout.write(
"%d similarities and %d recommendations saved.\n"
% (r['similar_count'], r['recommend_count']))
rd = dateutil.relativedelta.relativedelta(end_time, start_time)
self.stdout.write(
"Calculation finished in %d years, %d months, %d days, %d hours, %d minutes and %d seconds\n"
% (rd.years, rd.months, rd.days, rd.hours, rd.minutes, rd.seconds))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from testtools import matchers
from spreadflow_core.test.matchers import MatchesInvocation
class MatchesDeltaItem(matchers.MatchesDict):
def __init__(self, item):
spec = {
'data': matchers.Equals(item['data']),
'inserts': matchers.MatchesSetwise(*[matchers.Equals(oid) for oid in item['inserts']]),
'deletes': matchers.MatchesSetwise(*[matchers.Equals(oid) for oid in item['deletes']])
}
if 'parent' in item:
spec['parent'] = MatchesDeltaItem(item['parent'])
super(MatchesDeltaItem, self).__init__(spec)
class MatchesSendDeltaItemInvocation(MatchesInvocation):
def __init__(self, expected_item, expected_port):
super(MatchesSendDeltaItemInvocation, self).__init__(
MatchesDeltaItem(expected_item),
matchers.Equals(expected_port)
)
| Use equal matcher for remaining keys
| from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from testtools import matchers
from spreadflow_core.test.matchers import MatchesInvocation
class MatchesDeltaItem(matchers.MatchesDict):
def __init__(self, item):
spec = {
'data': matchers.Equals(item['data']),
'inserts': matchers.MatchesSetwise(*[matchers.Equals(oid) for oid in item['inserts']]),
'deletes': matchers.MatchesSetwise(*[matchers.Equals(oid) for oid in item['deletes']])
}
if 'parent' in item:
spec['parent'] = MatchesDeltaItem(item['parent'])
# Use equal matcher for remaining keys.
for key in set(item.keys()) - set(spec.keys()):
spec[key] = matchers.Equals(item[key])
super(MatchesDeltaItem, self).__init__(spec)
class MatchesSendDeltaItemInvocation(MatchesInvocation):
def __init__(self, expected_item, expected_port):
super(MatchesSendDeltaItemInvocation, self).__init__(
MatchesDeltaItem(expected_item),
matchers.Equals(expected_port)
)
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import re
def simple_prettifier(scraped_data):
"""Return more presentable data (in a list) provided by scrape_target_elements()
:param bs4.element.ResultSet scraped_data: all of the data scraped by scrape_target_elements()
:return: list of presentable data
:rtype: list
"""
data_list = []
for data in scraped_data:
data_list.append(data.text)
return data_list
def regex_prettifier(scraped_data, regex):
"""Return regex modified data (in a list).
:param list scraped_data: all the scraped data
:param str regex: the regular expression you want to use to prettify the
data
:return: list of regex modified data
:rtype: list
"""
data_list = []
for data in scraped_data:
data_list.append(re.sub(regex, '', data))
return data_list
| Change function name to be more accurate
| import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import re
def remove_html_tags(scraped_data):
"""Return more presentable data (in a list) provided by scrape_target_elements()
:param bs4.element.ResultSet scraped_data: all of the data scraped by scrape_target_elements()
:return: list of presentable data
:rtype: list
"""
data_list = []
for data in scraped_data:
data_list.append(data.text)
return data_list
def regex_prettifier(scraped_data, regex):
"""Return regex modified data (in a list).
:param list scraped_data: all the scraped data
:param str regex: the regular expression you want to use to prettify the
data
:return: list of regex modified data
:rtype: list
"""
data_list = []
for data in scraped_data:
data_list.append(re.sub(regex, '', data))
return data_list
|
from django.test import TestCase
from django_tablib import ModelDataset, Field
from .models import TestModel
class DjangoTablibTestCase(TestCase):
def setUp(self):
TestModel.objects.create(field1='value')
def test_declarative_fields(self):
class TestModelDataset(ModelDataset):
field1 = Field(header='Field 1')
field2 = Field(attribute='field1')
class Meta:
model = TestModel
data = TestModelDataset()
self.assertEqual(len(data.headers), 2)
self.assertTrue('id' not in data.headers)
self.assertFalse('field1' in data.headers)
self.assertTrue('field2' in data.headers)
self.assertTrue('Field 1' in data.headers)
self.assertEqual(data[0][0], data[0][1])
| Adjust test for new functionality.
| from django.test import TestCase
from django_tablib import ModelDataset, Field
from .models import TestModel
class DjangoTablibTestCase(TestCase):
def setUp(self):
TestModel.objects.create(field1='value')
def test_declarative_fields(self):
class TestModelDataset(ModelDataset):
field1 = Field(header='Field 1')
field2 = Field(attribute='field1')
class Meta:
model = TestModel
data = TestModelDataset()
self.assertEqual(len(data.headers), 3)
self.assertTrue('id' in data.headers)
self.assertFalse('field1' in data.headers)
self.assertTrue('field2' in data.headers)
self.assertTrue('Field 1' in data.headers)
self.assertEqual(data[0][0], data[0][1])
|
from __future__ import absolute_import, division, print_function
import pytest
import tensorflow as tf
from lucid.modelzoo.vision_models import InceptionV1
from lucid.optvis import objectives, param, render, transform
model = InceptionV1()
model.load_graphdef()
@pytest.mark.parametrize("decorrelate", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_integration(decorrelate, fft):
obj = objectives.neuron("mixed3a_pre_relu", 0)
param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
rendering = render.render_vis(model, obj, param_f=param_f, thresholds=(1,2),
verbose=False, transforms=[])
start_image = rendering[0]
end_image = rendering[-1]
objective_f = objectives.neuron("mixed3a", 177)
param_f = lambda: param.image(64, decorrelate=decorrelate, fft=fft)
rendering = render.render_vis(model, objective_f, param_f, verbose=False, thresholds=(0,64), use_fixed_seed=True)
start_image, end_image = rendering
assert (start_image != end_image).any()
| Move model init into pytest fixture to avoid loading model and downloading graph just by importing the test module
| from __future__ import absolute_import, division, print_function
import pytest
import tensorflow as tf
from lucid.modelzoo.vision_models import InceptionV1
from lucid.optvis import objectives, param, render, transform
@pytest.fixture
def inceptionv1():
model = InceptionV1()
model.load_graphdef()
return model
@pytest.mark.parametrize("decorrelate", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_integration(decorrelate, fft, inceptionv1):
obj = objectives.neuron("mixed3a_pre_relu", 0)
param_f = lambda: param.image(16, decorrelate=decorrelate, fft=fft)
rendering = render.render_vis(inceptionv1, obj, param_f=param_f, thresholds=(1,2),
verbose=False, transforms=[])
start_image = rendering[0]
end_image = rendering[-1]
objective_f = objectives.neuron("mixed3a", 177)
param_f = lambda: param.image(64, decorrelate=decorrelate, fft=fft)
rendering = render.render_vis(inceptionv1, objective_f, param_f, verbose=False, thresholds=(0,64), use_fixed_seed=True)
start_image, end_image = rendering
assert (start_image != end_image).any()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.csv_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Additional dialect definitions for pythons CSV module.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from csv import Dialect, excel, register_dialect
class excel_semikolon(Dialect):
delimiter = ';'
doublequote = True
lineterminator = '\r\n'
quotechar = '"'
quoting = 0
skipinitialspace = False
def patchup(dialect):
if dialect:
if dialect.delimiter == excel_semikolon.delimiter and \
dialect.quotechar == excel_semikolon.quotechar:
# walks like a duck and talks like a duck.. must be one
dialect.doublequote = True
return dialect
register_dialect("excel_semikolon", excel_semikolon)
| Extend patchup for builtin excel dialect
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.csv_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Additional dialect definitions for pythons CSV module.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from csv import Dialect, excel, register_dialect
class excel_semikolon(Dialect):
delimiter = ';'
doublequote = True
lineterminator = '\r\n'
quotechar = '"'
quoting = 0
skipinitialspace = False
def patchup(dialect):
if dialect:
if dialect.delimiter in [excel_semikolon.delimiter, excel.delimiter] and \
dialect.quotechar == excel_semikolon.quotechar:
# walks like a duck and talks like a duck.. must be one
dialect.doublequote = True
return dialect
register_dialect("excel_semikolon", excel_semikolon)
|
from django.db import models
from django.template.loader import render_to_string
from normandy.recipes.models import Recipe
class Extension(models.Model):
name = models.CharField(max_length=255)
xpi = models.FileField(upload_to='extensions')
@property
def recipes_used_by(self):
"""Set of enabled recipes that are using this extension."""
return Recipe.objects.filter(
latest_revision__arguments_json__contains=self.xpi.url,
)
def recipes_used_by_html(self):
return render_to_string('admin/field_recipe_list.html', {
'recipes': self.recipes_used_by.order_by('latest_revision__name'),
})
recipes_used_by_html.short_description = 'Used in Recipes'
| Add ordering to Extension model
| from django.db import models
from django.template.loader import render_to_string
from normandy.recipes.models import Recipe
class Extension(models.Model):
name = models.CharField(max_length=255)
xpi = models.FileField(upload_to='extensions')
class Meta:
ordering = ('-id',)
@property
def recipes_used_by(self):
"""Set of enabled recipes that are using this extension."""
return Recipe.objects.filter(
latest_revision__arguments_json__contains=self.xpi.url,
)
def recipes_used_by_html(self):
return render_to_string('admin/field_recipe_list.html', {
'recipes': self.recipes_used_by.order_by('latest_revision__name'),
})
recipes_used_by_html.short_description = 'Used in Recipes'
|
"""
ExtensionItem -- Graphical representation of an association.
"""
# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.
from gaphor import UML
from gaphor.diagram.diagramline import NamedLine
class ExtensionItem(NamedLine):
"""
ExtensionItem represents associations.
An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
represents a Property (with Property.association == my association).
"""
__uml__ = UML.Extension
def __init__(self, id=None, model=None):
NamedLine.__init__(self, id, model)
self.watch("subject<Extension>.ownedEnd")
def draw_head(self, context):
cr = context.cairo
cr.move_to(0, 0)
cr.line_to(15, -10)
cr.line_to(15, 10)
cr.line_to(0, 0)
cr.set_source_rgb(0, 0, 0)
cr.fill()
cr.move_to(15, 0)
| Convert Extension item to new line style
| """
ExtensionItem -- Graphical representation of an association.
"""
# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.
from gaphor import UML
from gaphor.UML.modelfactory import stereotypes_str
from gaphor.diagram.presentation import LinePresentation
from gaphor.diagram.shapes import Box, EditableText, Text
from gaphor.diagram.support import represents
@represents(UML.Extension)
class ExtensionItem(LinePresentation):
"""
ExtensionItem represents associations.
An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
represents a Property (with Property.association == my association).
"""
def __init__(self, id=None, model=None):
super().__init__(id, model)
self.shape_middle = Box(
Text(
text=lambda: stereotypes_str(self.subject),
style={"min-width": 0, "min-height": 0},
),
EditableText(text=lambda: self.subject and self.subject.name or ""),
)
self.watch("subject<NamedElement>.name")
self.watch("subject.appliedStereotype.classifier.name")
def draw_head(self, context):
cr = context.cairo
cr.move_to(0, 0)
cr.line_to(15, -10)
cr.line_to(15, 10)
cr.line_to(0, 0)
cr.set_source_rgb(0, 0, 0)
cr.fill()
cr.move_to(15, 0)
|
import os
from datetime import date
from unittest import main, TestCase
import numpy as np
from serenata_toolbox.chamber_of_deputies.official_missions_dataset import OfficialMissionsDataset
class TestOfficialMissionsDataset(TestCase):
def setUp(self):
self.subject = OfficialMissionsDataset()
def test_fetch(self):
df = self.subject.fetch(date(2017, 1, 1), date(2017, 2, 28))
actualColumns = df.columns
expectedColumns = [
'participant', 'destination', 'subject', 'start', 'end',
'canceled', 'report_status', 'report_details_link'
]
self.assertTrue(np.array_equal(expectedColumns, actualColumns))
self.assertEqual(57, len(df))
expectedCanceled = ['No', 'Yes']
actualCanceled = df.canceled.unique()
self.assertTrue(np.array_equal(np.array(expectedCanceled), np.array(actualCanceled)))
if __name__ == '__main__':
main()
| Update tests (Chamber of Deputies changed real-world data)
| import os
from datetime import date
from unittest import main, TestCase
import numpy as np
from serenata_toolbox.chamber_of_deputies.official_missions_dataset import OfficialMissionsDataset
class TestOfficialMissionsDataset(TestCase):
def setUp(self):
self.subject = OfficialMissionsDataset()
def test_fetch(self):
df = self.subject.fetch(date(2017, 1, 1), date(2017, 2, 28))
actualColumns = df.columns
expectedColumns = [
'participant', 'destination', 'subject', 'start', 'end',
'canceled', 'report_status', 'report_details_link'
]
self.assertTrue(np.array_equal(expectedColumns, actualColumns))
self.assertEqual(53, len(df))
expectedCanceled = ['No']
actualCanceled = df.canceled.unique()
self.assertTrue(np.array_equal(np.array(expectedCanceled), np.array(actualCanceled)))
if __name__ == '__main__':
main()
|
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from cpm_data.models import JuryMember
class MyPageModelAdmin(ModelAdmin):
model = JuryMember
menu_label = 'Jury'
menu_order = 200 # will put in 3rd place (000 being 1st, 100 2nd)
list_display = ('name', 'country')
search_fields = ('name_en', 'name_be', 'name_ru')
modeladmin_register(MyPageModelAdmin)
| Add modeladmin classes for Season, JuryMember, Partner models
| from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from cpm_data.models import JuryMember, Partner, Season
class SeasonModelAdmin(ModelAdmin):
model = Season
menu_label = 'Seasons'
menu_icon = 'date'
menu_order = 200
list_display = ('name_en', 'name_be', 'name_ru')
search_fields = ('name_en', 'name_be', 'name_ru')
class JuryMemberModelAdmin(ModelAdmin):
model = JuryMember
menu_label = 'Jury'
menu_icon = 'group'
menu_order = 210
list_display = ('name_en', 'name_be', 'name_ru', 'country')
search_fields = ('name_en', 'name_be', 'name_ru')
class PartnerModelAdmin(ModelAdmin):
model = Partner
menu_label = 'Partners'
menu_icon = 'grip'
menu_order = 220
list_display = ('name_en', 'name_be', 'name_ru', 'image')
search_fields = ('name_en', 'name_be', 'name_ru')
modeladmin_register(SeasonModelAdmin)
modeladmin_register(JuryMemberModelAdmin)
modeladmin_register(PartnerModelAdmin)
|
""" CLI interface.
"""
import argparse
import bud_get
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", help = "the input data file")
parser.add_argument("outfile", help = "the output file")
args = parser.parse_args()
in_path = args.infile
out_path = args.outfile
print "Processing [%s]..." % in_path
csv_data = bud_get.filter_csv(in_path)
bud_get.write_csv(csv_data, out_path)
if __name__ == "__main__":
main()
| Print version in command line.
| """ CLI interface.
"""
import argparse
import bud_get
from pkg_resources import get_distribution
def main():
VERSION = get_distribution('bud-get').version
parser = argparse.ArgumentParser()
parser.add_argument("infile", help = "the input data file")
parser.add_argument("outfile", help = "the output file")
parser.add_argument('--version', action='version', version='v%s' % VERSION)
args = parser.parse_args()
in_path = args.infile
out_path = args.outfile
print "Processing [%s]..." % in_path
csv_data = bud_get.filter_csv(in_path)
bud_get.write_csv(csv_data, out_path)
if __name__ == "__main__":
main()
|
"""
This file defines the types for type annotations.
These names aren't part of the module namespace, but they are used in the
annotations in the function signatures. The functions in the module are only
valid for inputs that match the given type annotations.
"""
from cupy.cuda import Device as _Device
__all__ = [
"Array",
"Device",
"Dtype",
"SupportsDLPack",
"SupportsBufferProtocol",
"PyCapsule",
]
import sys
from typing import Any, Literal, Sequence, Type, Union, TYPE_CHECKING
from . import Array
from numpy import (
dtype,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
# This should really be recursive, but that isn't supported yet. See the
# similar comment in numpy/typing/_array_like.py
NestedSequence = Sequence[Sequence[Any]]
Device = _Device
if TYPE_CHECKING or sys.version_info >= (3, 9):
Dtype = dtype[Union[
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
]]
else:
Dtype = dtype
SupportsDLPack = Any
SupportsBufferProtocol = Any
PyCapsule = Any
| MAINT: Add a missing subscription slot to `NestedSequence`
| """
This file defines the types for type annotations.
These names aren't part of the module namespace, but they are used in the
annotations in the function signatures. The functions in the module are only
valid for inputs that match the given type annotations.
"""
from cupy.cuda import Device as _Device
__all__ = [
"Array",
"Device",
"Dtype",
"SupportsDLPack",
"SupportsBufferProtocol",
"PyCapsule",
]
import sys
from typing import Any, Literal, Sequence, Type, Union, TYPE_CHECKING, TypeVar
from . import Array
from numpy import (
dtype,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
# This should really be recursive, but that isn't supported yet. See the
# similar comment in numpy/typing/_array_like.py
_T = TypeVar("_T")
NestedSequence = Sequence[Sequence[_T]]
Device = _Device
if TYPE_CHECKING or sys.version_info >= (3, 9):
Dtype = dtype[Union[
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
]]
else:
Dtype = dtype
SupportsDLPack = Any
SupportsBufferProtocol = Any
PyCapsule = Any
|