# -*- coding: utf-8 -*-
import os.path
import shutil
import uuid
import re
from django.test import TestCase
from django.contrib.auth.models import User
from mock import patch, Mock, PropertyMock
from docker.errors import APIError as DockerAPIError, DockerException
from readthedocs.projects.models import Project
from readthedocs.builds.models import Version
from readthedocs.doc_builder.environments import (DockerEnvironment,
DockerBuildCommand,
LocalEnvironment,
BuildCommand)
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
from readthedocs.rtd_tests.utils import make_test_git
from readthedocs.rtd_tests.base import RTDTestCase
from readthedocs.rtd_tests.mocks.environment import EnvironmentMockGroup
class TestLocalEnvironment(TestCase):
'''Test execution and exception handling in environment'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
self.mocks = EnvironmentMockGroup()
self.mocks.start()
def tearDown(self):
self.mocks.stop()
def test_normal_execution(self):
'''Normal build in passing state'''
self.mocks.configure_mock('process', {
'communicate.return_value': ('This is okay', '')})
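# returncode is a property, so it has to be mocked on the mock's type via PropertyMock.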
type(self.mocks.process).returncode = PropertyMock(return_value=0)
build_env = LocalEnvironment(version=self.version, project=self.project,
build={})
with build_env:
build_env.run('echo', 'test')
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.successful)
self.assertEqual(len(build_env.commands), 1)
self.assertEqual(build_env.commands[0].output, u'This is okay')
def test_failing_execution(self):
'''Build in failing state'''
self.mocks.configure_mock('process', {
'communicate.return_value': ('This is not okay', '')})
type(self.mocks.process).returncode = PropertyMock(return_value=1)
build_env = LocalEnvironment(version=self.version, project=self.project,
build={})
with build_env:
build_env.run('echo', 'test')
self.fail('This should be unreachable')
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
self.assertEqual(len(build_env.commands), 1)
self.assertEqual(build_env.commands[0].output, u'This is not okay')
def test_failing_execution_with_caught_exception(self):
'''Build in failing state with BuildEnvironmentError exception'''
build_env = LocalEnvironment(version=self.version, project=self.project,
build={})
with build_env:
raise BuildEnvironmentError('Foobar')
self.assertFalse(self.mocks.process.communicate.called)
self.assertEqual(len(build_env.commands), 0)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
def test_failing_execution_with_uncaught_exception(self):
'''Build in failing state with exception from code'''
build_env = LocalEnvironment(version=self.version, project=self.project,
build={})
def _inner():
with build_env:
raise Exception()
self.assertRaises(Exception, _inner)
self.assertFalse(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
class TestDockerEnvironment(TestCase):
'''Test docker build environment'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
self.mocks = EnvironmentMockGroup()
self.mocks.start()
def tearDown(self):
self.mocks.stop()
def test_container_id(self):
'''Test docker build command'''
docker = DockerEnvironment(version=self.version, project=self.project,
build={})
self.assertEqual(docker.container_id,
'version-foobar-of-pip-20')
def test_connection_failure(self):
'''Connection failure to the Docker socket should raise an exception'''
self.mocks.configure_mock('docker', {
'side_effect': DockerException
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
def _inner():
with build_env:
self.fail('Should not hit this')
self.assertRaises(BuildEnvironmentError, _inner)
def test_api_failure(self):
'''Failing API error response from docker should raise exception'''
response = Mock(status_code=500, reason='Because')
self.mocks.configure_mock('docker_client', {
'create_container.side_effect': DockerAPIError(
'Failure creating container',
response,
'Failure creating container'
)
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
def _inner():
with build_env:
self.fail('Should not hit this')
self.assertRaises(BuildEnvironmentError, _inner)
def test_command_execution(self):
'''Command execution through Docker'''
self.mocks.configure_mock('docker_client', {
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': 'This is the return',
'exec_inspect.return_value': {'ExitCode': 1},
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
with build_env:
build_env.run('echo test', cwd='/tmp')
self.mocks.docker_client.exec_create.assert_called_with(
container='version-foobar-of-pip-20',
cmd="/bin/sh -c 'cd /tmp && echo\\ test'",
stderr=True,
stdout=True
)
self.assertEqual(build_env.commands[0].exit_code, 1)
self.assertEqual(build_env.commands[0].output, 'This is the return')
self.assertEqual(build_env.commands[0].error, None)
self.assertTrue(build_env.failed)
def test_command_execution_cleanup_exception(self):
'''Command execution through Docker, catch exception during cleanup'''
response = Mock(status_code=500, reason='Because')
self.mocks.configure_mock('docker_client', {
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': 'This is the return',
'exec_inspect.return_value': {'ExitCode': 0},
'kill.side_effect': DockerAPIError(
'Failure killing container',
response,
'Failure killing container'
)
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
with build_env:
build_env.run('echo', 'test', cwd='/tmp')
self.mocks.docker_client.kill.assert_called_with(
'version-foobar-of-pip-20')
self.assertTrue(build_env.successful)
def test_container_already_exists(self):
'''Docker container already exists'''
self.mocks.configure_mock('docker_client', {
'inspect_container.return_value': {'State': {'Running': True}},
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': 'This is the return',
'exec_inspect.return_value': {'ExitCode': 0},
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
def _inner():
with build_env:
build_env.run('echo', 'test', cwd='/tmp')
self.assertRaises(BuildEnvironmentError, _inner)
self.assertEqual(
str(build_env.failure),
'A build environment is currently running for this version')
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 0)
self.assertTrue(build_env.failed)
def test_container_timeout(self):
'''Docker container timeout and command failure'''
response = Mock(status_code=404, reason='Container not found')
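# inspect_container first raises (the container is gone), then reports a dead
# container with a non-zero exit code, simulating a build that timed out.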
self.mocks.configure_mock('docker_client', {
'inspect_container.side_effect': [
DockerAPIError(
'No container found',
response,
'No container found',
),
{'State': {'Running': False, 'ExitCode': 42}},
],
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': 'This is the return',
'exec_inspect.return_value': {'ExitCode': 0},
})
build_env = DockerEnvironment(version=self.version, project=self.project,
build={})
with build_env:
build_env.run('echo', 'test', cwd='/tmp')
self.assertEqual(
str(build_env.failure),
'Build exited due to time out')
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 1)
self.assertTrue(build_env.failed)
class TestBuildCommand(TestCase):
'''Test build command creation'''
def test_command_env(self):
'''Test build command env vars'''
env = {'FOOBAR': 'foobar',
'PATH': 'foobar'}
cmd = BuildCommand('echo', environment=env)
for key in env.keys():
self.assertEqual(cmd.environment[key], env[key])
def test_result(self):
'''Test result of output using unix true/false commands'''
cmd = BuildCommand('true')
cmd.run()
self.assertTrue(cmd.successful)
cmd = BuildCommand('false')
cmd.run()
self.assertTrue(cmd.failed)
def test_missing_command(self):
'''Test missing command'''
path = os.path.join('non-existent', str(uuid.uuid4()))
self.assertFalse(os.path.exists(path))
cmd = BuildCommand(path)
cmd.run()
missing_re = re.compile(r'(?:No such file or directory|not found)')
self.assertRegexpMatches(cmd.error, missing_re)
def test_input(self):
'''Test input to command'''
cmd = BuildCommand('/bin/cat', input_data='FOOBAR')
cmd.run()
self.assertEqual(cmd.output, 'FOOBAR')
def test_output(self):
'''Test output command'''
cmd = BuildCommand(['/bin/bash',
'-c', 'echo -n FOOBAR'])
cmd.run()
self.assertEqual(cmd.output, "FOOBAR")
def test_error_output(self):
'''Test error output from command'''
# Test default combined output/error streams
cmd = BuildCommand(['/bin/bash',
'-c', 'echo -n FOOBAR 1>&2'])
cmd.run()
self.assertEqual(cmd.output, 'FOOBAR')
self.assertIsNone(cmd.error)
# Test non-combined streams
cmd = BuildCommand(['/bin/bash',
'-c', 'echo -n FOOBAR 1>&2'],
combine_output=False)
cmd.run()
self.assertEqual(cmd.output, '')
self.assertEqual(cmd.error, 'FOOBAR')
@patch('subprocess.Popen')
def test_unicode_output(self, mock_subprocess):
'''Unicode output from command'''
mock_process = Mock(**{
'communicate.return_value': (b'HérÉ îß sömê ünïçó∂é', ''),
})
mock_subprocess.return_value = mock_process
cmd = BuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.run()
self.assertEqual(
cmd.output,
u'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9')
class TestDockerBuildCommand(TestCase):
'''Test docker build commands'''
def setUp(self):
self.mocks = EnvironmentMockGroup()
self.mocks.start()
def tearDown(self):
self.mocks.stop()
def test_wrapped_command(self):
'''Test shell wrapping for Docker chdir'''
cmd = DockerBuildCommand(['pip', 'install', 'requests'],
cwd='/tmp/foobar')
self.assertEqual(
cmd.get_wrapped_command(),
("/bin/sh -c "
"'cd /tmp/foobar && "
"pip install requests'"))
cmd = DockerBuildCommand(['python', '/tmp/foo/pip', 'install',
'Django>1.7'],
cwd='/tmp/foobar',
bin_path='/tmp/foo')
self.assertEqual(
cmd.get_wrapped_command(),
("/bin/sh -c "
"'cd /tmp/foobar && PATH=/tmp/foo:$PATH "
"python /tmp/foo/pip install Django\>1.7'"))
def test_unicode_output(self):
'''Unicode output from command'''
self.mocks.configure_mock('docker_client', {
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': b'HérÉ îß sömê ünïçó∂é',
'exec_inspect.return_value': {'ExitCode': 0},
})
cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.build_env = Mock()
cmd.build_env.get_client.return_value = self.mocks.docker_client
type(cmd.build_env).container_id = PropertyMock(return_value='foo')
cmd.run()
self.assertEqual(
cmd.output,
u'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9')
self.assertEqual(self.mocks.docker_client.exec_start.call_count, 1)
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 1)
self.assertEqual(self.mocks.docker_client.exec_inspect.call_count, 1)
def test_command_oom_kill(self):
'''Command is OOM killed'''
self.mocks.configure_mock('docker_client', {
'exec_create.return_value': {'Id': 'container-foobar'},
'exec_start.return_value': b'Killed\n',
'exec_inspect.return_value': {'ExitCode': 137},
})
cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.build_env = Mock()
cmd.build_env.get_client.return_value = self.mocks.docker_client
type(cmd.build_env).container_id = PropertyMock(return_value='foo')
cmd.run()
self.assertEqual(
str(cmd.output),
u'Command killed due to excessive memory consumption\n')
# ==== stevepiercy/readthedocs.org | readthedocs/rtd_tests/tests/test_doc_building.py | Python | mit ====
# coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
__metaclass__ = type
import mock
from ansible.modules.storage.netapp.netapp_e_mgmt_interface import MgmtInterface
from units.compat.mock import PropertyMock
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
class MgmtInterfaceTest(ModuleTestCase):
REQUIRED_PARAMS = {
'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1',
}
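# Canned interface records used to mock the array's ethernet-interfaces endpoint:
# two controllers (controllerSlot 1 and 2), each with two WAN ports (wan0/wan1).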
TEST_DATA = [
{
"controllerRef": "070000000000000000000001",
"controllerSlot": 1,
"interfaceName": "wan0",
"interfaceRef": "2800070000000000000000000001000000000000",
"channel": 1,
"alias": "creG1g-AP-a",
"ipv4Enabled": True,
"ipv4Address": "10.1.1.10",
"linkStatus": "up",
"ipv4SubnetMask": "255.255.255.0",
"ipv4AddressConfigMethod": "configStatic",
"ipv4GatewayAddress": "10.1.1.1",
"ipv6Enabled": False,
"physicalLocation": {
"slot": 0,
},
"dnsProperties": {
"acquisitionProperties": {
"dnsAcquisitionType": "stat",
"dnsServers": [
{
"addressType": "ipv4",
"ipv4Address": "10.1.0.250",
},
{
"addressType": "ipv4",
"ipv4Address": "10.10.0.20",
}
]
},
"dhcpAcquiredDnsServers": []
},
"ntpProperties": {
"acquisitionProperties": {
"ntpAcquisitionType": "disabled",
"ntpServers": None
},
"dhcpAcquiredNtpServers": []
},
},
{
"controllerRef": "070000000000000000000001",
"controllerSlot": 1,
"interfaceName": "wan1",
"interfaceRef": "2800070000000000000000000001000000000000",
"channel": 2,
"alias": "creG1g-AP-a",
"ipv4Enabled": True,
"ipv4Address": "0.0.0.0",
"ipv4SubnetMask": "0.0.0.0",
"ipv4AddressConfigMethod": "configDhcp",
"ipv4GatewayAddress": "10.1.1.1",
"ipv6Enabled": False,
"physicalLocation": {
"slot": 1,
},
"dnsProperties": {
"acquisitionProperties": {
"dnsAcquisitionType": "stat",
"dnsServers": [
{
"addressType": "ipv4",
"ipv4Address": "10.1.0.250",
"ipv6Address": None
},
{
"addressType": "ipv4",
"ipv4Address": "10.10.0.20",
"ipv6Address": None
}
]
},
"dhcpAcquiredDnsServers": []
},
"ntpProperties": {
"acquisitionProperties": {
"ntpAcquisitionType": "disabled",
"ntpServers": None
},
"dhcpAcquiredNtpServers": []
},
},
{
"controllerRef": "070000000000000000000002",
"controllerSlot": 2,
"interfaceName": "wan0",
"interfaceRef": "2800070000000000000000000001000000000000",
"channel": 1,
"alias": "creG1g-AP-b",
"ipv4Enabled": True,
"ipv4Address": "0.0.0.0",
"ipv4SubnetMask": "0.0.0.0",
"ipv4AddressConfigMethod": "configDhcp",
"ipv4GatewayAddress": "10.1.1.1",
"ipv6Enabled": False,
"physicalLocation": {
"slot": 0,
},
"dnsProperties": {
"acquisitionProperties": {
"dnsAcquisitionType": "stat",
"dnsServers": [
{
"addressType": "ipv4",
"ipv4Address": "10.1.0.250",
"ipv6Address": None
}
]
},
"dhcpAcquiredDnsServers": []
},
"ntpProperties": {
"acquisitionProperties": {
"ntpAcquisitionType": "stat",
"ntpServers": [
{
"addrType": "ipvx",
"domainName": None,
"ipvxAddress": {
"addressType": "ipv4",
"ipv4Address": "10.13.1.5",
"ipv6Address": None
}
},
{
"addrType": "ipvx",
"domainName": None,
"ipvxAddress": {
"addressType": "ipv4",
"ipv4Address": "10.15.1.8",
"ipv6Address": None
}
}
]
},
"dhcpAcquiredNtpServers": []
},
},
{
"controllerRef": "070000000000000000000002",
"controllerSlot": 2,
"interfaceName": "wan1",
"interfaceRef": "2801070000000000000000000001000000000000",
"channel": 2,
"alias": "creG1g-AP-b",
"ipv4Enabled": True,
"ipv4Address": "0.0.0.0",
"ipv4SubnetMask": "0.0.0.0",
"ipv4AddressConfigMethod": "configDhcp",
"ipv4GatewayAddress": "10.1.1.1",
"ipv6Enabled": False,
"physicalLocation": {
"slot": 1,
},
"dnsProperties": {
"acquisitionProperties": {
"dnsAcquisitionType": "stat",
"dnsServers": [
{
"addressType": "ipv4",
"ipv4Address": "10.19.1.2",
"ipv6Address": None
}
]
},
"dhcpAcquiredDnsServers": []
},
"ntpProperties": {
"acquisitionProperties": {
"ntpAcquisitionType": "stat",
"ntpServers": [
{
"addrType": "ipvx",
"domainName": None,
"ipvxAddress": {
"addressType": "ipv4",
"ipv4Address": "10.13.1.5",
"ipv6Address": None
}
},
{
"addrType": "ipvx",
"domainName": None,
"ipvxAddress": {
"addressType": "ipv4",
"ipv4Address": "10.15.1.18",
"ipv6Address": None
}
}
]
},
"dhcpAcquiredNtpServers": []
},
},
]
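# Dotted path of the module's request helper; tests patch this to stub out REST calls.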
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_mgmt_interface.request'
def _set_args(self, args=None):
module_args = self.REQUIRED_PARAMS.copy()
if args is not None:
module_args.update(args)
set_module_args(module_args)
def test_controller_property_pass(self):
"""Verify dictionary return from controller property."""
initial = {
"state": "enable",
"controller": "A",
"channel": "1",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
expected = {
'A': {'controllerRef': '070000000000000000000001',
'controllerSlot': 1, 'ssh': False},
'B': {'controllerRef': '070000000000000000000002',
'controllerSlot': 2, 'ssh': True}}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, return_value=(200, controller_request)):
response = mgmt_interface.controllers
self.assertTrue(response == expected)
def test_controller_property_fail(self):
"""Verify controllers endpoint request failure causes AnsibleFailJson exception."""
initial = {
"state": "enable",
"controller": "A",
"channel": "1",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
expected = {
'A': {'controllerRef': '070000000000000000000001',
'controllerSlot': 1, 'ssh': False},
'B': {'controllerRef': '070000000000000000000002',
'controllerSlot': 2, 'ssh': True}}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the controller settings."):
with mock.patch(self.REQ_FUNC, return_value=Exception):
response = mgmt_interface.controllers
def test_interface_property_match_pass(self):
"""Verify return value from interface property."""
initial = {
"state": "enable",
"controller": "A",
"channel": "1",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.0",
"config_method": "static"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
expected = {
"dns_servers": [{"ipv4Address": "10.1.0.250", "addressType": "ipv4"},
{"ipv4Address": "10.10.0.20", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"link_status": "up",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.10",
"ipv6Enabled": False,
"channel": 1}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TEST_DATA), (200, controller_request)]):
iface = mgmt_interface.interface
self.assertTrue(iface == expected)
def test_interface_property_request_exception_fail(self):
"""Verify ethernet-interfaces endpoint request failure results in AnsibleFailJson exception."""
initial = {
"state": "enable",
"controller": "A",
"channel": "1",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve defined management interfaces."):
with mock.patch(self.REQ_FUNC, side_effect=[Exception, (200, controller_request)]):
iface = mgmt_interface.interface
def test_interface_property_no_match_fail(self):
"""Verify return value from interface property."""
initial = {
"state": "enable",
"controller": "A",
"name": "wrong_name",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
expected = {
"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleFailJson, r"We could not find an interface matching"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TEST_DATA), (200, controller_request)]):
iface = mgmt_interface.interface
def test_get_enable_interface_settings_enabled_pass(self):
"""Validate get_enable_interface_settings updates properly."""
initial = {
"state": "enable",
"controller": "A",
"name": "wrong_name",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
iface = {"enabled": False}
expected_iface = {}
self._set_args(initial)
mgmt_interface = MgmtInterface()
update, expected_iface, body = mgmt_interface.get_enable_interface_settings(iface, expected_iface, False, {})
self.assertTrue(update and expected_iface["enabled"] and body["ipv4Enabled"])
def test_get_enable_interface_settings_disabled_pass(self):
"""Validate get_enable_interface_settings updates properly."""
initial = {
"state": "disable",
"controller": "A",
"name": "wan0",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static"}
iface = {"enabled": True}
expected_iface = {}
self._set_args(initial)
mgmt_interface = MgmtInterface()
update, expected_iface, body = mgmt_interface.get_enable_interface_settings(iface, expected_iface, False, {})
self.assertTrue(update and not expected_iface["enabled"] and not body["ipv4Enabled"])
def test_update_array_interface_ssh_pass(self):
"""Verify get_interface_settings gives the right static configuration response."""
initial = {
"state": "enable",
"controller": "A",
"name": "wan0",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static",
"ssh": True}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"link_status": "up",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, return_value=(200, None)):
update = mgmt_interface.update_array(settings, iface)
self.assertTrue(update)
def test_update_array_dns_static_ntp_disable_pass(self):
"""Verify get_interface_settings gives the right static configuration response."""
initial = {
"controller": "A",
"name": "wan0",
"dns_config_method": "static",
"dns_address": "192.168.1.1",
"dns_address_backup": "192.168.1.100",
"ntp_config_method": "disable"}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"link_status": "up",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "configDhcp",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, return_value=(200, None)):
update = mgmt_interface.update_array(settings, iface)
self.assertTrue(update)
def test_update_array_dns_dhcp_ntp_static_pass(self):
"""Verify get_interface_settings gives the right static configuration response."""
initial = {
"controller": "A",
"name": "wan0",
"ntp_config_method": "static",
"ntp_address": "192.168.1.1",
"ntp_address_backup": "192.168.1.100",
"dns_config_method": "dhcp"}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"link_status": "up",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "configStatic",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, return_value=(200, None)):
update = mgmt_interface.update_array(settings, iface)
self.assertTrue(update)
def test_update_array_dns_dhcp_ntp_static_no_change_pass(self):
"""Verify get_interface_settings gives the right static configuration response."""
initial = {
"controller": "A",
"name": "wan0",
"ntp_config_method": "dhcp",
"dns_config_method": "dhcp"}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"ntp_servers": None,
"ntp_config_method": "dhcp",
"controllerRef": "070000000000000000000001",
"config_method": "static",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "dhcp",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.11",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with mock.patch(self.REQ_FUNC, return_value=(200, None)):
update = mgmt_interface.update_array(settings, iface)
self.assertFalse(update)
def test_update_array_ipv4_ipv6_disabled_fail(self):
"""Verify exception is thrown when both ipv4 and ipv6 would be disabled at the same time."""
initial = {
"state": "disable",
"controller": "A",
"name": "wan0",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static",
"ssh": True}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.11",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleFailJson, r"This storage-system already has IPv6 connectivity disabled."):
with mock.patch(self.REQ_FUNC, return_value=(422, dict(ipv4Enabled=False, retcode="4", errorMessage=""))):
mgmt_interface.update_array(settings, iface)
def test_update_array_request_error_fail(self):
"""Verify exception is thrown when request results in an error."""
initial = {
"state": "disable",
"controller": "A",
"name": "wan0",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static",
"ssh": True}
iface = {"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
settings = {"controllerRef": "070000000000000000000001",
"ssh": False}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleFailJson, r"We failed to configure the management interface."):
with mock.patch(self.REQ_FUNC, return_value=(300, dict(ipv4Enabled=False, retcode="4", errorMessage=""))):
mgmt_interface.update_array(settings, iface)
def test_update_pass(self):
"""Validate update method completes."""
initial = {
"state": "enable",
"controller": "A",
"channel": "1",
"address": "192.168.1.1",
"subnet_mask": "255.255.255.1",
"config_method": "static",
"ssh": "yes"}
controller_request = [
{"physicalLocation": {"slot": 2},
"controllerRef": "070000000000000000000002",
"networkSettings": {"remoteAccessEnabled": True}},
{"physicalLocation": {"slot": 1},
"controllerRef": "070000000000000000000001",
"networkSettings": {"remoteAccessEnabled": False}}]
expected = {
"dns_servers": [{"ipv4Address": "10.1.0.20", "addressType": "ipv4"},
{"ipv4Address": "10.1.0.50", "addressType": "ipv4"}],
"subnet_mask": "255.255.255.0",
"ntp_servers": None,
"ntp_config_method": "disabled",
"controllerRef": "070000000000000000000001",
"config_method": "configStatic",
"enabled": True,
"gateway": "10.1.1.1",
"alias": "creG1g-AP-a",
"controllerSlot": 1,
"dns_config_method": "stat",
"id": "2800070000000000000000000001000000000000",
"address": "10.1.1.111",
"ipv6Enabled": False,
"channel": 1}
self._set_args(initial)
mgmt_interface = MgmtInterface()
with self.assertRaisesRegexp(AnsibleExitJson, r"The interface settings have been updated."):
with mock.patch(self.REQ_FUNC, side_effect=[(200, None), (200, controller_request), (200, self.TEST_DATA),
(200, controller_request), (200, self.TEST_DATA)]):
mgmt_interface.update()
# ==== thaim/ansible | test/units/modules/storage/netapp/test_netapp_e_mgmt_interface.py | Python | mit ====
from inspect import ismethod, isfunction
import os
import time
import traceback
from CodernityDB.database import RecordDeleted, RecordNotFound
from couchpotato import md5, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from .index import ReleaseIndex, ReleaseStatusIndex, ReleaseIDIndex, ReleaseDownloadIndex
from couchpotato.environment import Env
log = CPLog(__name__)
class Release(Plugin):
_database = {
'release': ReleaseIndex,
'release_status': ReleaseStatusIndex,
'release_identifier': ReleaseIDIndex,
'release_download': ReleaseDownloadIndex
}
def __init__(self):
addApiView('release.manual_download', self.manualDownload, docs = {
'desc': 'Send a release manually to the downloaders',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.delete', self.deleteView, docs = {
'desc': 'Delete releases',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.ignore', self.ignore, docs = {
'desc': 'Toggle ignore, for bad or wrong releases',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addEvent('release.add', self.add)
addEvent('release.download', self.download)
addEvent('release.try_download_result', self.tryDownloadResult)
addEvent('release.create_from_search', self.createFromSearch)
addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean)
addEvent('release.update_status', self.updateStatus)
addEvent('release.with_status', self.withStatus)
addEvent('release.for_media', self.forMedia)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone, priority = 1000)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
def cleanDone(self):
log.debug('Removing releases from dashboard')
now = time.time()
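# 604800 seconds = one week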
week = 604800
db = get_db()
# Get (and remove) parentless releases
releases = db.all('release', with_doc = False)
media_exist = []
reindex = 0
for release in releases:
if release.get('key') in media_exist:
continue
try:
try:
doc = db.get('id', release.get('_id'))
except RecordDeleted:
reindex += 1
continue
db.get('id', release.get('key'))
media_exist.append(release.get('key'))
try:
if doc.get('status') == 'ignore':
doc['status'] = 'ignored'
db.update(doc)
except:
log.error('Failed fixing mis-status tag: %s', traceback.format_exc())
except ValueError:
fireEvent('database.delete_corrupted', release.get('key'), traceback_error = traceback.format_exc(0))
reindex += 1
except RecordDeleted:
db.delete(doc)
log.debug('Deleted orphaned release: %s', doc)
reindex += 1
except:
log.debug('Failed cleaning up orphaned releases: %s', traceback.format_exc())
if reindex > 0:
db.reindex()
del media_exist
# get movies last_edit more than a week ago
medias = fireEvent('media.with_status', ['done', 'active'], single = True)
for media in medias:
if media.get('last_edit', 0) > (now - week):
continue
for rel in self.forMedia(media['_id']):
# Remove all available releases
if rel['status'] in ['available']:
self.delete(rel['_id'])
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the media
elif rel['status'] in ['snatched', 'downloaded']:
self.updateStatus(rel['_id'], status = 'ignored')
if 'recent' in media.get('tags', []):
fireEvent('media.untag', media.get('_id'), 'recent', single = True)
def add(self, group, update_info = True, update_id = None):
try:
db = get_db()
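# Release identifier combines the media identifier with audio and quality,
# e.g. 'tt0102926.dts.720p' (illustrative values).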
release_identifier = '%s.%s.%s' % (group['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier'])
# Add movie if it doesn't exist
try:
media = db.get('media', 'imdb-%s' % group['identifier'], with_doc = True)['doc']
except:
media = fireEvent('movie.add', params = {
'identifier': group['identifier'],
'profile_id': None,
}, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True)
release = None
if update_id:
try:
release = db.get('id', update_id)
release.update({
'identifier': release_identifier,
'last_edit': int(time.time()),
'status': 'done',
})
except:
log.error('Failed updating existing release: %s', traceback.format_exc())
else:
# Add Release
if not release:
release = {
'_t': 'release',
'media_id': media['_id'],
'identifier': release_identifier,
'quality': group['meta_data']['quality'].get('identifier'),
'is_3d': group['meta_data']['quality'].get('is_3d', 0),
'last_edit': int(time.time()),
'status': 'done'
}
try:
r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
r['media_id'] = media['_id']
except:
log.debug('Failed updating release by identifier "%s". Inserting new.', release_identifier)
r = db.insert(release)
# Update with ref and _id
release.update({
'_id': r['_id'],
'_rev': r['_rev'],
})
# Empty out empty file groups
release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
db.update(release)
fireEvent('media.restatus', media['_id'], allowed_restatus = ['done'], single = True)
return True
except:
log.error('Failed: %s', traceback.format_exc())
return False
def deleteView(self, id = None, **kwargs):
return {
'success': self.delete(id)
}
def delete(self, release_id):
try:
db = get_db()
rel = db.get('id', release_id)
db.delete(rel)
return True
except RecordDeleted:
log.debug('Already deleted: %s', release_id)
return True
except:
log.error('Failed: %s', traceback.format_exc())
return False
def clean(self, release_id):
try:
db = get_db()
rel = db.get('id', release_id)
raw_files = rel.get('files')
if len(raw_files) == 0:
self.delete(rel['_id'])
else:
files = {}
for file_type in raw_files:
for release_file in raw_files.get(file_type, []):
if os.path.isfile(sp(release_file)):
if file_type not in files:
files[file_type] = []
files[file_type].append(release_file)
rel['files'] = files
db.update(rel)
return True
except:
log.error('Failed: %s', traceback.format_exc())
return False
def ignore(self, id = None, **kwargs):
db = get_db()
try:
if id:
rel = db.get('id', id, with_doc = True)
self.updateStatus(id, 'available' if rel['status'] in ['ignored', 'failed'] else 'ignored')
return {
'success': True
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def manualDownload(self, id = None, **kwargs):
db = get_db()
try:
release = db.get('id', id)
item = release['info']
movie = db.get('id', release['media_id'])
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = self.download(data = item, media = movie, manual = True)
if success:
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success == True
}
except:
log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc()))
return {
'success': False
}
def download(self, data, media, manual = False):
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if not downloader_enabled:
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
return False
# Download NZB or torrent file
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
try:
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
except:
log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc()))
return False
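# Providers can return 'try_next' to ask the searcher to move on to the next result.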
if filedata == 'try_next':
return filedata
elif not filedata:
return False
# Send NZB or torrent file to downloader
download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
if not download_result:
log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
return False
log.debug('Downloader result: %s', download_result)
try:
db = get_db()
try:
rls = db.get('release_identifier', md5(data['url']), with_doc = True)['doc']
except:
log.error('No release found to store download information in')
return False
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
rls['download_info'] = download_result
db.update(rls)
log_movie = '%s (%s) in %s' % (getTitle(media), media['info'].get('year'), rls['quality'])
snatch_message = 'Snatched "%s": %s from %s' % (data.get('name'), log_movie, (data.get('provider', '') + data.get('provider_extra', '')))
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = media)
# Mark release as snatched
if renamer_enabled:
self.updateStatus(rls['_id'], status = 'snatched')
# If renamer isn't used, mark media done if finished or release downloaded
else:
if media['status'] == 'active':
profile = db.get('id', media['profile_id'])
if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single = True):
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls['_id'], status = 'done')
# Mark media done
fireEvent('media.restatus', media['_id'], single = True)
return True
# Assume release downloaded
self.updateStatus(rls['_id'], status = 'downloaded')
except:
log.error('Failed storing download status: %s', traceback.format_exc())
return False
return True
def tryDownloadResult(self, results, media, quality_custom):
wait_for = False
let_through = False
filtered_results = []
minimum_seeders = tryInt(Env.setting('minimum_seeders', section = 'torrent', default = 1))
# Filter out ignored and other releases we don't want
for rel in results:
if rel['status'] in ['ignored', 'failed']:
log.info('Ignored: %s', rel['name'])
continue
if rel['score'] < quality_custom.get('minimum_score'):
log.info('Ignored, score "%s" too low, need at least "%s": %s', (rel['score'], quality_custom.get('minimum_score'), rel['name']))
continue
if rel['size'] <= 50:
log.info('Ignored, size "%sMB" too low: %s', (rel['size'], rel['name']))
continue
if 'seeders' in rel and rel.get('seeders') < minimum_seeders:
log.info('Ignored, not enough seeders, has %s needs %s: %s', (rel.get('seeders'), minimum_seeders, rel['name']))
continue
# If a single release comes through the "wait for", let through all
rel['wait_for'] = False
if quality_custom.get('index') != 0 and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0):
rel['wait_for'] = True
else:
let_through = True
filtered_results.append(rel)
# Loop through filtered results
for rel in filtered_results:
# Only wait if not a single release is old enough
if rel.get('wait_for') and not let_through:
log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for') - rel.get('age'), rel['name']))
wait_for = True
continue
downloaded = fireEvent('release.download', data = rel, media = media, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
break
return wait_for
def createFromSearch(self, search_results, media, quality):
try:
db = get_db()
found_releases = []
is_3d = False
try: is_3d = quality['custom']['3d']
except: pass
for rel in search_results:
rel_identifier = md5(rel['url'])
release = {
'_t': 'release',
'identifier': rel_identifier,
'media_id': media.get('_id'),
'quality': quality.get('identifier'),
'is_3d': is_3d,
'status': rel.get('status', 'available'),
'last_edit': int(time.time()),
'info': {}
}
# Add downloader info if provided
try:
release['download_info'] = rel['download_info']
del rel['download_info']
except:
pass
try:
rls = db.get('release_identifier', rel_identifier, with_doc = True)['doc']
except:
rls = db.insert(release)
rls.update(release)
# Update info, but filter out functions
for info in rel:
try:
if not isinstance(rel[info], (str, unicode, int, long, float)):
continue
rls['info'][info] = toUnicode(rel[info]) if isinstance(rel[info], (str, unicode)) else rel[info]
except:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.update(rls)
# Update release in search_results
rel['status'] = rls.get('status')
if rel['status'] == 'available':
found_releases.append(rel_identifier)
return found_releases
except:
log.error('Failed: %s', traceback.format_exc())
return []
def updateStatus(self, release_id, status = None):
if not status: return False
try:
db = get_db()
rel = db.get('id', release_id)
if rel and rel.get('status') != status:
release_name = None
if rel.get('files'):
for file_type in rel.get('files', {}):
if file_type == 'movie':
for release_file in rel['files'][file_type]:
release_name = os.path.basename(release_file)
break
if not release_name and rel.get('info'):
release_name = rel['info'].get('name')
# Update status in db
log.debug('Marking release %s as %s', (release_name, status))
rel['status'] = status
rel['last_edit'] = int(time.time())
db.update(rel)
# Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status', data = rel)
return True
except:
log.error('Failed: %s', traceback.format_exc())
return False
def withStatus(self, status, with_doc = True):
db = get_db()
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('release_status', s):
if with_doc:
try:
doc = db.get('id', ms['_id'])
yield doc
except RecordNotFound:
log.debug('Record not found, skipping: %s', ms['_id'])
else:
yield ms
def forMedia(self, media_id):
db = get_db()
raw_releases = db.get_many('release', media_id)
releases = []
for r in raw_releases:
try:
doc = db.get('id', r.get('_id'))
releases.append(doc)
except RecordDeleted:
pass
except (ValueError, EOFError):
fireEvent('database.delete_corrupted', r.get('_id'), traceback_error = traceback.format_exc(0))
releases = sorted(releases, key = lambda k: k.get('info', {}).get('score', 0), reverse = True)
# Sort based on preferred search method
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
releases = sorted(releases, key = lambda k: k.get('info', {}).get('protocol', '')[:3], reverse = (download_preference == 'torrent'))
return releases or []
# ==== coderb0t/CouchPotatoServer | couchpotato/core/plugins/release/main.py | Python | gpl-3.0 ====
from edxmako.shortcuts import render_to_string
from pipeline.conf import settings
from pipeline.packager import Packager
from pipeline.utils import guess_type
from static_replace import try_staticfiles_lookup
def compressed_css(package_name):
package = settings.PIPELINE_CSS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages=package, js_packages={})
package = packager.package_for('css', package_name)
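# With pipeline compression enabled, render a single tag for the compressed
# bundle; otherwise compile and render each source file individually.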
if settings.PIPELINE:
return render_css(package, package.output_filename)
else:
paths = packager.compile(package.paths)
return render_individual_css(package, paths)
def render_css(package, path):
template_name = package.template_name or "mako/css.html"
context = package.extra_context
url = try_staticfiles_lookup(path)
context.update({
'type': guess_type(path, 'text/css'),
'url': url,
})
return render_to_string(template_name, context)
def render_individual_css(package, paths):
tags = [render_css(package, path) for path in paths]
return '\n'.join(tags)
def compressed_js(package_name):
package = settings.PIPELINE_JS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages={}, js_packages=package)
package = packager.package_for('js', package_name)
if settings.PIPELINE:
return render_js(package, package.output_filename)
else:
paths = packager.compile(package.paths)
templates = packager.pack_templates(package)
return render_individual_js(package, paths, templates)
def render_js(package, path):
template_name = package.template_name or "mako/js.html"
context = package.extra_context
context.update({
'type': guess_type(path, 'text/javascript'),
'url': try_staticfiles_lookup(path)
})
return render_to_string(template_name, context)
def render_inline_js(package, js):
context = package.extra_context
context.update({
'source': js
})
return render_to_string("mako/inline_js.html", context)
def render_individual_js(package, paths, templates=None):
tags = [render_js(package, js) for js in paths]
if templates:
tags.append(render_inline_js(package, templates))
return '\n'.join(tags)
# ==== liuqr/edx-xiaodun | common/djangoapps/pipeline_mako/__init__.py | Python | agpl-3.0 ====
self.description = "Install a package from a sync db with fnmatch'ed NoExtract"
sp = pmpkg("dummy")
sp.files = ["bin/dummy",
"usr/share/man/man8",
"usr/share/man/man1/dummy.1"]
self.addpkg2db("sync", sp)
self.option["NoExtract"] = ["usr/share/man/*"]
self.args = "-S %s" % sp.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
self.addrule("FILE_EXIST=bin/dummy")
self.addrule("!FILE_EXIST=usr/share/man/man8")
self.addrule("!FILE_EXIST=usr/share/man/man1/dummy.1")
# ==== kylon/pacman-fakeroot | test/pacman/tests/sync502.py | Python | gpl-2.0 ====
# -*- coding: utf-8 -*-
from . import project_sla
from . import analytic_account
from . import project_sla_control
from . import project_issue
from . import project_task
from . import report
# ==== sergiocorato/project-service | project_sla/__init__.py | Python | agpl-3.0 ====
from nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project)
invalid_name = 'invalid/name'
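# Write the invalid title straight to mongo to bypass model-level validation.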
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
# ==== rdhyee/osf.io | scripts/tests/test_remove_wiki_title_forward_slashes.py | Python | apache-2.0 ====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
path:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['name']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if
path is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for
some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now
superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
- "Brian Coca (@bcoca)"
- "Jérémie Astori (@astorije)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
- As of Ansible 2.0, this module only supports Linux distributions.
- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl:
path: /etc/foo.conf
entity: joe
etype: user
permissions: r
state: present
# Removes the acl for Joe on a specific file
- acl:
path: /etc/foo.conf
entity: joe
etype: user
state: absent
# Sets default acl for joe on foo.d
- acl:
path: /etc/foo.d
entity: joe
etype: user
permissions: rw
default: yes
state: present
# Same as previous but using entry shorthand
- acl:
path: /etc/foo.d
entry: "default:user:joe:rw-"
state: present
# Obtain the acl for a specific file
- acl:
path: /etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.pycompat24 import get_exception
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
d = None
if entry.lower().startswith("d"):
d = True
a.pop(0)
if len(a) == 2:
a.append(None)
t, e, p = a
t = t.lower()
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
return [d, t, e, p]
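# e.g. split_entry('default:user:joe:rw-') -> [True, 'user', 'joe', 'rw-']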
def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False):
'''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
if use_nfsv4_acls:
return ':'.join([etype, entity, permissions, 'allow'])
if permissions:
return etype + ':' + entity + ':' + permissions
else:
return etype + ':' + entity
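# e.g. build_entry('user', 'joe', 'rw') -> 'user:joe:rw'; build_entry('user', 'joe') -> 'user:joe'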
def build_command(module, mode, path, follow, default, recursive, entry=''):
'''Builds and returns a getfacl/setfacl command.'''
if mode == 'set':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-m "%s"' % entry)
elif mode == 'rm':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-x "%s"' % entry)
else: # mode == 'get'
cmd = [module.get_bin_path('getfacl', True)]
# prevents absolute path warnings and removes headers
if get_platform().lower() == 'linux':
cmd.append('--omit-header')
cmd.append('--absolute-names')
if recursive:
cmd.append('--recursive')
if not follow:
if get_platform().lower() == 'linux':
cmd.append('--physical')
elif get_platform().lower() == 'freebsd':
cmd.append('-h')
if default:
if mode == 'rm':
cmd.insert(1, '-k')
else: # mode == 'set' or mode == 'get'
cmd.insert(1, '-d')
cmd.append(path)
return cmd
def acl_changed(module, cmd):
'''Returns true if the provided command affects the existing ACLs, false otherwise.'''
# FreeBSD do not have a --test flag, so by default, it is safer to always say "true"
if get_platform().lower() == 'freebsd':
return True
cmd = cmd[:] # lists are mutables so cmd would be overwritten without this
cmd.insert(1, '--test')
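# With --test, setfacl only prints the resulting ACL; entries ending in '*,*'
# indicate no effective change, so any other line means the command would change something.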
lines = run_acl(module, cmd)
for line in lines:
if not line.endswith('*,*'):
return True
return False
def run_acl(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception:
e = get_exception()
module.fail_json(msg=e.strerror)
lines = []
for l in out.splitlines():
if not l.startswith('#'):
lines.append(l.strip())
if lines and not lines[-1].split():
# trim last line only when it is empty
return lines[:-1]
else:
return lines
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['name'], type='path'),
entry=dict(required=False, type='str'),
entity=dict(required=False, type='str', default=''),
etype=dict(
required=False,
choices=['other', 'user', 'group', 'mask'],
type='str'
),
permissions=dict(required=False, type='str'),
state=dict(
required=False,
default='query',
choices=['query', 'present', 'absent'],
type='str'
),
follow=dict(required=False, type='bool', default=True),
default=dict(required=False, type='bool', default=False),
recursive=dict(required=False, type='bool', default=False),
use_nfsv4_acls=dict(required=False, type='bool', default=False)
),
supports_check_mode=True,
)
if get_platform().lower() not in ['linux', 'freebsd']:
module.fail_json(msg="The acl module is not available on this system.")
path = module.params.get('path')
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
recursive = module.params.get('recursive')
use_nfsv4_acls = module.params.get('use_nfsv4_acls')
if not os.path.exists(path):
module.fail_json(msg="Path not found or not accessible.")
if state == 'query' and recursive:
module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
if not entry:
if state == 'absent' and permissions:
module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")
if state == 'absent' and not entity:
module.fail_json(msg="'entity' MUST be set when 'state=absent'.")
if state in ['present', 'absent'] and not etype:
module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")
if state == 'present' and not entry.count(":") in [2, 3]:
module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")
if state == 'absent' and not entry.count(":") in [1, 2]:
module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")
if state == 'query':
module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")
default_flag, etype, entity, permissions = split_entry(entry)
if default_flag is not None:
default = default_flag
if get_platform().lower() == 'freebsd':
if recursive:
module.fail_json(msg="recursive is not supported on that platform.")
changed = False
msg = ""
if state == 'present':
entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
command = build_command(
module, 'set', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command)
msg = "%s is present" % entry
elif state == 'absent':
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)  # keyword arg so the flag is not mistaken for permissions
command = build_command(
module, 'rm', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command, False)
msg = "%s is absent" % entry
elif state == 'query':
msg = "current acl"
acl = run_acl(
module,
build_command(module, 'get', path, follow, default, recursive)
)
module.exit_json(changed=changed, msg=msg, acl=acl)
if __name__ == '__main__':
main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/files/acl.py | Python | bsd-3-clause | 11,261 |
from __future__ import absolute_import
import psycopg2 as Database
# Some of these imports are unused, but they are inherited from other engines
# and should be available as part of the backend ``base.py`` namespace.
from django.db.backends.postgresql_psycopg2.base import ( # NOQA
DatabaseWrapper, DatabaseFeatures, DatabaseOperations, DatabaseClient,
DatabaseCreation, DatabaseIntrospection
)
from .decorators import (
capture_transaction_exceptions, auto_reconnect_cursor,
auto_reconnect_connection, less_shitty_error_messages
)
__all__ = ('DatabaseWrapper', 'DatabaseFeatures', 'DatabaseOperations',
           'DatabaseClient', 'DatabaseCreation', 'DatabaseIntrospection')
class CursorWrapper(object):
"""
A wrapper around the postgresql_psycopg2 backend which handles various events
from cursors, such as auto reconnects and lazy time zone evaluation.
"""
def __init__(self, db, cursor):
self.db = db
self.cursor = cursor
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
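    # The decorator stacks below wrap bottom-up: less_shitty_error_messages
    # sits closest to the raw cursor call, then auto_reconnect_cursor, with
    # capture_transaction_exceptions outermost at call time.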
@capture_transaction_exceptions
@auto_reconnect_cursor
@less_shitty_error_messages
def execute(self, sql, params=None):
if params is not None:
return self.cursor.execute(sql, params)
return self.cursor.execute(sql)
@capture_transaction_exceptions
@auto_reconnect_cursor
@less_shitty_error_messages
def executemany(self, sql, paramlist=()):
return self.cursor.executemany(sql, paramlist)
class DatabaseWrapper(DatabaseWrapper):
@auto_reconnect_connection
def _set_isolation_level(self, level):
return super(DatabaseWrapper, self)._set_isolation_level(level)
@auto_reconnect_connection
def _cursor(self, *args, **kwargs):
cursor = super(DatabaseWrapper, self)._cursor()
return CursorWrapper(self, cursor)
def close(self, reconnect=False):
"""
        This ensures we don't error if the connection has already been closed.
"""
if self.connection is not None:
if not self.connection.closed:
try:
self.connection.close()
except Database.InterfaceError:
# connection was already closed by something
# like pgbouncer idle timeout.
pass
self.connection = None
class DatabaseFeatures(DatabaseFeatures):
can_return_id_from_insert = True
def __init__(self, connection):
self.connection = connection
| Kryz/sentry | src/sentry/db/postgres/base.py | Python | bsd-3-clause | 2,640 |
from random import shuffle
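# Example (illustrative; expected runtime grows factorially with input size):
#   bogosort([3, 1, 2])  # -> [1, 2, 3]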
def bogosort(arr):
    while sorted(arr) != arr:
shuffle(arr)
    return arr
| warreee/Algorithm-Implementations | Bogosort/Python/jcla1/bogosort.py | Python | mit | 116
# Copyright 2011 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# UDP Header Format
#
# 0 7 8 15 16 23 24 31
# +--------+--------+--------+--------+
# | Source | Destination |
# | Port | Port |
# +--------+--------+--------+--------+
# | | |
# | Length | Checksum |
# +--------+--------+--------+--------+
# |
# | data octets ...
# +---------------- ...
#======================================================================
import struct
from packet_utils import *
from dhcp import *
from dns import *
from rip import *
from packet_base import packet_base
# We grab ipv4 later to prevent cyclic dependency
#_ipv4 = None
class udp(packet_base):
"UDP packet struct"
MIN_LEN = 8
def __init__(self, raw=None, prev=None, **kw):
#global _ipv4
#if not _ipv4:
# from ipv4 import ipv4
# _ipv4 = ipv4
packet_base.__init__(self)
self.prev = prev
self.srcport = 0
self.dstport = 0
self.len = 8
self.csum = 0
if raw is not None:
self.parse(raw)
self._init(kw)
def __str__(self):
s = '[UDP %s>%s l:%s c:%02x]' % (self.srcport, self.dstport,
self.len, self.csum)
return s
def parse(self, raw):
assert isinstance(raw, bytes)
self.raw = raw
dlen = len(raw)
if dlen < udp.MIN_LEN:
self.msg('(udp parse) warning UDP packet data too short to parse header: data len %u' % dlen)
return
(self.srcport, self.dstport, self.len, self.csum) \
= struct.unpack('!HHHH', raw[:udp.MIN_LEN])
self.hdr_len = udp.MIN_LEN
self.payload_len = self.len - self.hdr_len
self.parsed = True
if self.len < udp.MIN_LEN:
self.msg('(udp parse) warning invalid UDP len %u' % self.len)
return
if (self.dstport == dhcp.SERVER_PORT
or self.dstport == dhcp.CLIENT_PORT):
self.next = dhcp(raw=raw[udp.MIN_LEN:],prev=self)
elif (self.dstport == dns.SERVER_PORT
or self.srcport == dns.SERVER_PORT):
self.next = dns(raw=raw[udp.MIN_LEN:],prev=self)
elif ( (self.dstport == rip.RIP_PORT
or self.srcport == rip.RIP_PORT) ):
# and isinstance(self.prev, _ipv4)
# and self.prev.dstip == rip.RIP2_ADDRESS ):
self.next = rip(raw=raw[udp.MIN_LEN:],prev=self)
elif dlen < self.len:
self.msg('(udp parse) warning UDP packet data shorter than UDP len: %u < %u' % (dlen, self.len))
return
else:
self.payload = raw[udp.MIN_LEN:]
def hdr(self, payload):
self.len = len(payload) + udp.MIN_LEN
self.csum = self.checksum()
return struct.pack('!HHHH', self.srcport, self.dstport, self.len, self.csum)
def checksum(self, unparsed=False):
"""
Calculates the checksum.
If unparsed, calculates it on the raw, unparsed data. This is
useful for validating that it is correct on an incoming packet.
"""
if self.prev.__class__.__name__ != 'ipv4':
        self.msg('packet not in ipv4, cannot calculate checksum ' +
                 'over pseudo-header')
return 0
if unparsed:
payload_len = len(self.raw)
payload = self.raw
else:
if isinstance(self.next, packet_base):
payload = self.next.pack()
elif self.next is None:
payload = bytes()
else:
payload = self.next
payload_len = udp.MIN_LEN + len(payload)
ippacket = struct.pack('!IIBBH', self.prev.srcip.toUnsigned(),
self.prev.dstip.toUnsigned(),
0,
self.prev.protocol,
payload_len)
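        # '!IIBBH' lays out the standard IPv4 pseudo-header: source address,
        # destination address, a zero byte, the protocol number, and the
        # UDP length.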
if not unparsed:
myhdr = struct.pack('!HHHH', self.srcport, self.dstport,
payload_len, 0)
payload = myhdr + payload
r = checksum(ippacket + payload, 0, 9)
return 0xffff if r == 0 else r
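# Minimal usage sketch (illustrative; pack() comes from packet_base, and the
# keyword-style init relies on packet_base._init applying **kw attributes):
#   u = udp(srcport=5353, dstport=53)
#   u.payload = b'\x00' * 4
#   wire_bytes = u.pack()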
| srijanmishra/RouteFlow | pox/pox/lib/packet/udp.py | Python | apache-2.0 | 5,386 |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
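        # With the defaults above: int((101 - 2) / 2) = 49 blocks, leaving
        # node0 at chain work 100, one unit short of node1's 0x65 (101)
        # threshold; the single extra block generated later crosses it.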
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| afk11/bitcoin | test/functional/feature_minchainwork.py | Python | mit | 4,122 |
#!/usr/bin/python
"""
Ansible module to manage the ssh known_hosts file.
Copyright(c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this module. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
    - The C(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
      This is useful if, for example, you plan to use the M(git) module over ssh.
- If you have a very large number of host keys to manage, you will find the M(template) module more useful.
version_added: "1.9"
options:
name:
aliases: [ 'host' ]
description:
- The host to add or remove (must match a host specified in key)
required: true
default: null
key:
description:
- The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed).
        The key must be in the right format for ssh (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT")
required: false
default: null
path:
description:
- The known_hosts file to edit
required: no
default: "(homedir)+/.ssh/known_hosts"
hash_host:
description:
- Hash the hostname in the known_hosts file
required: no
default: no
version_added: "2.3"
state:
description:
- I(present) to add the host key, I(absent) to remove it.
choices: [ "present", "absent" ]
required: no
default: present
requirements: [ ]
author: "Matthew Vernon (@mcv21)"
'''
EXAMPLES = '''
- name: tell the host about our servers it might want to ssh to
known_hosts:
path: /etc/ssh/ssh_known_hosts
name: foo.com.invalid
key: "{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
'''
# Makes sure public host keys are present or absent in the given known_hosts
# file.
#
# Arguments
# =========
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
# state = absent|present (default: present)
import os
import os.path
import tempfile
import errno
import re
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import AnsibleModule
def enforce_state(module, params):
"""
Add or remove key.
"""
host = params["name"]
key = params.get("key", None)
port = params.get("port", None)
path = params.get("path")
hash_host = params.get("hash_host")
state = params.get("state")
# Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen", True)
# Trailing newline in files gets lost, so re-add if necessary
if key and key[-1] != '\n':
key += '\n'
if key is None and state != "absent":
module.fail_json(msg="No key specified when adding a host")
sanity_check(module, host, key, sshkeygen)
found, replace_or_add, found_line, key = search_for_host_key(module, host, key, hash_host, path, sshkeygen)
params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
# We will change state if found==True & state!="present"
# or found==False & state=="present"
# i.e found XOR (state=="present")
# Alternatively, if replace is true (i.e. key present, and we must change
# it)
if module.check_mode:
module.exit_json(changed=replace_or_add or (state == "present") != found,
diff=params['diff'])
# Now do the work.
# Only remove whole host if found and no key provided
if found and key is None and state == "absent":
module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
params['changed'] = True
# Next, add a new (or replacing) entry
if replace_or_add or found != (state == "present"):
try:
inf = open(path, "r")
except IOError:
e = get_exception()
if e.errno == errno.ENOENT:
inf = None
else:
module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
try:
outf = tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path))
if inf is not None:
for line_number, line in enumerate(inf):
if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
continue # skip this line to replace its key
outf.write(line)
inf.close()
if state == 'present':
outf.write(key)
outf.flush()
module.atomic_move(outf.name, path)
except (IOError, OSError):
e = get_exception()
module.fail_json(msg="Failed to write to file %s: %s" % (path, str(e)))
try:
outf.close()
except:
pass
params['changed'] = True
return params
def sanity_check(module, host, key, sshkeygen):
'''Check supplied key is sensible
host and key are parameters provided by the user; If the host
provided is inconsistent with the key supplied, then this function
quits, providing an error to the user.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
# If no key supplied, we're doing a removal, and have nothing to check here.
if key is None:
return
# Rather than parsing the key ourselves, get ssh-keygen to do it
# (this is essential for hashed keys, but otherwise useful, as the
# key question is whether ssh-keygen thinks the key matches the host).
# The approach is to write the key to a temporary file,
# and then attempt to look up the specified host in that file.
try:
outf = tempfile.NamedTemporaryFile(mode='w+')
outf.write(key)
outf.flush()
except IOError:
e = get_exception()
module.fail_json(msg="Failed to write to temporary file %s: %s" %
(outf.name, str(e)))
sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=True)
try:
outf.close()
except:
pass
if stdout == '': # host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
def search_for_host_key(module, host, key, hash_host, path, sshkeygen):
    '''search_for_host_key(module, host, key, hash_host, path, sshkeygen) -> (found, replace_or_add, found_line, key)
Looks up host and keytype in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
found (Boolean): is host found in path?
replace_or_add (Boolean): is the key in path different to that supplied by user?
found_line (int or None): the line where a key of the same type was found
if found=False, then replace is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
    if not os.path.exists(path):
return False, False, None, key
sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
# openssh >=6.4 has changed ssh-keygen behaviour such that it returns
# 1 if no host is found, whereas previously it returned 0
rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
return False, False, None, key # host not found, no other errors
if rc != 0: # something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
# If user supplied no key, we don't want to try and replace anything with it
if key is None:
return True, False, None, key
lines = stdout.split('\n')
new_key = normalize_known_hosts_key(key)
sshkeygen_command.insert(1, '-H')
rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
if rc not in (0, 1) or stderr != '': # something went wrong
module.fail_json(msg="ssh-keygen failed to hash host (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
hashed_lines = stdout.split('\n')
for lnum, l in enumerate(lines):
if l == '':
continue
elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
try:
# This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
# It always outputs the non-localized comment before the found key
found_line = int(re.search(r'found: line (\d+)', l).group(1))
            except (IndexError, AttributeError):
module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
else:
found_key = normalize_known_hosts_key(l)
if hash_host is True:
if found_key['host'][:3] == '|1|':
new_key['host'] = found_key['host']
else:
hashed_host = normalize_known_hosts_key(hashed_lines[lnum])
found_key['host'] = hashed_host['host']
key = key.replace(host, found_key['host'])
if new_key == found_key: # found a match
return True, False, found_line, key # found exactly the same key, don't replace
elif new_key['type'] == found_key['type']: # found a different key for the same key type
return True, True, found_line, key
# No match found, return found and replace, but no line
return True, True, None, key
def normalize_known_hosts_key(key):
'''
Transform a key, either taken from a known_host file or provided by the
user, into a normalized form.
The host part (which might include multiple hostnames or be hashed) gets
replaced by the provided host. Also, any spurious information gets removed
from the end (like the username@host tag usually present in hostkeys, but
absent in known_hosts files)
'''
    k = key.strip().split()  # trim trailing newline and split into fields
d = dict()
# The optional "marker" field, used for @cert-authority or @revoked
if k[0][0] == '@':
d['options'] = k[0]
d['host'] = k[1]
d['type'] = k[2]
d['key'] = k[3]
else:
d['host'] = k[0]
d['type'] = k[1]
d['key'] = k[2]
return d
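# Illustrative normalization (hypothetical key material):
#   normalize_known_hosts_key('example.com ssh-rsa AAAAB3Nza... comment@host')
#   -> {'host': 'example.com', 'type': 'ssh-rsa', 'key': 'AAAAB3Nza...'}
# A leading '@cert-authority' or '@revoked' marker is kept under 'options'.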
def compute_diff(path, found_line, replace_or_add, state, key):
diff = {
'before_header': path,
'after_header': path,
'before': '',
'after': '',
}
try:
inf = open(path, "r")
except IOError:
e = get_exception()
if e.errno == errno.ENOENT:
diff['before_header'] = '/dev/null'
else:
diff['before'] = inf.read()
inf.close()
lines = diff['before'].splitlines(1)
if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
del lines[found_line - 1]
if state == 'present' and (replace_or_add or found_line is None):
lines.append(key)
diff['after'] = ''.join(lines)
return diff
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['host']),
key=dict(required=False, type='str'),
path=dict(default="~/.ssh/known_hosts", type='path'),
hash_host=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
),
supports_check_mode=True
)
results = enforce_state(module, module.params)
module.exit_json(**results)
if __name__ == '__main__':
main()
| andreaso/ansible | lib/ansible/modules/system/known_hosts.py | Python | gpl-3.0 | 12,710 |
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
from __future__ import absolute_import, division, print_function
import functools
import py
import os
import re
import sys
import time
import pytest
from _pytest import nodes
from _pytest.config import filename_arg
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
else:
unichr = chr
unicode = str
long = int
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
(0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
)
_legal_xml_re = [
unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges if low < sys.maxunicode
]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode('#x%02X') % i
else:
return unicode('#x%04X') % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
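# For example, bin_xml_escape(u'ok\x01') renders the control character as the
# raw marker u'ok#x01', while ordinary text is simply XML-escaped.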
class _NodeReporter(object):
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def append(self, node):
self.xml.add_stats(type(node).__name__)
self.nodes.append(node)
def add_property(self, name, value):
self.properties.append((str(name), bin_xml_escape(value)))
def make_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.properties:
return Junit.properties([
Junit.property(name=name, value=value)
for name, value in self.properties
])
return ''
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
def to_xml(self):
testcase = Junit.testcase(time=self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return testcase
def _add_simple(self, kind, message, data=None):
data = bin_xml_escape(data)
node = kind(data, message=message)
self.append(node)
def write_captured_output(self, report):
for capname in ('out', 'err'):
content = getattr(report, 'capstd' + capname)
if content:
tag = getattr(Junit, 'system-' + capname)
self.append(tag(bin_xml_escape(content)))
def append_pass(self, report):
self.add_stats('passed')
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped,
"xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, (unicode, str)):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.error(bin_xml_escape(report.longrepr),
message="collection failure"))
def append_collect_skipped(self, report):
self._add_simple(
Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
if getattr(report, 'when', None) == 'teardown':
msg = "test teardown failure"
else:
msg = "test setup failure"
self._add_simple(
Junit.error, msg, report.longrepr)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped, "expected test failure", report.wasxfail
)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = bin_xml_escape(skipreason[9:])
self.append(
Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
type="pytest.skip",
message=skipreason))
self.write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_xml_property(request):
"""Add extra xml properties to the tag for the calling test.
The fixture is callable with ``(name, value)``, with value being automatically
xml-encoded.
"""
request.node.warn(
code='C3',
message='record_xml_property is an experimental feature',
)
xml = getattr(request.config, "_xml", None)
if xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
return node_reporter.add_property
else:
def add_property_noop(name, value):
pass
return add_property_noop
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption(
'--junitxml', '--junit-xml',
action="store",
dest="xmlpath",
metavar="path",
type=functools.partial(filename_arg, optname="--junitxml"),
default=None,
help="create junit-xml style report file at given path.")
group.addoption(
'--junitprefix', '--junit-prefix',
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output")
parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest")
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, 'slaveinput'):
config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"))
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
def mangle_test_address(address):
path, possible_open_bracket, params = address.partition('[')
names = path.split("::")
try:
names.remove('()')
except ValueError:
pass
# convert file path to dotted path
names[0] = names[0].replace(nodes.SEP, '.')
names[0] = _py_ext_re.sub("", names[0])
# put any params back
names[-1] += possible_open_bracket + params
return names
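# Illustrative: 'tests/test_foo.py::TestBar::test_baz[7]' becomes
# ['tests.test_foo', 'TestBar', 'test_baz[7]'].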
class LogXML(object):
def __init__(self, logfile, prefix, suite_name="pytest"):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.suite_name = suite_name
self.stats = dict.fromkeys([
'error',
'passed',
'failure',
'skipped',
], 0)
self.node_reporters = {} # nodeid -> _NodeReporter
self.node_reporters_ordered = []
self.global_properties = []
# List of reports that failed on call but teardown is pending.
self.open_reports = []
self.cnt_double_fail_tests = 0
def finalize(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
reporter = self.node_reporters.pop((nodeid, slavenode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
key = nodeid, slavenode
if key in self.node_reporters:
            # TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report):
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report):
"""handle a setup/call/teardown report, generating the appropriate
xml tags as necessary.
note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. for example:
usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
close_report = None
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
if report.when == "teardown":
# The following vars are needed when xdist plugin is used
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(rep for rep in self.open_reports
if (rep.nodeid == report.nodeid and
getattr(rep, "item_index", None) == report_ii and
getattr(rep, "worker_id", None) == report_wid
)
), None)
if close_report:
# We need to open new testcase in case we have failure in
# call and error in teardown in order to follow junit
# schema
self.finalize(close_report)
self.cnt_double_fail_tests += 1
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
self.open_reports.append(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
reporter = self._opentestcase(report)
reporter.write_captured_output(report)
self.finalize(report)
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(rep for rep in self.open_reports
if (rep.nodeid == report.nodeid and
getattr(rep, "item_index", None) == report_ii and
getattr(rep, "worker_id", None) == report_wid
)
), None)
if close_report:
self.open_reports.remove(close_report)
def update_testcase_duration(self, report):
"""accumulates total duration for nodeid from given report and updates
the Junit.testcase with the new total if already created.
"""
reporter = self.node_reporter(report)
reporter.duration += getattr(report, 'duration', 0.0)
def pytest_collectreport(self, report):
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
reporter = self.node_reporter('internal')
reporter.attrs.update(classname="pytest", name='internal')
reporter._add_simple(Junit.error, 'internal error', excrepr)
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = (self.stats['passed'] + self.stats['failure'] +
self.stats['skipped'] + self.stats['error'] -
self.cnt_double_fail_tests)
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
self._get_global_properties_node(),
[x.to_xml() for x in self.node_reporters_ordered],
name=self.suite_name,
errors=self.stats['error'],
failures=self.stats['failure'],
skips=self.stats['skipped'],
tests=numtests,
time="%.3f" % suite_time_delta, ).unicode(indent=0))
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-",
"generated xml file: %s" % (self.logfile))
def add_global_property(self, name, value):
self.global_properties.append((str(name), bin_xml_escape(value)))
def _get_global_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.global_properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.global_properties
]
)
return ''
| anthgur/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/junitxml.py | Python | mpl-2.0 | 15,681 |
import frappe
def execute():
duplicates = frappe.db.sql("""select email_group, email, count(name)
from `tabEmail Group Member`
group by email_group, email
having count(name) > 1""")
# delete all duplicates except 1
for email_group, email, count in duplicates:
frappe.db.sql("""delete from `tabEmail Group Member`
where email_group=%s and email=%s limit %s""", (email_group, email, count-1))
| hassanibi/erpnext | erpnext/patches/v6_2/remove_newsletter_duplicates.py | Python | gpl-3.0 | 407 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
        group['lines'] = list(valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
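# The result is a list of dicts, one per category, e.g. (hypothetical lines):
#   [{'category': <category Hardware>, 'lines': [l1, l2], 'subtotal': 300.0},
#    {'category': '', 'lines': [l3]}]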
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # Group by the layout category when one is set; lines without a
        # category all fall into a single '' group.
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_order = 'order_id, categ_sequence, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
| diogocs1/comps | web/addons/sale_layout/models/sale_layout.py | Python | apache-2.0 | 4,907 |
"""
Integration tests for third_party_auth LTI auth providers
"""
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from oauthlib.oauth1.rfc5849 import Client, SIGNATURE_TYPE_BODY
from third_party_auth.tests import testutil
FORM_ENCODED = 'application/x-www-form-urlencoded'
LTI_CONSUMER_KEY = 'consumer'
LTI_CONSUMER_SECRET = 'secret'
LTI_TPA_LOGIN_URL = 'http://testserver/auth/login/lti/'
LTI_TPA_COMPLETE_URL = 'http://testserver/auth/complete/lti/'
OTHER_LTI_CONSUMER_KEY = 'settings-consumer'
OTHER_LTI_CONSUMER_SECRET = 'secret2'
LTI_USER_ID = 'lti_user_id'
EDX_USER_ID = 'test_user'
EMAIL = 'lti_user@example.com'
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class IntegrationTestLTI(testutil.TestCase):
"""
Integration tests for third_party_auth LTI auth providers
"""
def setUp(self):
super(IntegrationTestLTI, self).setUp()
self.client.defaults['SERVER_NAME'] = 'testserver'
self.url_prefix = 'http://testserver'
self.configure_lti_provider(
name='Other Tool Consumer 1', enabled=True,
lti_consumer_key='other1',
lti_consumer_secret='secret1',
lti_max_timestamp_age=10,
)
self.configure_lti_provider(
name='LTI Test Tool Consumer', enabled=True,
lti_consumer_key=LTI_CONSUMER_KEY,
lti_consumer_secret=LTI_CONSUMER_SECRET,
lti_max_timestamp_age=10,
)
self.configure_lti_provider(
name='Tool Consumer with Secret in Settings', enabled=True,
lti_consumer_key=OTHER_LTI_CONSUMER_KEY,
lti_consumer_secret='',
lti_max_timestamp_age=10,
)
self.lti = Client(
client_key=LTI_CONSUMER_KEY,
client_secret=LTI_CONSUMER_SECRET,
signature_type=SIGNATURE_TYPE_BODY,
)
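    # SIGNATURE_TYPE_BODY makes oauthlib place the oauth_* fields (consumer
    # key, nonce, timestamp, signature) in the form-encoded body, which is
    # how LTI launch requests are signed.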
def test_lti_login(self):
# The user initiates a login from an external site
(uri, _headers, body) = self.lti.sign(
uri=LTI_TPA_LOGIN_URL, http_method='POST',
headers={'Content-Type': FORM_ENCODED},
body={
'user_id': LTI_USER_ID,
'custom_tpa_next': '/account/finish_auth/?course_id=my_course_id&enrollment_action=enroll',
}
)
login_response = self.client.post(path=uri, content_type=FORM_ENCODED, data=body)
# The user should be redirected to the registration form
self.assertEqual(login_response.status_code, 302)
self.assertTrue(login_response['Location'].endswith(reverse('signin_user')))
register_response = self.client.get(login_response['Location'])
self.assertEqual(register_response.status_code, 200)
self.assertIn('"currentProvider": "LTI Test Tool Consumer"', register_response.content)
self.assertIn('"errorMessage": null', register_response.content)
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
{
'email': EMAIL,
'name': 'Myself',
'username': EDX_USER_ID,
'honor_code': True,
}
)
self.assertEqual(ajax_register_response.status_code, 200)
continue_response = self.client.get(LTI_TPA_COMPLETE_URL)
# The user should be redirected to the finish_auth view which will enroll them.
# FinishAuthView.js reads the URL parameters directly from $.url
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(
continue_response['Location'],
'http://testserver/account/finish_auth/?course_id=my_course_id&enrollment_action=enroll'
)
# Now check that we can login again
self.client.logout()
self.verify_user_email(EMAIL)
(uri, _headers, body) = self.lti.sign(
uri=LTI_TPA_LOGIN_URL, http_method='POST',
headers={'Content-Type': FORM_ENCODED},
body={'user_id': LTI_USER_ID}
)
login_2_response = self.client.post(path=uri, content_type=FORM_ENCODED, data=body)
# The user should be redirected to the dashboard
self.assertEqual(login_2_response.status_code, 302)
self.assertEqual(login_2_response['Location'], LTI_TPA_COMPLETE_URL)
continue_2_response = self.client.get(login_2_response['Location'])
self.assertEqual(continue_2_response.status_code, 302)
self.assertTrue(continue_2_response['Location'].endswith(reverse('dashboard')))
# Check that the user was created correctly
user = User.objects.get(email=EMAIL)
self.assertEqual(user.username, EDX_USER_ID)
def test_reject_initiating_login(self):
response = self.client.get(LTI_TPA_LOGIN_URL)
self.assertEqual(response.status_code, 405) # Not Allowed
def test_reject_bad_login(self):
login_response = self.client.post(
path=LTI_TPA_LOGIN_URL, content_type=FORM_ENCODED,
data="invalid=login"
)
# The user should be redirected to the login page with an error message
# (auth_entry defaults to login for this provider)
self.assertEqual(login_response.status_code, 302)
self.assertTrue(login_response['Location'].endswith(reverse('signin_user')))
error_response = self.client.get(login_response['Location'])
self.assertIn(
'Authentication failed: LTI parameters could not be validated.',
error_response.content
)
def test_can_load_consumer_secret_from_settings(self):
lti = Client(
client_key=OTHER_LTI_CONSUMER_KEY,
client_secret=OTHER_LTI_CONSUMER_SECRET,
signature_type=SIGNATURE_TYPE_BODY,
)
(uri, _headers, body) = lti.sign(
uri=LTI_TPA_LOGIN_URL, http_method='POST',
headers={'Content-Type': FORM_ENCODED},
body={
'user_id': LTI_USER_ID,
'custom_tpa_next': '/account/finish_auth/?course_id=my_course_id&enrollment_action=enroll',
}
)
with self.settings(SOCIAL_AUTH_LTI_CONSUMER_SECRETS={OTHER_LTI_CONSUMER_KEY: OTHER_LTI_CONSUMER_SECRET}):
login_response = self.client.post(path=uri, content_type=FORM_ENCODED, data=body)
# The user should be redirected to the registration form
self.assertEqual(login_response.status_code, 302)
self.assertTrue(login_response['Location'].endswith(reverse('signin_user')))
register_response = self.client.get(login_response['Location'])
self.assertEqual(register_response.status_code, 200)
self.assertIn(
'"currentProvider": "Tool Consumer with Secret in Settings"',
register_response.content
)
self.assertIn('"errorMessage": null', register_response.content)
| solashirai/edx-platform | common/djangoapps/third_party_auth/tests/specs/test_lti.py | Python | agpl-3.0 | 7,079 |
"""
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class table rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# A couple of managers for testing managing overriding in proxy model cases.
class PersonManager(models.Manager):
def get_queryset(self):
return super(PersonManager, self).get_queryset().exclude(name="fred")
class SubManager(models.Manager):
def get_queryset(self):
return super(SubManager, self).get_queryset().exclude(name="wilma")
@python_2_unicode_compatible
class Person(models.Model):
"""
A simple concrete base class.
"""
name = models.CharField(max_length=50)
objects = PersonManager()
def __str__(self):
return self.name
class Abstract(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
class MyPerson(Person):
"""
A proxy subclass, this should not get a new table. Overrides the default
manager.
"""
class Meta:
proxy = True
ordering = ["name"]
permissions = (
("display_users", "May display users information"),
)
objects = SubManager()
other = PersonManager()
def has_special_name(self):
return self.name.lower() == "special"
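# With the managers above (illustrative):
#   Person.objects.all()   excludes rows named "fred" (PersonManager)
#   MyPerson.objects.all() excludes rows named "wilma" instead (SubManager)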
class ManagerMixin(models.Model):
excluder = SubManager()
class Meta:
abstract = True
class OtherPerson(Person, ManagerMixin):
"""
A class with the default manager from Person, plus an secondary manager.
"""
class Meta:
proxy = True
ordering = ["name"]
class StatusPerson(MyPerson):
"""
A non-proxy subclass of a proxy, it should get a new table.
"""
status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
class Meta:
proxy = True
class LowerStatusPerson(MyPersonProxy):
status = models.CharField(max_length=80)
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class UserProxy(User):
class Meta:
proxy = True
class UserProxyProxy(UserProxy):
class Meta:
proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
name = models.CharField(max_length=50)
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
def __str__(self):
return self.name
class StateProxy(State):
class Meta:
proxy = True
# Proxy models still works with filters (on related fields)
# and select_related, even when mixed with model inheritance
@python_2_unicode_compatible
class BaseUser(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return ':'.join((self.__class__.__name__, self.name,))
class TrackerUser(BaseUser):
status = models.CharField(max_length=50)
class ProxyTrackerUser(TrackerUser):
class Meta:
proxy = True
@python_2_unicode_compatible
class Issue(models.Model):
summary = models.CharField(max_length=255)
assignee = models.ForeignKey(ProxyTrackerUser, models.CASCADE, related_name='issues')
def __str__(self):
return ':'.join((self.__class__.__name__, self.summary,))
class Bug(Issue):
version = models.CharField(max_length=50)
reporter = models.ForeignKey(BaseUser, models.CASCADE)
class ProxyBug(Bug):
"""
Proxy of an inherited class
"""
class Meta:
proxy = True
class ProxyProxyBug(ProxyBug):
"""
A proxy of proxy model with related field
"""
class Meta:
proxy = True
class Improvement(Issue):
"""
A model that has relation to a proxy model
or to a proxy of proxy model
"""
version = models.CharField(max_length=50)
reporter = models.ForeignKey(ProxyTrackerUser, models.CASCADE)
associated_bug = models.ForeignKey(ProxyProxyBug, models.CASCADE)
class ProxyImprovement(Improvement):
class Meta:
proxy = True
| benjaminjkraft/django | tests/proxy_models/models.py | Python | bsd-3-clause | 4,514 |
"""
SCGI-->WSGI application proxy, "SWAP".
(Originally written by Titus Brown.)
This lets an SCGI front-end like mod_scgi be used to execute WSGI
application objects. To use it, subclass the SWAP class like so::
class TestAppHandler(swap.SWAP):
def __init__(self, *args, **kwargs):
self.prefix = '/canal'
self.app_obj = TestAppClass
swap.SWAP.__init__(self, *args, **kwargs)
where 'TestAppClass' is the application object from WSGI and '/canal'
is the prefix for what is served by the SCGI Web-server-side process.
Then execute the SCGI handler "as usual" by doing something like this::
scgi_server.SCGIServer(TestAppHandler, port=4000).serve()
and point mod_scgi (or whatever your SCGI front end is) at port 4000.
Kudos to the WSGI folk for writing a nice PEP & the Quixote folk for
writing a nice extensible SCGI server for Python!
"""
import six
import sys
import time
from scgi import scgi_server
def debug(msg):
timestamp = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(time.time()))
sys.stderr.write("[%s] %s\n" % (timestamp, msg))
class SWAP(scgi_server.SCGIHandler):
"""
SCGI->WSGI application proxy: let an SCGI server execute WSGI
application objects.
"""
app_obj = None
prefix = None
def __init__(self, *args, **kwargs):
assert self.app_obj, "must set app_obj"
assert self.prefix is not None, "must set prefix"
args = (self,) + args
scgi_server.SCGIHandler.__init__(*args, **kwargs)
def handle_connection(self, conn):
"""
Handle an individual connection.
"""
input = conn.makefile("r")
output = conn.makefile("w")
environ = self.read_env(input)
environ['wsgi.input'] = input
environ['wsgi.errors'] = sys.stderr
environ['wsgi.version'] = (1, 0)
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = True
environ['wsgi.run_once'] = False
# dunno how SCGI does HTTPS signalling; can't test it myself... @CTB
if environ.get('HTTPS','off') in ('on','1'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
## SCGI does some weird environ manglement. We need to set
## SCRIPT_NAME from 'prefix' and then set PATH_INFO from
## REQUEST_URI.
prefix = self.prefix
path = environ['REQUEST_URI'][len(prefix):].split('?', 1)[0]
environ['SCRIPT_NAME'] = prefix
environ['PATH_INFO'] = path
headers_set = []
headers_sent = []
chunks = []
def write(data):
chunks.append(data)
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
six.reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None # avoid dangling circular ref
elif headers_set:
raise AssertionError("Headers already set!")
headers_set[:] = [status, response_headers]
return write
###
result = self.app_obj(environ, start_response)
try:
for data in result:
chunks.append(data)
# Before the first output, send the stored headers
if not headers_set:
# Error -- the app never called start_response
status = '500 Server Error'
response_headers = [('Content-type', 'text/html')]
chunks = ["XXX start_response never called"]
else:
status, response_headers = headers_sent[:] = headers_set
output.write('Status: %s\r\n' % status)
for header in response_headers:
output.write('%s: %s\r\n' % header)
output.write('\r\n')
for data in chunks:
output.write(data)
finally:
if hasattr(result,'close'):
result.close()
# SCGI backends use connection closing to signal 'fini'.
try:
input.close()
output.close()
conn.close()
except IOError as err:
debug("IOError while closing connection ignored: %s" % err)
def serve_application(application, prefix, port=None, host=None, max_children=None):
"""
Serve the specified WSGI application via SCGI proxy.
``application``
The WSGI application to serve.
``prefix``
The prefix for what is served by the SCGI Web-server-side process.
``port``
Optional port to bind the SCGI proxy to. Defaults to SCGIServer's
default port value.
``host``
Optional host to bind the SCGI proxy to. Defaults to SCGIServer's
default host value.
    ``max_children``
Optional maximum number of child processes the SCGIServer will
spawn. Defaults to SCGIServer's default max_children value.
"""
class SCGIAppHandler(SWAP):
def __init__ (self, *args, **kwargs):
self.prefix = prefix
self.app_obj = application
SWAP.__init__(self, *args, **kwargs)
kwargs = dict(handler_class=SCGIAppHandler)
for kwarg in ('host', 'port', 'max_children'):
if locals()[kwarg] is not None:
kwargs[kwarg] = locals()[kwarg]
scgi_server.SCGIServer(**kwargs).serve()
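# Minimal usage sketch (illustrative; assumes mod_scgi is pointed at the port):
#
#   def app(environ, start_response):
#       start_response('200 OK', [('Content-type', 'text/plain')])
#       return ['Hello from SWAP\n']
#
#   serve_application(app, prefix='/canal', port=4000)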
| endlessm/chromium-browser | third_party/catapult/third_party/Paste/paste/util/scgiserver.py | Python | bsd-3-clause | 5,612 |
# orm/exc.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc, util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
    * An object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent".
.. versionadded:: 0.7.4
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
"""An mapping operation was requested for an unknown instance."""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, obj, msg=None):
if not msg:
try:
base.class_mapper(type(obj))
name = _safe_cls_name(type(obj))
msg = ("Class %r is mapped, but this instance lacks "
"instrumentation. This occurs when the instance"
"is created before sqlalchemy.orm.mapper(%s) "
"was called." % (name, name))
except UnmappedClassError:
msg = _default_unmapped(type(obj))
if isinstance(obj, type):
msg += (
'; was a class (%s) supplied where an instance was '
'required?' % _safe_cls_name(obj))
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
"""An mapping operation was requested for an unknown class."""
def __init__(self, cls, msg=None):
if not msg:
msg = _default_unmapped(cls)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.dependencies("sqlalchemy.orm.base")
def __init__(self, base, state, msg=None):
if not msg:
msg = "Instance '%s' has been deleted, or its "\
"row is otherwise not present." % base.state_str(state)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
def _safe_cls_name(cls):
try:
cls_name = '.'.join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, '__name__', None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
try:
mappers = base.manager_of_class(cls).mappers
except NO_STATE:
mappers = {}
except TypeError:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name
| pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/sqlalchemy/orm/exc.py | Python | mit | 5,439 |
#!/usr/bin/env python
"""
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <pearu@cens.ioc.ee>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
* Tests that send messages to stderr fail when executed from MSYS prompt
because the messages are lost at some point.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['exec_command', 'find_executable']
import os
import sys
import shlex
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
from numpy.distutils.compat import get_exception
from numpy.compat import open_latin1
def temp_file_name():
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
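def _example_find_python():
    # Editor's illustrative sketch (not part of the original module): locate
    # a command on PATH; find_executable returns an absolute path, or None
    # when the command cannot be found. 'python' is just an example name.
    exe = find_executable('python')
    if exe is None:
        raise RuntimeError('python not found on PATH')
    return exe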
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {}
for name in names:
env[name] = os.environ.get(name)
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def _supports_fileno(stream):
"""
Returns True if 'stream' supports the file descriptor and allows fileno().
"""
if hasattr(stream, 'fileno'):
try:
r = stream.fileno()
return True
except IOError:
return False
else:
return False
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
res : str
Both stdout and stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
# _exec_command is robust but slow, it relies on
# usable sys.std*.fileno() descriptors. If they
# are bad (like in win32 Idle, PyCrust environments)
# then _exec_command_python (even slower)
# will be used as a last resort.
#
# _exec_command_posix uses os.system and is faster
# but not on all platforms os.system will return
# a correct status.
if (_with_python and _supports_fileno(sys.stdout) and
sys.stdout.fileno() == -1):
st = _exec_command_python(command,
exec_command_dir = exec_dir,
**env)
elif os.name=='posix':
st = _exec_command_posix(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
else:
st = _exec_command(command, use_shell=use_shell,
use_tee=use_tee,**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
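def _example_exec_command():
    # Editor's illustrative sketch (not part of the original module): run a
    # command with a temporarily (re-)defined environment variable, assuming
    # a POSIX shell, and inspect the (status, output) pair that is returned.
    # This mirrors the test_posix checks further below.
    status, output = exec_command('echo "$GREETING"', GREETING='hello')
    assert status == 0, (status, output)
    return output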
def _exec_command_posix( command,
use_shell = None,
use_tee = None,
**env ):
log.debug('_exec_command_posix(...)')
if is_sequence(command):
command_str = ' '.join(list(command))
else:
command_str = command
tmpfile = temp_file_name()
stsfile = None
if use_tee:
stsfile = temp_file_name()
filter = ''
if use_tee == 2:
filter = r'| tr -cd "\n" | tr "\n" "."; echo'
command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\
% (command_str, stsfile, tmpfile, filter)
else:
stsfile = temp_file_name()
command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\
% (command_str, stsfile, tmpfile)
#command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)
log.debug('Running os.system(%r)' % (command_posix))
status = os.system(command_posix)
if use_tee:
if status:
# if command_tee fails then fall back to robust exec_command
log.warn('_exec_command_posix failed (status=%s)' % status)
return _exec_command(command, use_shell=use_shell, **env)
if stsfile is not None:
f = open_latin1(stsfile, 'r')
status_text = f.read()
status = int(status_text)
f.close()
os.remove(stsfile)
f = open_latin1(tmpfile, 'r')
text = f.read()
f.close()
os.remove(tmpfile)
if text[-1:]=='\n':
text = text[:-1]
return status, text
def _exec_command_python(command,
exec_command_dir='', **env):
log.debug('_exec_command_python(...)')
python_exe = get_pythonexe()
cmdfile = temp_file_name()
stsfile = temp_file_name()
outfile = temp_file_name()
f = open(cmdfile, 'w')
f.write('import os\n')
f.write('import sys\n')
f.write('sys.path.insert(0,%r)\n' % (exec_command_dir))
f.write('from exec_command import exec_command\n')
f.write('del sys.path[0]\n')
f.write('cmd = %r\n' % command)
f.write('os.environ = %r\n' % (os.environ))
f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env))
f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile))
f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile))
f.close()
cmd = '%s %s' % (python_exe, cmdfile)
status = os.system(cmd)
if status:
raise RuntimeError("%r failed" % (cmd,))
os.remove(cmdfile)
f = open_latin1(stsfile, 'r')
status = int(f.read())
f.close()
os.remove(stsfile)
f = open_latin1(outfile, 'r')
text = f.read()
f.close()
os.remove(outfile)
return status, text
def quote_arg(arg):
if arg[0]!='"' and ' ' in arg:
return '"%s"' % arg
return arg
def _exec_command( command, use_shell=None, use_tee = None, **env ):
log.debug('_exec_command(...)')
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
using_command = 0
if use_shell:
# We use shell (unless use_shell==0) so that wildcards can be
# used.
sh = os.environ.get('SHELL', '/bin/sh')
if is_sequence(command):
argv = [sh, '-c', ' '.join(list(command))]
else:
argv = [sh, '-c', command]
else:
        # On NT, DOS we avoid using command.com as its exit status is
# not related to the exit status of a command.
if is_sequence(command):
argv = command[:]
else:
argv = shlex.split(command)
if hasattr(os, 'spawnvpe'):
spawn_command = os.spawnvpe
else:
spawn_command = os.spawnve
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn('Executable %s does not exist' % (argv[0]))
if os.name in ['nt', 'dos']:
# argv[0] might be internal command
argv = [os.environ['COMSPEC'], '/C'] + argv
using_command = 1
_so_has_fileno = _supports_fileno(sys.stdout)
_se_has_fileno = _supports_fileno(sys.stderr)
so_flush = sys.stdout.flush
se_flush = sys.stderr.flush
if _so_has_fileno:
so_fileno = sys.stdout.fileno()
so_dup = os.dup(so_fileno)
if _se_has_fileno:
se_fileno = sys.stderr.fileno()
se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile, 'w')
if using_command:
errfile = temp_file_name()
ferr = open(errfile, 'w')
log.debug('Running %s(%s,%r,%r,os.environ)' \
% (spawn_command.__name__, os.P_WAIT, argv[0], argv))
if sys.version_info[0] >= 3 and os.name == 'nt':
# Pre-encode os.environ, discarding un-encodable entries,
# to avoid it failing during encoding as part of spawn. Failure
# is possible if the environment contains entries that are not
# encoded using the system codepage as windows expects.
#
# This is not necessary on unix, where os.environ is encoded
# using the surrogateescape error handler and decoded using
# it as part of spawn.
encoded_environ = {}
for k, v in os.environ.items():
try:
encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(
sys.getfilesystemencoding())
except UnicodeEncodeError:
log.debug("ignoring un-encodable env entry %s", k)
else:
encoded_environ = os.environ
argv0 = argv[0]
if not using_command:
argv[0] = quote_arg(argv0)
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(fout.fileno(), so_fileno)
if _se_has_fileno:
if using_command:
#XXX: disabled for now as it does not work from cmd under win32.
# Tests fail on msys
os.dup2(ferr.fileno(), se_fileno)
else:
os.dup2(fout.fileno(), se_fileno)
try:
status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
except Exception:
errmess = str(get_exception())
status = 999
sys.stderr.write('%s: %s'%(errmess, argv[0]))
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(so_dup, so_fileno)
os.close(so_dup)
if _se_has_fileno:
os.dup2(se_dup, se_fileno)
os.close(se_dup)
fout.close()
fout = open_latin1(outfile, 'r')
text = fout.read()
fout.close()
os.remove(outfile)
if using_command:
ferr.close()
ferr = open_latin1(errfile, 'r')
errmess = ferr.read()
ferr.close()
os.remove(errfile)
if errmess and not status:
# Not sure how to handle the case where errmess
# contains only warning messages and that should
# not be treated as errors.
#status = 998
if text:
text = text + '\n'
#text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
text = text + errmess
print (errmess)
if text[-1:]=='\n':
text = text[:-1]
if status is None:
status = 0
if use_tee:
print (text)
return status, text
def test_nt(**kws):
pythonexe = get_pythonexe()
echo = find_executable('echo')
using_cygwin_echo = echo != 'echo'
if using_cygwin_echo:
log.warn('Using cygwin echo in win32 environment is not supported')
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\',\'\')"')
assert s==0 and o=='', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\')"',
AAA='Tere')
assert s==0 and o=='Tere', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"',
BBB='Hey')
assert s==0 and o=='Hey', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi', (s, o)
elif 0:
s, o=exec_command('echo Hello')
assert s==0 and o=='Hello', (s, o)
s, o=exec_command('echo a%AAA%')
assert s==0 and o=='a', (s, o)
s, o=exec_command('echo a%AAA%', AAA='Tere')
assert s==0 and o=='aTere', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi', (s, o)
s, o=exec_command('echo a%BBB%', BBB='Hey')
assert s==0 and o=='aHey', (s, o)
s, o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi', (s, o)
s, o=exec_command('this_is_not_a_command')
assert s and o!='', (s, o)
s, o=exec_command('type not_existing_file')
assert s and o!='', (s, o)
s, o=exec_command('echo path=%path%')
assert s==0 and o!='', (s, o)
s, o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \
% pythonexe)
assert s==0 and o=='win32', (s, o)
s, o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe)
assert s==1 and o, (s, o)
s, o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\
% pythonexe)
assert s==0 and o=='012', (s, o)
s, o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe)
assert s==15 and o=='', (s, o)
s, o=exec_command('%s -c "print \'Heipa\'"' % pythonexe)
assert s==0 and o=='Heipa', (s, o)
print ('ok')
def test_posix(**kws):
s, o=exec_command("echo Hello",**kws)
assert s==0 and o=='Hello', (s, o)
s, o=exec_command('echo $AAA',**kws)
assert s==0 and o=='', (s, o)
s, o=exec_command('echo "$AAA"',AAA='Tere',**kws)
assert s==0 and o=='Tere', (s, o)
s, o=exec_command('echo "$AAA"',**kws)
assert s==0 and o=='', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi', (s, o)
s, o=exec_command('echo "$BBB"',BBB='Hey',**kws)
assert s==0 and o=='Hey', (s, o)
s, o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi', (s, o)
s, o=exec_command('this_is_not_a_command',**kws)
assert s!=0 and o!='', (s, o)
s, o=exec_command('echo path=$PATH',**kws)
assert s==0 and o!='', (s, o)
s, o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws)
assert s==0 and o=='posix', (s, o)
s, o=exec_command('python -c "raise \'Ignore me.\'"',**kws)
assert s==1 and o, (s, o)
s, o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws)
assert s==0 and o=='012', (s, o)
s, o=exec_command('python -c "import sys;sys.exit(15)"',**kws)
assert s==15 and o=='', (s, o)
s, o=exec_command('python -c "print \'Heipa\'"',**kws)
assert s==0 and o=='Heipa', (s, o)
print ('ok')
def test_execute_in(**kws):
pythonexe = get_pythonexe()
tmpfile = temp_file_name()
fn = os.path.basename(tmpfile)
tmpdir = os.path.dirname(tmpfile)
f = open(tmpfile, 'w')
f.write('Hello')
f.close()
s, o = exec_command('%s -c "print \'Ignore the following IOError:\','\
'open(%r,\'r\')"' % (pythonexe, fn),**kws)
assert s and o!='', (s, o)
s, o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe, fn),
execute_in = tmpdir,**kws)
assert s==0 and o=='Hello', (s, o)
os.remove(tmpfile)
print ('ok')
def test_svn(**kws):
s, o = exec_command(['svn', 'status'],**kws)
assert s, (s, o)
print ('svn ok')
def test_cl(**kws):
if os.name=='nt':
s, o = exec_command(['cl', '/V'],**kws)
assert s, (s, o)
print ('cl ok')
if os.name=='posix':
test = test_posix
elif os.name in ['nt', 'dos']:
test = test_nt
else:
raise NotImplementedError('exec_command tests for ', os.name)
############################################################
if __name__ == "__main__":
test(use_tee=0)
test(use_tee=1)
test_execute_in(use_tee=0)
test_execute_in(use_tee=1)
test_svn(use_tee=1)
test_cl(use_tee=1)
| LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/distutils/exec_command.py | Python | bsd-2-clause | 20,462 |
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
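def _example_direct_solve():
    # Editor's illustrative sketch (not part of the original test module):
    # solve test problem F directly with Broyden's first method, mirroring
    # what _check_nonlin_func below does for every solver in SOLVERS.
    x = nonlin.broyden1(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0)
    assert np.absolute(F(x)).max() < 1e-2
    return x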
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@dec.knownfailureif(True)
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
yield self._check_func_fail, f, func
continue
yield self._check_nonlin_func, f, func
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
yield self._check_func_fail, f, meth
continue
yield self._check_root, f, meth
class TestSecant(TestCase):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
        conditions for the last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
if __name__ == "__main__":
run_module_suite()
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/optimize/tests/test_nonlin.py | Python | gpl-2.0 | 15,160 |
# databases/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre-0.6 versions.
"""
from ..dialects.sqlite import base as sqlite
from ..dialects.postgresql import base as postgresql
postgres = postgresql
from ..dialects.mysql import base as mysql
from ..dialects.drizzle import base as drizzle
from ..dialects.oracle import base as oracle
from ..dialects.firebird import base as firebird
from ..dialects.mssql import base as mssql
from ..dialects.sybase import base as sybase
__all__ = (
'drizzle',
'firebird',
'mssql',
'mysql',
'postgresql',
'sqlite',
'oracle',
'sybase',
)
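def _example_legacy_alias():
    # Editor's illustrative sketch (not part of the original module): the
    # names re-exported here are the very module objects from
    # sqlalchemy.dialects, so the legacy and modern import paths are
    # interchangeable.
    from sqlalchemy.dialects.postgresql import base as pg_base
    assert postgresql is pg_base
    return postgresql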
| jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/databases/__init__.py | Python | mit | 881 |
class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
print(repr(e))
print(e.args)
try:
raise MyExc("Some error")
except MyExc as e:
print("Caught exception:", repr(e))
try:
raise MyExc("Some error2")
except Exception as e:
print("Caught exception:", repr(e))
try:
raise MyExc("Some error2")
except:
print("Caught user exception")
| AriZuu/micropython | tests/basics/subclass_native3.py | Python | mit | 376 |
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""MX-like base classes."""
import cStringIO
import struct
import dns.exception
import dns.rdata
import dns.name
class MXBase(dns.rdata.Rdata):
"""Base class for rdata that is like an MX record.
@ivar preference: the preference value
@type preference: int
@ivar exchange: the exchange name
@type exchange: dns.name.Name object"""
__slots__ = ['preference', 'exchange']
def __init__(self, rdclass, rdtype, preference, exchange):
super(MXBase, self).__init__(rdclass, rdtype)
self.preference = preference
self.exchange = exchange
def to_text(self, origin=None, relativize=True, **kw):
exchange = self.exchange.choose_relativity(origin, relativize)
return '%d %s' % (self.preference, exchange)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
preference = tok.get_uint16()
exchange = tok.get_name()
exchange = exchange.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, preference, exchange)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
pref = struct.pack("!H", self.preference)
file.write(pref)
self.exchange.to_wire(file, compress, origin)
def to_digestable(self, origin = None):
return struct.pack("!H", self.preference) + \
self.exchange.to_digestable(origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(preference, ) = struct.unpack('!H', wire[current : current + 2])
current += 2
rdlen -= 2
(exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
        if origin is not None:
exchange = exchange.relativize(origin)
return cls(rdclass, rdtype, preference, exchange)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.exchange = self.exchange.choose_relativity(origin, relativize)
def _cmp(self, other):
sp = struct.pack("!H", self.preference)
op = struct.pack("!H", other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.exchange, other.exchange)
return v
class UncompressedMX(MXBase):
"""Base class for rdata that is like an MX record, but whose name
is not compressed when converted to DNS wire format, and whose
digestable form is not downcased."""
def to_wire(self, file, compress = None, origin = None):
super(UncompressedMX, self).to_wire(file, None, origin)
def to_digestable(self, origin = None):
f = cStringIO.StringIO()
self.to_wire(f, None, origin)
return f.getvalue()
class UncompressedDowncasingMX(MXBase):
"""Base class for rdata that is like an MX record, but whose name
    is not compressed when converted to DNS wire format."""
def to_wire(self, file, compress = None, origin = None):
super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
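class _ExampleMX(MXBase):
    """Editor's illustrative sketch, not part of the original module.

    Concrete MX-like rdata types are created by subclassing one of the base
    classes above; the standard MX record type is essentially this trivial
    subclass, inheriting text/wire parsing and comparison from MXBase.
    """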
| enigmamarketing/csf-allow-domains | usr/local/csf/bin/csf-allow-domains/dns/rdtypes/mxbase.py | Python | mit | 3,968 |
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
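def _example_learning_curve():
    # Editor's illustrative sketch (not part of the original module): compute
    # a learning curve for a small classifier on synthetic data; averaging
    # the per-fold scores gives the values that are usually plotted.
    from sklearn.datasets import make_classification
    from sklearn.naive_bayes import GaussianNB
    X, y = make_classification(n_samples=200, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=3)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)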
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
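def _example_validation_curve():
    # Editor's illustrative sketch (not part of the original module): score an
    # SVM across a range of its ``gamma`` parameter; each returned array has
    # one row per parameter value and one column per CV fold.
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=param_range, cv=3)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)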
| Achuth17/scikit-learn | sklearn/learning_curve.py | Python | bsd-3-clause | 13,467 |