id | text | dataset_id |
---|---|---|
9745155 | # USAGE
# python /home/nmorales/cxgn/DroneImageScripts/ImageProcess/RemoveBackground.py --image_path /folder/mypic.png --outfile_path /export/mychoppedimages/outimage.png --lower_threshold 25 --upper_threshold 255
# import the necessary packages
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image_path", required=True, help="image path")
ap.add_argument("-o", "--outfile_path", required=True, help="file path directory where the output will be saved")
ap.add_argument("-l", "--lower_threshold", required=True, help="lower threshold value; pixels at or below it are set to zero")
ap.add_argument("-u", "--upper_threshold", required=True, help="upper threshold value (passed as maxval, which THRESH_TOZERO ignores)")
args = vars(ap.parse_args())
input_image = args["image_path"]
outfile_path = args["outfile_path"]
upper_thresh = args["upper_threshold"]
lower_thresh = args["lower_threshold"]
src = cv2.imread(input_image, cv2.IMREAD_GRAYSCALE)
# THRESH_TOZERO zeroes every pixel at or below the lower threshold; the maxval
# argument (the upper threshold here) is ignored by this threshold type.
th, dst = cv2.threshold(src, int(float(lower_thresh)), int(float(upper_thresh)), cv2.THRESH_TOZERO)
#cv2.imshow("Result", dst)
cv2.imwrite(outfile_path, dst)
#cv2.waitKey(0)
| StarcoderdataPython |
11367794 | <reponame>tdiprima/code
class itemproperty(object):
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
if doc is None and fget is not None and hasattr(fget, "__doc__"):
doc = fget.__doc__
self._get = fget
self._set = fset
self._del = fdel
self.__doc__ = doc
def __get__(self, instance, owner):
if instance is None:
return self
else:
return bounditemproperty(self, instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def getter(self, fget):
return itemproperty(fget, self._set, self._del, self.__doc__)
def setter(self, fset):
return itemproperty(self._get, fset, self._del, self.__doc__)
def deleter(self, fdel):
return itemproperty(self._get, self._set, fdel, self.__doc__)
class bounditemproperty(object):
def __init__(self, item_property, instance):
self.__item_property = item_property
self.__instance = instance
def __getitem__(self, key):
fget = self.__item_property._get
if fget is None:
raise AttributeError("unreadable attribute item")
return fget(self.__instance, key)
def __setitem__(self, key, value):
fset = self.__item_property._set
if fset is None:
raise AttributeError("can't set attribute item")
fset(self.__instance, key, value)
def __delitem__(self, key):
fdel = self.__item_property._del
if fdel is None:
raise AttributeError("can't delete attribute item")
fdel(self.__instance, key)
if __name__ == "__main__":
class Element(object):
def __init__(self, tag, value=None):
self.tag = tag
self.value = value
self.children = {}
@itemproperty
def xpath(self, path):
"""Get or set the value at a relative path."""
path = path.split('/')
element = self
for tag in path:
if tag in element.children:
element = element.children[tag]
else:
raise KeyError('path does not exist')
return element.value
@xpath.setter
def xpath(self, path, value):
path = path.split('/')
element = self
for tag in path:
element = element.children.setdefault(tag, Element(tag))
element.value = value
@xpath.deleter
def xpath(self, path):
path = path.split('/')
element = self
for tag in path[:-1]:
if tag in element.children:
element = element.children[tag]
else:
raise KeyError('path does not exist')
tag = path[-1]
if tag in element.children:
del element.children[tag]
else:
raise KeyError('path does not exist')
tree = Element('root')
tree.xpath['unladen/swallow'] = 'african'
assert tree.xpath['unladen/swallow'] == 'african'
assert tree.children['unladen'].xpath['swallow'] == 'african'
assert tree.children['unladen'].children['swallow'].value == 'african'
tree.xpath['unladen/swallow'] = 'european'
assert tree.xpath['unladen/swallow'] == 'european'
assert len(tree.children) == 1
assert len(tree.children['unladen'].children) == 1
tree.xpath['unladen/swallow/airspeed'] = 42
assert tree.xpath['unladen/swallow'] == 'european'
assert tree.xpath['unladen/swallow/airspeed'] == 42
del tree.xpath['unladen/swallow']
assert 'swallow' not in tree.children['unladen'].children
try:
tree.xpath['unladen/swallow/airspeed']
except KeyError:
pass
else:
assert False
| StarcoderdataPython |
6610117 | <filename>bbcprc/old/files.py
import contextlib
import os
def with_suffix(root, suffix=None):
for f in os.listdir(root):
if not suffix or f.endswith(suffix):
yield os.path.join(root, f)
@contextlib.contextmanager
def delete_on_fail(fname, mode='wb', open=open, delete=True):
with open(fname, mode) as fp:
try:
yield fp
except Exception:
if delete:
try:
os.remove(fname)
except Exception:
pass
raise
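# A hypothetical usage sketch (not part of the original module): delete_on_fail
# yields an open file handle and removes the file when the body raises, so
# callers never observe a partially written file. The names below are assumptions.
#
#   with delete_on_fail('/tmp/out.bin') as fp:
#       fp.write(serialize(payload))  # '/tmp/out.bin' is deleted if this raises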
| StarcoderdataPython |
11207087 | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the different models available in the lightweightMMM lib.
Currently this file contains a main model with three possible options for
processing the media data. Which essentially grants the possibility of building
three different models.
- Adstock
- Hill-Adstock
- Carryover
"""
from typing import Any, Callable, Mapping, Optional
import frozendict
import jax.numpy as jnp
import numpyro
from numpyro import distributions as dist
from lightweight_mmm import media_transforms
def transform_adstock(media_data: jnp.ndarray,
normalise: bool = True) -> jnp.ndarray:
"""Transforms the input data with the adstock function and exponent.
Args:
media_data: Media data to be transformed.
normalise: Whether to normalise the output values.
Returns:
The transformed media data.
"""
with numpyro.plate("lag_weight_plate", media_data.shape[1]):
lag_weight = numpyro.sample("lag_weight",
dist.Beta(concentration1=2., concentration0=1.))
with numpyro.plate("exponent_plate", media_data.shape[1]):
exponent = numpyro.sample("exponent",
dist.Beta(concentration1=9., concentration0=1.))
adstock = media_transforms.adstock(
data=media_data, lag_weight=lag_weight, normalise=normalise)
return media_transforms.apply_exponent_safe(data=adstock, exponent=exponent)
def transform_hill_adstock(media_data: jnp.ndarray,
normalise: bool = True) -> jnp.ndarray:
"""Transforms the input data with the adstock and hill functions.
Args:
media_data: Media data to be transformed.
normalise: Whether to normalise the output values.
Returns:
The transformed media data.
"""
with numpyro.plate("lag_weight_plate", media_data.shape[1]):
lag_weight = numpyro.sample("lag_weight",
dist.Beta(concentration1=2., concentration0=1.))
with numpyro.plate("half_max_effective_concentration_plate",
media_data.shape[1]):
half_max_effective_concentration = numpyro.sample(
"half_max_effective_concentration",
dist.Gamma(concentration=1., rate=1.))
with numpyro.plate("slope_plate", media_data.shape[1]):
slope = numpyro.sample("slope", dist.Gamma(concentration=1., rate=1.))
return media_transforms.hill(
data=media_transforms.adstock(
data=media_data, lag_weight=lag_weight, normalise=normalise),
half_max_effective_concentration=half_max_effective_concentration,
slope=slope)
def transform_carryover(media_data: jnp.ndarray,
number_lags: int = 13) -> jnp.ndarray:
"""Transforms the input data with the carryover function and exponent.
Args:
media_data: Media data to be transformed.
number_lags: Number of lags for the carryover function.
Returns:
The transformed media data.
"""
with numpyro.plate("ad_effect_retention_rate_plate", media_data.shape[1]):
ad_effect_retention_rate = numpyro.sample(
"ad_effect_retention_rate",
dist.Beta(concentration1=1., concentration0=1.))
with numpyro.plate("peak_effect_delay_plate", media_data.shape[1]):
peak_effect_delay = numpyro.sample("peak_effect_delay",
dist.HalfNormal(scale=2.))
with numpyro.plate("exponent_plate", media_data.shape[1]):
exponent = numpyro.sample("exponent",
dist.Beta(concentration1=9., concentration0=1.))
carryover = media_transforms.carryover(
data=media_data,
ad_effect_retention_rate=ad_effect_retention_rate,
peak_effect_delay=peak_effect_delay,
number_lags=number_lags)
return media_transforms.apply_exponent_safe(data=carryover, exponent=exponent)
def media_mix_model(media_data: jnp.ndarray,
target_data: jnp.ndarray,
cost_prior: jnp.ndarray,
degrees_seasonality: int,
frequency: int,
transform_function: Callable[[jnp.array], jnp.array],
transform_kwargs: Mapping[str,
Any] = frozendict.frozendict(),
weekday_seasonality: bool = False,
extra_features: Optional[jnp.array] = None) -> None:
"""Media mix model.
Args:
    media_data: Media data to be used in the model.
target_data: Target data for the model.
cost_prior: Cost prior for each of the media channels.
degrees_seasonality: Number of degrees of seasonality to use.
frequency: Frequency of the time span which was used to aggregate the data.
Eg. if weekly data then frequency is 52.
transform_function: Function to use to transform the media data in the
model. Currently the following are supported: 'transform_adstock',
'transform_carryover' and 'transform_hill_adstock'.
    transform_kwargs: Any extra keyword arguments to pass to the transform
      function. For example the adstock function can take a boolean to
      normalise the output or not.
weekday_seasonality: In case of daily data you can estimate a weekday (7)
parameter.
extra_features: Extra features data to include in the model.
"""
data_size = media_data.shape[0]
intercept = numpyro.sample("intercept", dist.Normal(loc=0., scale=2.))
sigma = numpyro.sample("sigma", dist.Gamma(concentration=1., rate=1.))
beta_trend = numpyro.sample("beta_trend", dist.Normal(loc=0., scale=1.))
expo_trend = numpyro.sample("expo_trend",
dist.Beta(concentration1=1., concentration0=1.))
with numpyro.plate("media_plate", media_data.shape[1]) as i:
beta_media = numpyro.sample("beta_media",
dist.HalfNormal(scale=cost_prior[i]))
with numpyro.plate("gamma_seasonality_plate", 2):
with numpyro.plate("seasonality_plate", degrees_seasonality):
gamma_seasonality = numpyro.sample("gamma_seasonality",
dist.Normal(loc=0., scale=1.))
if weekday_seasonality:
with numpyro.plate("weekday_plate", 7):
weekday = numpyro.sample("weekday", dist.Normal(loc=0., scale=.5))
weekday_series = weekday[jnp.arange(data_size) % 7]
media_transformed = numpyro.deterministic(
name="media_transformed",
value=transform_function(media_data, **transform_kwargs))
seasonality = media_transforms.calculate_seasonality(
number_periods=data_size,
degrees=degrees_seasonality,
frequency=frequency,
gamma_seasonality=gamma_seasonality)
# expo_trend is B(1, 1) so that the exponent on time is in [.5, 1.5].
prediction = (
intercept + beta_trend * jnp.arange(data_size) ** (expo_trend + 0.5) +
seasonality + media_transformed.dot(beta_media))
if extra_features is not None:
with numpyro.plate("extra_features_plate", extra_features.shape[1]):
beta_extra_features = numpyro.sample("beta_extra_features",
dist.Normal(loc=0., scale=1.))
prediction += extra_features.dot(beta_extra_features)
if weekday_seasonality:
prediction += weekday_series
mu = numpyro.deterministic(name="mu", value=prediction)
numpyro.sample(
name="target", fn=dist.Normal(loc=mu, scale=sigma), obs=target_data)
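# A hypothetical usage sketch (not part of this module): media_mix_model is an
# ordinary NumPyro model, so it can be fitted with NUTS. All data variables
# below are assumptions.
#
#   import jax
#   from numpyro.infer import MCMC, NUTS
#
#   mcmc = MCMC(NUTS(media_mix_model), num_warmup=1000, num_samples=1000)
#   mcmc.run(jax.random.PRNGKey(0), media_data=media, target_data=target,
#            cost_prior=costs, degrees_seasonality=2, frequency=52,
#            transform_function=transform_adstock)
#   samples = mcmc.get_samples()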
| StarcoderdataPython |
24685 | <gh_stars>1-10
import gzip
import numpy as np
import os
import pandas as pd
import shutil
import sys
import tarfile
import urllib
import zipfile
from scipy.sparse import vstack
from sklearn import datasets
try:
    from joblib import Memory
except ImportError:
    # older scikit-learn releases vendored joblib under sklearn.externals
    from sklearn.externals.joblib import Memory
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
mem = Memory("./mycache")
@mem.cache
def get_higgs(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
filename = 'HIGGS.csv'
if not os.path.isfile(filename):
urlretrieve(url, filename + '.gz')
with gzip.open(filename + '.gz', 'rb') as f_in:
with open(filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
higgs = pd.read_csv(filename)
X = higgs.iloc[:, 1:].values
y = higgs.iloc[:, 0].values
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_cover_type(num_rows=None):
data = datasets.fetch_covtype()
X = data.data
y = data.target
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_synthetic_regression(num_rows=None):
if num_rows is None:
num_rows = 10000000
return datasets.make_regression(n_samples=num_rows, bias=100, noise=1.0)
@mem.cache
def get_year(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip'
filename = 'YearPredictionMSD.txt'
if not os.path.isfile(filename):
urlretrieve(url, filename + '.zip')
zip_ref = zipfile.ZipFile(filename + '.zip', 'r')
zip_ref.extractall()
zip_ref.close()
year = pd.read_csv('YearPredictionMSD.txt', header=None)
X = year.iloc[:, 1:].values
y = year.iloc[:, 0].values
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
@mem.cache
def get_url(num_rows=None):
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/url/url_svmlight.tar.gz'
filename = 'url_svmlight.tar.gz'
if not os.path.isfile(filename):
urlretrieve(url, filename)
tar = tarfile.open(filename, "r:gz")
tar.extractall()
tar.close()
num_files = 120
files = ['url_svmlight/Day{}.svm'.format(day) for day in range(num_files)]
data = datasets.load_svmlight_files(files)
X = vstack(data[::2])
y = np.concatenate(data[1::2])
y[y < 0.0] = 0.0
if num_rows is not None:
X = X[0:num_rows]
y = y[0:num_rows]
return X, y
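# A hypothetical usage sketch: every loader returns an (X, y) pair and caches
# the result under ./mycache via joblib.Memory, so repeated calls are cheap.
# The num_rows value below is an assumption.
#
#   X, y = get_higgs(num_rows=100000)  # downloads HIGGS.csv.gz on first call
#   X, y = get_cover_type()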
| StarcoderdataPython |
4925091 | from typing import Optional
from .event import Event
from .event import NONAME
from .output import Output, ConsoleOutput, FileOutput
class Core(Output):
project: str
env: str
console_output: Optional[ConsoleOutput]
file_output: Optional[FileOutput]
"""
    Core maintains the logging system's outputs (the console output and the
    file output) and holds the global configuration.
"""
def __init__(self, opts: Optional[dict]):
self.project = NONAME
self.env = NONAME
self.console_output = None
self.file_output = None
if opts is None:
return
if "project" in opts:
self.project = str(opts["project"])
if "env" in opts:
self.env = str(opts["env"])
if "console" in opts:
self.console_output = ConsoleOutput(opts["console"])
if "file" in opts:
self.file_output = FileOutput(opts["file"])
def append_event(self, event: Event) -> None:
if self.console_output is not None:
self.console_output.append_event(event)
if self.file_output is not None:
self.file_output.append_event(event)
def create_event(self) -> Event:
e = Event()
e.project = self.project
e.env = self.env
e.output = self
return e
class CoreProvider(object):
"""
    CoreProvider wraps a Core so that a Logger can switch Cores.
"""
core: Core
def get_core(self) -> Core:
return self.core
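# A hypothetical usage sketch (the option keys mirror Core.__init__ above; the
# values handed to the console output are assumptions):
#
#   core = Core({"project": "demo", "env": "dev", "console": {}})
#   event = core.create_event()
#   core.append_event(event)  # fans the event out to the configured outputs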
| StarcoderdataPython |
306285 | <gh_stars>10-100
class TestDemo:
print('testing')
| StarcoderdataPython |
16705 | from systems.plugins.index import BaseProvider
import os
class Provider(BaseProvider('task', 'upload')):
def execute(self, results, params):
file_path = self.get_path(self.field_file)
if not os.path.exists(file_path):
self.command.error("Upload task provider file {} does not exist".format(file_path))
ssh = self._get_ssh()
ssh.upload(file_path, self.field_remote_path,
mode = self.field_mode,
owner = self.field_owner,
group = self.field_group
)
| StarcoderdataPython |
3241909 | <reponame>shantanusharma/bigmler<filename>bigmler/whizzml/dispatcher.py
# -*- coding: utf-8 -*-
#
# Copyright 2016-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer whizzml main processing
Functions to process the whizzml options
"""
import sys
import os
import bigmler.utils as u
from bigmler.whizzml.package import create_package
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
from bigmler.command import get_context
COMMAND_LOG = ".bigmler_whizzml"
DIRS_LOG = ".bigmler_whizzml_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG}
def whizzml_dispatcher(args=sys.argv[1:]):
"""Main processing of the parsed options for BigMLer whizzml
"""
# If --clear-logs the log files are cleared
if "--clear-logs" in args:
clear_log_files(LOG_FILES)
command_args, command, api, _, resume = get_context(args, SETTINGS)
# package_dir
if command_args.package_dir is not None:
command_args.package_dir = os.path.expanduser(command_args.package_dir)
create_package(command_args, api, command,
resume=resume)
else:
sys.exit("You must use the --package-dir flag pointing to the"
" directory where the metadata.json file is. Type\n"
" bigmler whizzml --help\n"
" to see all the available options.")
| StarcoderdataPython |
6626215 | # Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test-compile-as-managed',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'true',
'ExceptionHandling': '0' # /clr is incompatible with /EHs
}
},
'sources': ['compile-as-managed.cc'],
},
{
'target_name': 'test-compile-as-unmanaged',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'false',
}
},
'sources': ['compile-as-managed.cc'],
},
]
}
| StarcoderdataPython |
3251813 | # See https://github.com/confluentinc/confluent-kafka-python
from confluent_kafka.admin import AdminClient, NewTopic
app_settings = {
"bootstrap.servers": "TODO",
"topics": [
"topic1",
"topic2",
],
}
a = AdminClient({"bootstrap.servers": app_settings["bootstrap.servers"]})
# Note: In a multi-cluster production scenario, it is more typical to use a replication_factor of 3 for durability.
new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in app_settings["topics"]]
# Call create_topics to asynchronously create topics. A dict of <topic,future> is returned.
fs = a.create_topics(new_topics)
# Wait for each operation to finish.
for topic, f in fs.items():
try:
f.result() # The result itself is None
print(f"Topic {topic} created")
except Exception as e:
print(f"Failed to create topic {topic}: {e}")
| StarcoderdataPython |
4884236 | <reponame>ezekielkibiego/projects254
# Generated by Django 2.2.24 on 2022-02-12 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='images')),
('description', models.TextField(max_length=600)),
('techs_used', models.TextField(max_length=100, null=True)),
('url', models.URLField(null=True)),
('link', models.URLField(null=True)),
('date', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
| StarcoderdataPython |
6422045 | <reponame>gembcior/FortressTools<filename>src/fortresstools/command/__init__.py<gh_stars>0
from .base import UnsupportedExecutor
from .dir import *
from .git import *
from .cmake import *
from .pip import *
from .venv import *
from .rsync import *
from .svn import *
from .test import *
| StarcoderdataPython |
6618548 | <reponame>baggakunal/learning-python<filename>src/prime_number.py
from math import sqrt
def is_prime(num: int) -> bool:
if num < 2:
return False
for i in range(2, int(sqrt(num)) + 1):
if num % i == 0:
return False
return True
def main():
print([n for n in range(101) if is_prime(n)])
if __name__ == '__main__':
main()
| StarcoderdataPython |
3458579 | from svbench.io_tools import *
from svbench.quant_tools import *
from svbench.loaders import *
| StarcoderdataPython |
9659937 | <reponame>MaciejTe/integration
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"define factories from where to create namespaces"
from .docker_compose_manager import (
DockerComposeStandardSetup,
DockerComposeMonitorCommercialSetup,
DockerComposeDockerClientSetup,
DockerComposeRofsClientSetup,
DockerComposeLegacyClientSetup,
DockerComposeSignedArtifactClientSetup,
DockerComposeShortLivedTokenSetup,
DockerComposeFailoverServerSetup,
DockerComposeEnterpriseSetup,
DockerComposeCustomSetup,
DockerComposeCompatibilitySetup,
DockerComposeMTLSSetup,
DockerComposeMenderClient_2_5,
)
from .kubernetes_manager import KubernetesEnterpriseSetup, isK8S
class ContainerManagerFactory:
def getStandardSetup(self, name=None, num_clients=1):
"""Standard setup consisting on all core backend services and optionally clients
The num_clients define how many QEMU Mender clients will be spawn.
"""
pass
def getMonitorCommercialSetup(self, name=None, num_clients=1):
"""Monitor client setup consisting on all core backend services and monitor-client
The num_clients define how many QEMU Mender clients will be spawn.
"""
pass
def getDockerClientSetup(self, name=None):
"""Standard setup with one Docker client instead of QEMU one"""
pass
def getRofsClientSetup(self, name=None):
"""Standard setup with one QEMU Read-Only FS client instead of standard R/W"""
pass
def getLegacyClientSetup(self, name=None):
"""Setup with one Mender client v1.7"""
pass
def getSignedArtifactClientSetup(self, name=None):
"""Standard setup with pre-installed verification key in the client"""
pass
def getShortLivedTokenSetup(self, name=None):
"""Standard setup on which deviceauth has a short lived token (expire timeout = 0)"""
pass
def getFailoverServerSetup(self, name=None):
"""Setup with two servers and one client.
First server (A) behaves as usual, whereas the second server (B) should
not expect any clients. Client is initially set up against server A.
"""
pass
def getEnterpriseSetup(self, name=None, num_clients=0):
"""Setup with enterprise versions for the applicable services"""
pass
def getEnterpriseSMTPSetup(self, name=None):
"""Enterprise setup with SMTP enabled"""
pass
def getCustomSetup(self, name=None):
"""A noop setup for tests that use custom setups
It only implements teardown() for these tests to still have a way
for the framework to clean after them (most importantly on errors).
"""
pass
class DockerComposeManagerFactory(ContainerManagerFactory):
def getStandardSetup(self, name=None, num_clients=1):
return DockerComposeStandardSetup(name, num_clients)
def getMonitorCommercialSetup(self, name=None, num_clients=0):
return DockerComposeMonitorCommercialSetup(name, num_clients)
def getDockerClientSetup(self, name=None):
return DockerComposeDockerClientSetup(name)
def getRofsClientSetup(self, name=None):
return DockerComposeRofsClientSetup(name)
def getLegacyClientSetup(self, name=None):
return DockerComposeLegacyClientSetup(name)
def getSignedArtifactClientSetup(self, name=None):
return DockerComposeSignedArtifactClientSetup(name)
def getShortLivedTokenSetup(self, name=None):
return DockerComposeShortLivedTokenSetup(name)
def getFailoverServerSetup(self, name=None):
return DockerComposeFailoverServerSetup(name)
def getEnterpriseSetup(self, name=None, num_clients=0):
return DockerComposeEnterpriseSetup(name, num_clients)
def getCompatibilitySetup(self, name=None, **kwargs):
return DockerComposeCompatibilitySetup(name, **kwargs)
def getMTLSSetup(self, name=None, **kwargs):
return DockerComposeMTLSSetup(name, **kwargs)
def getMenderClient_2_5(self, name=None, **kwargs):
return DockerComposeMenderClient_2_5(name, **kwargs)
def getCustomSetup(self, name=None):
return DockerComposeCustomSetup(name)
class KubernetesManagerFactory(ContainerManagerFactory):
def getEnterpriseSetup(self, name=None, num_clients=0):
return KubernetesEnterpriseSetup(name, num_clients)
def getMonitorCommercialSetup(self, name=None, num_clients=0):
return KubernetesEnterpriseSetup(name, num_clients)
def get_factory():
if isK8S():
return KubernetesManagerFactory()
else:
return DockerComposeManagerFactory()
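# A hypothetical usage sketch: test code asks the factory for a namespace and
# receives a Docker Compose or Kubernetes setup depending on the environment.
# The name and client count below are assumptions.
#
#   factory = get_factory()
#   env = factory.getEnterpriseSetup(name="test-setup", num_clients=1)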
| StarcoderdataPython |
5128469 | from .alexnet import AlexNetV1, AlexNetV2, AlexNetV3
from .resnet import ResNet
from .resnet2plus1d import ResNet2Plus1d
from .resnet3d import ResNet3d
from .resnet3d_csn import ResNet3dCSN
from .resnet3d_slowfast import ResNet3dSlowFast
from .resnet3d_slowonly import ResNet3dSlowOnly
from .resnet_tin import ResNetTIN
from .resnet_tsm import ResNetTSM
__all__ = [
'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast',
'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'AlexNetV1', 'AlexNetV2',
'AlexNetV3'
]
| StarcoderdataPython |
8034011 | <reponame>marici/recipebook
# -*- coding: utf-8 -*-
'''
The MIT License
Copyright (c) 2009 Marici, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from datetime import datetime
from django.conf import settings
from django.core import urlresolvers, serializers
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.template import loader, Context, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from maricilib.django.decorators import postmethod
from maricilib.django.shortcuts import render_to_response_of_class
from maricilib.django.core.paginator import Paginator
from maricilib.django.apps.taskqueue.queue import get_taskqueue
from maricilib.django.apps.taskqueue.tasks import SendEmailTask
from recipes.models import Contest, Recipe
per_page = 10
def show_current_contest_list(request, page=1):
'''
    Displays contests that are currently accepting recipes, meaning:
    * contest.published_at is earlier than the current time
    * contest.closed_at is later than the current time
    @param page: page number (default 1)
    @context page_obj: Page object whose object_list holds the queryset
    @return: 200 response (success)
'''
contests = Contest.objects.get_current_contests()
page_obj = Paginator(contests, per_page).page(page)
d = {'current': True,
'title': u'Contests currently accepting recipes',
'page_obj': page_obj}
return render_to_response('recipes/contests.html',
d, RequestContext(request))
def show_closed_contest_list(request, page=1):
'''
    Displays contests whose recipe submission period has ended, meaning:
    * contest.published_at is earlier than the current time
    * contest.closed_at is earlier than the current time
    @param page: page number (default 1)
    @context page_obj: Page object whose object_list holds the queryset
    @return: 200 response (success)
'''
contests = Contest.objects.get_closed_contests_qs()
page_obj = Paginator(contests, per_page).page(page)
d = {'current': False,
'title': u'募集終了したお題',
'page_obj': page_obj}
return render_to_response('recipes/contests.html',
d, RequestContext(request))
def show_contest(request, contest_id=None):
'''
    Displays the details of a contest.
    A contest whose published_at is in the future cannot be displayed.
    @param contest_id: contest ID
    @context contest: Contest instance
    @context contests: all contests currently accepting recipes
    @return: 404 response (if the contest does not exist or published_at is in the future)
    @return: 200 response (success)
'''
contest = get_object_or_404(Contest, pk=contest_id)
if not contest.is_published():
raise Http404
contests = Contest.objects.get_current_contests()
d = {'contest': contest,
'contests': contests}
if contest.is_really_finished():
award_recipes = contest.get_awarded_recipes()
d['top_award_recipes'] = award_recipes[:2]
d['award_recipes'] = award_recipes[2:]
return render_to_response('recipes/contest.html',
d, RequestContext(request))
def show_recipes(request, contest_id=None, page=1):
'''
    Displays the recipes submitted to a contest, newest first.
    A recipe is included when:
    * recipe.contest is the given contest
    * is_draft is False
    @param contest_id: contest ID
    @param page: page number (default 1)
    @context page_obj: Page object whose object_list holds Recipe instances
    @return: 404 response (if the contest does not exist or, TODO: published_at is in the future)
    @return: 200 response (success)
'''
contest = get_object_or_404(Contest, pk=contest_id)
recipes = contest.recipe_set.filter(is_draft=False)
page_obj = Paginator(recipes, per_page).page(page)
links = [{'url': urlresolvers.reverse('recipes-contests-show',
kwargs={'contest_id': contest.id}),
'name': contest.name}]
d = {'title': u'Recipes submitted to %s' % contest.name,
'page_obj': page_obj, 'links': links}
return render_to_response('recipes/recipes.html',
d, RequestContext(request))
@postmethod
@login_required
def submit_recipe(request, contest_id=None, recipe_id=None):
'''
    Submits the recipe with the given ID to the contest with the given ID.
    On success the recipe gets recipe.contest = contest.
    Only the author of the recipe may submit it, and a recipe that is
    already tied to a contest cannot be submitted again.
    @param contest_id: ID of the Contest instance
    @param recipe_id: ID of the Recipe instance
    @return: 200 response (success; returns JSON)
    @return: 302 response (to the login page if not logged in)
    @return: 403 response (if recipe.contest != None or
        request.user != recipe.user)
    @return: 404 response (if the given Recipe or Contest instance does not exist)
'''
contest = get_object_or_404(Contest, pk=contest_id)
recipe = get_object_or_404(Recipe, pk=recipe_id)
if recipe.user != request.user or recipe.contest:
return render_to_response_of_class(HttpResponseForbidden, '403.html')
recipe.contest = contest
recipe.save()
data = serializers.serialize('json', [recipe])
return HttpResponse(data, mimetype='application/javascript')
def search_contests(request, query=None, page=1):
'''
    Searches contests.
    @param query: search string
    @param page: page to display (default 1)
    @context page_obj: object whose object_list holds the results
    @return: 200 response (success)
'''
query = query or request.GET['query']
title = u'Contest search results for %s' % query
queries = query.split()
contests = Contest.objects.search(queries, page=page, per_page=per_page)
page_obj = Paginator(contests.get('object_list'), per_page).page(page)
links = [{'name': u'Search everything',
'url': urlresolvers.reverse('gp-search',
kwargs={'query': query})}]
return render_to_response('recipes/contests.html',
{'page_obj': page_obj,
'title': title,
'links': links},
RequestContext(request))
@postmethod
@login_required
def mail_recipe_template(request, contest_id=None):
'''
    Emails the recipe template to the address given in the POST parameter
    alter_email. Only logged-in users may do this. If the value of alter_email
    differs from profile.alter_email, profile.alter_email is updated.
    @param contest_id: contest ID
    @return: 302 response (to the login page if not logged in)
    @return: 404 response (if the contest with the given ID does not exist)
    @return: 200 response (returns the recipe JSON data on success)
'''
site = Site.objects.get_current()
profile = request.user.get_profile()
contest = get_object_or_404(Contest, pk=contest_id) if contest_id else None
email = request.POST.get('alter_email', profile.alter_email)
if email != profile.alter_email:
profile.alter_email = email
if profile.has_available_token():
profile.token_issued_at = datetime.now()
else:
profile.issue_recipe_token()
profile.save()
c = Context({'user': request.user, 'contest': contest,
'token': profile.recipe_token})
t = loader.get_template('recipes/email/recipe_template.txt')
if contest:
subject = u'[%s] Recipe submission for %s' % (site.name, contest.name)
else:
subject = u'[%s] Recipe submission' % site.name
body = t.render(c)
task = SendEmailTask(dict(subject=subject, body=body,
from_address=settings.EMAIL_FROM,
to_list=[email]))
get_taskqueue().send_task(task, queue_name=settings.QUEUENAME_EMAIL)
json = serializers.serialize('json', [])
return HttpResponse(json, mimetype='application/json')
| StarcoderdataPython |
6665419 | <reponame>sbruch/xe-ndcg-experiments<filename>lib.py<gh_stars>1-10
import math
import numpy as np
import random
import lightgbm as gbm
class SplitConfig(object):
def __init__(self, population_pct, sample_size, transformations=None):
"""Creates a split configuration.
Args:
population_pct: (float) The percentage of the original dataset
to use as the population.
sample_size: (int) The number of queries to sample from the population
to form the split.
transformations: list of `Transformation` objects to apply to
sampled queries.
"""
self.population_pct = population_pct
self.sample_size = sample_size
self.transformations = transformations
if self.transformations is None:
self.transformations = []
class Collection(object):
"""Data structure that holds a collection of queries."""
def __init__(self, paths):
self.features = {}
self.relevances = {}
for path in paths:
for line in open(path, "r"):
items = line.split()
rel = int(items[0])
qid = int(items[1].split(":")[1])
if qid not in self.features:
self.features[qid] = []
self.relevances[qid] = []
self.features[qid].append(
np.array([float(s.split(':')[1]) for s in items[2:]]))
self.relevances[qid].append(rel)
self.qids = [x for x, _ in self.features.items()]
@property
def num_queries(self):
return len(self.qids)
def generate_splits(self, configs, params=None):
"""Generates splits for training and evaluation.
Args:
configs: list of `SplitConfig` objects.
params: (dict) Parameters to pass to LightGBM.Dataset.
Returns:
List of `lightgbm.Dataset` objects.
"""
# Randomly shuffle the query IDs.
random.shuffle(self.qids)
# Gather query IDs for each split population.
population_qids = []
lower = 0
for pct in [c.population_pct for c in configs]:
upper = int(lower + pct * self.num_queries + 1)
if upper >= self.num_queries:
upper = self.num_queries
population_qids.append(self.qids[lower:upper])
lower = upper
# Sample queries to form each split.
split_qids = []
for sample_size in [c.sample_size for c in configs]:
split_qids.append(np.random.choice(
population_qids[len(split_qids)], sample_size))
# List of datasets to return
datasets = []
for qids in split_qids:
# Create a deep copy of features and relevances.
relevances = [np.copy(self.relevances[qid]) for qid in qids]
features = [np.copy(self.features[qid]) for qid in qids]
for transform in configs[len(datasets)].transformations:
features, relevances = transform.apply(features, relevances)
groups = [len(rels) for rels in relevances]
relevances = np.concatenate(relevances)
features = np.concatenate(features).reshape([len(relevances), -1])
if len(datasets) == 0:
dataset = gbm.Dataset(data=features, label=relevances,
group=groups, params=params,
silent=True, free_raw_data=False)
else:
dataset = gbm.Dataset(data=features, label=relevances,
group=groups, reference=datasets[0],
silent=True, free_raw_data=False)
datasets.append(dataset)
return datasets
class Transformation(object):
def apply(self, features, relevances):
"""Applies a transformation.
Args:
features: A 3D ndarray.
relevances: A 2D ndarray.
Returns:
A tuple consisting of new features and relevances.
"""
raise NotImplementedError
class PerturbLabels(Transformation):
def __init__(self, factor, dist):
"""Creates a `Transformation` to perturb labels.
Args:
factor: (float) Percentage of labels to perturb per query.
dist: list of floats. The probabilities associated with each label.
"""
self.factor = factor
self.dist = dist
def apply(self, features, relevances):
for idx, rels in enumerate(relevances):
labels = np.random.choice(len(self.dist), len(rels), p=self.dist)
v = np.random.rand(len(rels))
relevances[idx] = np.where(np.less(v, self.factor), labels, rels)
return features, relevances
class AugmentListByExternalNegativeSamples(Transformation):
def __init__(self, factor):
"""
Creates a `Transformation` to augment lists by sampling negative
examples from other queries.
Args:
factor: (float) Factor by which each list will be augmented.
"""
self.factor = factor
def apply(self, features, relevances):
extra_features = []
for idx in range(len(features)):
size = int(self.factor * len(features[idx]))
v = np.random.randint(0, len(features) - 1, size)
indices = np.where(np.less(v, idx), v, v + 1)
extras = []
for r in indices:
b = np.random.randint(0, len(features[r]))
extras.append(np.copy(features[r][b]))
extra_features.append(extras)
for idx in range(len(features)):
features[idx] = np.append(features[idx], extra_features[idx])
relevances[idx] = np.append(
relevances[idx], np.zeros(len(extra_features[idx])))
return features, relevances
class GenerateClicks(Transformation):
def __init__(self, impressions, click_prob):
"""
Creates a `Transformation` to generate clicks using a random ranker.
Args:
impressions: (int) Number of impressions per query.
click_prob: list of floats. Click probability given relevance.
"""
self.impressions = impressions
self.click_prob = click_prob
def apply(self, features, relevances):
_features = []
_relevances = []
for idx in range(len(features)):
indices = np.arange(len(features[idx]))
for _ in range(self.impressions):
np.random.shuffle(indices)
v = np.random.rand(len(indices))
f = []
clicked = False
for i in indices:
f.append(np.copy(features[idx][i]))
if v[i] <= self.click_prob[relevances[idx][i]]:
clicked = True
break
r = np.zeros(len(f))
if clicked:
r[-1] = 1
_features.append(f)
_relevances.append(r)
return _features, _relevances
class NDCG(object):
def __init__(self, cutoffs):
self.cutoffs = cutoffs
def eval(self, preds, data):
"""Computes NDCG at rank cutoff.
Args:
preds: list of floats.
data: A `lightgbm.Dataset` object.
"""
# Transform the relevance labels and predictions to the correct shape.
relevances = []
scores = []
idx = 0
for group in data.group:
relevances.append(data.label[idx:idx + group])
scores.append(preds[idx:idx + group])
idx += group
ndcg_at = {}
count = 0
for s, r in zip(scores, relevances):
# Skip queries with no relevant documents.
if sum(r) == 0:
continue
count += 1
sorted_by_scores = [i for _,i in sorted(zip(s,r), key=lambda p: p[0], reverse=True)]
gains_scores = [pow(2, i) - 1. for i in sorted_by_scores]
gains_rels = sorted(gains_scores, reverse=True)
discounts = [1./math.log(i+2, 2) for i, _ in enumerate(sorted_by_scores)]
for cutoff in self.cutoffs:
dcg = sum([g*d for g, d in zip(gains_scores[:cutoff], discounts[:cutoff])])
max_dcg = sum([g*d for g, d in zip(gains_rels[:cutoff], discounts[:cutoff])])
if cutoff not in ndcg_at:
ndcg_at[cutoff] = 0.
ndcg_at[cutoff] += dcg / max_dcg
results = []
for cutoff in self.cutoffs:
results.append(('ndcg@{}'.format(cutoff), ndcg_at[cutoff]/count, True))
return results
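# A hypothetical usage sketch (file paths, sizes, and LightGBM parameters are
# assumptions): build train/validation splits from a LETOR-style file and pass
# NDCG.eval as a custom metric to LightGBM.
#
#   collection = Collection(["train.txt"])
#   train, valid = collection.generate_splits([
#       SplitConfig(0.8, 1000, [PerturbLabels(0.1, [0.7, 0.1, 0.1, 0.05, 0.05])]),
#       SplitConfig(0.2, 200),
#   ])
#   booster = gbm.train({"objective": "lambdarank"}, train,
#                       valid_sets=[valid], feval=NDCG([5, 10]).eval)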
| StarcoderdataPython |
372799 | #!/usr/bin/env python3
# pylint: disable=missing-docstring,too-many-public-methods
import pathlib
import shutil
import tempfile
import time
import unittest
import uuid
from typing import List, Optional # pylint: disable=unused-import
import zmq
import persizmq
import persizmq.filter
class TestContext:
def __init__(self, base_url: str = "inproc://persizmq_test") -> None:
self.url = base_url + str(uuid.uuid4())
self.context = zmq.Context()
self.publisher = self.context.socket(zmq.PUB) # pylint: disable=no-member
self.subscribers = [] # type: List[zmq.Socket]
self.tmp_dir = None # type: Optional[pathlib.Path]
def subscriber(self) -> zmq.Socket:
"""
Creates a new subscriber that listens to whatever the publisher of this instance
publishes.
The subscriber will be closed by this instance.
:return: zmq subscriber
"""
subscriber = self.context.socket(zmq.SUB) # pylint: disable=no-member
self.subscribers.append(subscriber)
subscriber.setsockopt_string(zmq.SUBSCRIBE, "") # pylint: disable=no-member
subscriber.connect(self.url)
return subscriber
def __enter__(self):
self.tmp_dir = pathlib.Path(tempfile.mkdtemp())
self.publisher.bind(self.url)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for subscriber in self.subscribers:
subscriber.close()
shutil.rmtree(self.tmp_dir.as_posix())
self.publisher.close()
self.context.term()
class TestThreadedSubscriber(unittest.TestCase):
def test_operational(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
thread_sub = persizmq.ThreadedSubscriber(
callback=lambda msg: None, subscriber=subscriber, on_exception=lambda exc: None)
# Threaded subscriber is already operational after the constructor.
self.assertTrue(thread_sub.operational)
with thread_sub:
self.assertTrue(thread_sub.operational)
# Threaded subscriber is not operational after exiting the context.
self.assertFalse(thread_sub.operational)
def test_a_message(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
class Helper:
def __init__(self):
self.msg_received = None
def callback(self, msg: bytes):
self.msg_received = msg
helper = Helper()
thread_sub = persizmq.ThreadedSubscriber(
callback=helper.callback, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
ctx.publisher.send(b"0001")
time.sleep(0.01)
self.assertEqual(b"0001", helper.msg_received)
def test_exception(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
def callback(msg: bytes) -> None: # pylint: disable=unused-argument
raise Exception("Here I come!")
exception = None
def on_exception(exc):
nonlocal exception
exception = exc
thread_sub = persizmq.ThreadedSubscriber(
callback=callback, subscriber=subscriber, on_exception=on_exception)
with thread_sub:
ctx.publisher.send(b"0002")
time.sleep(0.01)
self.assertIsNotNone(exception)
self.assertEqual("Here I come!", str(exception))
class TestPersistentSubscriber(unittest.TestCase):
def test_no_message_received(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_a_message(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
ctx.publisher.send(b"1984")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1984", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_multiple_messages(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# publish a message
ctx.publisher.send(b"1985")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1985", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
# publish two in a row
ctx.publisher.send(b"1986")
ctx.publisher.send(b"1987")
time.sleep(0.01)
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1986", msg)
# ask for the same front
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1986", msg)
self.assertTrue(storage.pop_front())
# publish a third one
ctx.publisher.send(b"1988")
time.sleep(0.01)
# check the second one
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1987", msg)
self.assertTrue(storage.pop_front())
# check the third one
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1988", msg)
self.assertTrue(storage.pop_front())
def test_persistency(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# publish a message
ctx.publisher.send(b"1985")
time.sleep(0.01)
# simulate a restart
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"1985", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
self.assertFalse(storage.pop_front())
def test_order(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# Make sure the correct order is kept even for a lot of messages.
for i in range(2000, 2020):
ctx.publisher.send("{}".format(i).encode())
time.sleep(0.01)
# simulate a restart
with ctx.subscriber() as subscriber:
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir.as_posix())
thread_sub = persizmq.ThreadedSubscriber(
callback=storage.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
for i in range(2000, 2020):
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual("{}".format(i).encode(), msg)
self.assertTrue(storage.pop_front())
class TestFilters(unittest.TestCase):
def test_that_it_works(self):
# pylint: disable=too-many-statements
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
pers_dir_filter = ctx.tmp_dir / 'filter'
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
subscriber=subscriber, callback=lambda msg: None, on_exception=lambda exc: None)
thread_sub.callback = \
lambda msg: storage.add_message(
persizmq.filter.MinPeriod(min_period=1, persistent_dir=pers_dir_filter)(msg))
with thread_sub:
# Send two messages.
ctx.publisher.send(b"3000")
ctx.publisher.send(b"3001")
time.sleep(0.01)
# Make sure only one arrived.
msg = storage.front()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"3000", msg)
self.assertTrue(storage.pop_front())
msg = storage.front()
self.assertIsNone(msg)
# Rebuild the persistent subscriber.
del storage
del thread_sub
storage = persizmq.PersistentStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
subscriber=subscriber, callback=lambda msg: None, on_exception=lambda exc: None)
thread_sub.callback = lambda msg: storage.add_message(
persizmq.filter.MinPeriod(min_period=10, persistent_dir=pers_dir_filter)(msg))
with thread_sub:
# Send one message and make sure that the last timestamp was correctly loaded
# (the new message must be rejected).
ctx.publisher.send(b"3002")
time.sleep(0.01)
msg = storage.front()
self.assertIsNone(msg)
thread_sub.callback = lambda msg: storage.add_message(persizmq.filter.MaxSize(max_size=1000)(msg))
# Generate a too large message and check that it is rejected.
ctx.publisher.send(b"x" * 1001)
time.sleep(0.01)
msg = storage.front()
self.assertIsNone(msg)
class TestPersistentLatest(unittest.TestCase):
def test_that_it_works(self):
with TestContext() as ctx:
with ctx.subscriber() as subscriber:
persi_latest = persizmq.PersistentLatestStorage(persistent_dir=ctx.tmp_dir)
thread_sub = persizmq.ThreadedSubscriber(
callback=persi_latest.add_message, subscriber=subscriber, on_exception=lambda exc: None)
with thread_sub:
# Make sure only the newest one is kept.
self.assertFalse(persi_latest.new_message)
ctx.publisher.send(b"4000")
time.sleep(0.01)
self.assertTrue(persi_latest.new_message)
ctx.publisher.send(b"4001")
time.sleep(0.01)
self.assertTrue(persi_latest.new_message)
msg = persi_latest.message()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"4001", msg)
self.assertFalse(persi_latest.new_message)
# The same for lots of messages.
for i in range(4010, 4020):
ctx.publisher.send("{}".format(i).encode())
time.sleep(0.01)
msg = persi_latest.message()
self.assertIsNotNone(msg)
assert isinstance(msg, bytes)
self.assertEqual(b"4019", msg)
self.assertFalse(persi_latest.new_message)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9696920 | # ai.py
#
# Author: <NAME>
# Created On: 21 Feb 2019
import numpy as np
from . import astar
SEARCH_TARGET = 0
MOVE = 1
class AI:
def __init__(self, player):
self.player = player
self.path = []
self.state = SEARCH_TARGET
self.weight_self = 3
self.weight_enemy = 6
self.weight_crossroad = 3
self.map_positions = np.empty((0, 0))
self.bomb_times = np.empty((0, 0))
def __update_map_positions(self, map):
if map.size != self.map_positions.shape:
width, height = map.size
self.map_positions = np.empty((width, height, 2))
self.map_positions[:, :, 0] = np.arange(width) \
.reshape(1, width).repeat(height, 0)
self.map_positions[:, :, 1] = np.arange(height) \
.reshape(height, 1).repeat(width, 1)
def __update_bomb_times(self, bombs, map):
if map.size != self.bomb_times.shape:
            self.bomb_times = np.empty(map.size, dtype=np.int64)  # np.int was removed from modern NumPy
self.bomb_times[:, :] = 1e16
# define the four diections west, east, south, north
directions = np.array([(1, 0), (-1, 0), (0, 1), (0, -1)])
for bomb in bombs:
pos = bomb.pos
self.bomb_times[pos[0], pos[1]] = bomb.time
for dir in directions:
# try to spread the explosions as far as possible
for delta in range(1, bomb.range):
npos = pos + dir * delta
# check if the position is valid, if not stop explosion
# spread here
if not map.is_valid(npos) or map.is_blocked(npos) or \
map.has_explosion(npos):
break
self.bomb_times[pos[0], pos[1]] = bomb.time
def update(self, world):
self.player.drop_bomb = False
self.player.move[:] = 0
if self.state == MOVE:
if self.path:
next_pos = self.path.pop(0)
if world.map.is_blocked(next_pos) or world.map.has_explosion(next_pos):
self.path = []
self.state = SEARCH_TARGET
next_pos = np.array(next_pos, dtype=np.int)
self.player.move = next_pos - self.player.pos
else:
self.player.drop_bomb = True
self.state = SEARCH_TARGET
if self.state == SEARCH_TARGET:
# init score board, each tile gets a score the maximum is chosen as
# target
score = np.zeros(world.map.size)
# get mask of tiles which are not blocked
unblock = ~world.map.blocked
width, height = score.shape
# create array of tile positions, create lazily
self.__update_map_positions(world.map)
self.__update_bomb_times(world.bombs, world.map)
# calculate distances of this player to all other tiles (manhatten)
self_dist = np.abs(self.map_positions - self.player.pos).sum(2)
# normalize distances into interval [0,1]
self_dist /= self_dist.max()
# make shortest distances have greates value
self_dist -= 1
self_dist *= -1
# check if there are any other players than this one
if len(world.players) > 1:
# calculate distances of all enemies to all other tiles
enemy_dist = []
for enemy in world.players:
# check if this player is not the one controlled by ai
if enemy.id != self.player.id:
diff = self.map_positions - enemy.pos
dist = np.abs(diff).sum(2)
enemy_dist.append(dist)
# convert distance to numpy array
enemy_dist = np.array(enemy_dist)
# find element wise minimum of all player distances
enemy_dist = np.min(enemy_dist, axis=0)
# normalize distances into interval [0,1]
enemy_dist /= enemy_dist.max()
# make shortest distances have greates value
enemy_dist -= 1
enemy_dist *= -1
else:
# no enemies, distances are zero
enemy_dist = np.zeros((width, height))
# detect how many neighbouring unblocked tiles each tile has
crossroads = np.zeros((width, height))
# add +1 if left neighbour is not blocked
crossroads[1:, :] += unblock[:-1, :] * 1
# add +1 if right neighbour is not blocked
crossroads[:-1, :] += unblock[1:, :] * 1
# add +1 if upper neighbour is not blocked
crossroads[:, 1:] += unblock[:, :-1] * 1
# add +1 if lower neighbour is not blocked
crossroads[:, :-1] += unblock[:, 1:] * 1
# normalize into interval [0,1]
crossroads /= 4
# calculate score as weighted sum
score += self.weight_self * self_dist
score += self.weight_enemy * enemy_dist
score += self.weight_crossroad * crossroads
# set all blocked tiles to zero
score[world.map.blocked] = 0
def is_valid(node, path):
return world.map.is_valid(node) and \
not world.map.is_blocked(node) and \
not world.map.has_explosion(node) and \
self.bomb_times[node[0], node[1]] - len(path) - 1 > 0
found = False
iterations = 0
while not found and iterations < 10:
# retrieve tile with maximum score
target = np.unravel_index(np.argmax(score), score.shape)
# set score to 0
score[target[0], target[1]] = 0
# search path with astar
self.path = astar.search(self.player.pos, target,
is_valid=is_valid)
if self.path:
self.state = MOVE
found = True
iterations += 1
if not found:
print('No path found!')
| StarcoderdataPython |
6537889 | #!/usr/bin/env python2
import random
import math
import copy
from Spell import *
class Pokemon:
def __init__(self, name, baseHp, lifePerLevel, attack, attackPerLevel, baseDef, defencePerLevel, spells, elements):
self.level = 1
self.exp = 0
self.name = name
self.baseHp = baseHp
self.lifePerLevel = lifePerLevel
self.defencePerLevel = defencePerLevel
self.spells = spells
self.attackPerLevel = attackPerLevel
self.baseDef = baseDef
self.pokeid = 0
self.attack = attack
self.active_pokemon = self.pokeid
self.battle = None
self.life = self.getMaxLife()
self.username = self.name
self.elements = elements
def getActivePokemon(self):
return self
def hasAlivePokemon(self):
return False
def removePokemon(self, pokemon):
self.active_pokemon = None
def addSpell(self, spell):
self.spells.append(spell)
def getSpells(self):
return self.spells
def getSpell(self, name):
for e in self.spells:
if e.name == name:
return e
return False
def expForNextLevel(self):
# return 1.2 * (self.level**3) - 15 * (self.level**2) + 100 * self.level - 140
return 1.25 * (self.level**3) + 50
def gainExp(self, fromPokemon):
self.addExp(self.calcGainedExp(fromPokemon))
def calcGainedExp(self, fromPokemon):
return 200 * fromPokemon.level / 7
def addExp(self, exp):
self.exp += exp
needed = self.expForNextLevel()
while self.exp >= needed:
self.exp = self.exp - needed
self.level += 1
self.life = self.getMaxLife()
def getMaxLife(self):
return self.lifePerLevel * self.level + self.baseHp
def getAttack(self):
return self.attackPerLevel * self.level + self.attack
def getDefence(self):
return self.defencePerLevel * self.level + self.baseDef
def str(self):
spells = ""
for elem in self.spells:
if len(spells) != 0:
spells += ", "
spells += elem.name
elements = ""
for elem in self.elements:
if len(elements) != 0:
elements += ", "
elements += elem.name
        return "(pokeId(" + str(self.pokeid) + "), Name(" + self.name + "), Level(" + str(self.level) + "), Attack(" + str(self.getAttack()) + "), MaxHP(" + str(self.getMaxLife()) + "), Defense(" + str(self.getDefence()) + "), Spells: (" + spells + "), Exp: (" + str(self.exp) + " / " + str(self.expForNextLevel()) + "), Elements(" + elements + "))"
def fight(self, spellName, defencer):
spell = self.getSpell(spellName)
if spell == False:
            return "Spell '" + spellName + "' not found."
rep = spell.use(self, defencer)
        return rep[0] + self.name + " uses " + spell.name + " (" + spell.element.name + ") and deals " + str(rep[1]) + " damage to " + defencer.name + " (hp: " + str(defencer.life) + " / " + str(defencer.getMaxLife()) + ")"
class PokemonsManager:
def __init__(self):
self.pokemons = [
Pokemon("ZeratoR", 140, 6, 10, 1, 5, 0.1, [
Spell("Son_Pere", 10, 90, elementsManager.get("Feu")),
Spell("Mute", 15, 50, elementsManager.get("Feu")),
Spell("Rend_l'argent", 50, 100, elementsManager.get("Feu")),
Spell("Dailymotion_drop", 100, 100, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
]),
Pokemon("Noxer", 80, 6, 5, 1, 0.5, 0.2, [
Spell("Ventre_devoreur", 30, 80, elementsManager.get("Terre")),
Spell("Millenium", 50, 80, elementsManager.get("Terre"))
], [
elementsManager.get("Terre")
]),
Pokemon("Furiie", 100, 4, 10, 1, 2, 0.05, [
Spell("Cri_strident", 20, 100, elementsManager.get("Eau")),
Spell("League_of_legends", 100, 20, elementsManager.get("Terre")),
Spell("Bisous", 20, 50, elementsManager.get("Eau"))
], [
elementsManager.get("Eau")
]),
Pokemon("MisterMV", 140, 6, 9, 1, 0.1, 0.1, [
Spell("SAUCISSON", 10, 100, elementsManager.get("Terre")),
Spell("Speedrun", 20, 80, elementsManager.get("Feu")),
Spell("Jeu_a_la_pisse", 100, 30, elementsManager.get("Terre"))
], [
elementsManager.get("Terre")
]),
Pokemon("<NAME>", 100, 5, 20, 1, 0.1, 0.1, [
Spell("LEEEEEROY_JENKINS", 5000, 10, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
]),
Pokemon("AlexMog", 180, 5, 20, 1, 3, 0.5, [
Spell("Tardbecile", 30, 100, elementsManager.get("Eau")),
Spell("Equilibrage_ratte", 70, 10, elementsManager.get("Eau")),
Spell("Blague_de_merde", 50, 30, elementsManager.get("Eau"))
], [
elementsManager.get("Eau")
]),
Pokemon("Demoneth", 160, 5, 10, 1, 4, 0.2, [
Spell("Molotov_sur_orange", 20, 50, elementsManager.get("Feu")),
Spell("Live_o_maniaque", 15, 100, elementsManager.get("Feu")),
Spell("La_co_marche", 100, 10, elementsManager.get("Feu"))
], [
elementsManager.get("Feu")
])
]
def getRandom(self):
ret = copy.copy(self.pokemons[random.randint(0, len(self.pokemons) - 1)])
return ret
def getFromName(self, name):
for elem in self.pokemons:
if elem.name == name:
return elem
return False
global pokemonsManager
pokemonsManager = PokemonsManager()
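
# Hypothetical quick check (illustrative only; it relies on the Spell and
# elementsManager definitions imported from the Spell module above):
# if __name__ == '__main__':
#     attacker = pokemonsManager.getFromName("ZeratoR")
#     defender = pokemonsManager.getRandom()
#     print(attacker.str())
#     print(attacker.fight("Mute", defender))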
| StarcoderdataPython |
6502011 | from attr import Factory, NOTHING
from prettyprinter.prettyprinter import pretty_call_alt, register_pretty
def is_instance_of_attrs_class(value):
cls = type(value)
try:
cls.__attrs_attrs__
except AttributeError:
return False
return True
def pretty_attrs(value, ctx):
cls = type(value)
attributes = cls.__attrs_attrs__
kwargs = []
for attribute in attributes:
if not attribute.repr:
continue
display_attr = False
if attribute.default == NOTHING:
display_attr = True
elif isinstance(attribute.default, Factory):
default_value = (
attribute.default.factory(value)
if attribute.default.takes_self
else attribute.default.factory()
)
if default_value != getattr(value, attribute.name):
display_attr = True
else:
if attribute.default != getattr(value, attribute.name):
display_attr = True
if display_attr:
kwargs.append((attribute.name, getattr(value, attribute.name)))
return pretty_call_alt(ctx, cls, kwargs=kwargs)
def install():
register_pretty(predicate=is_instance_of_attrs_class)(pretty_attrs)
| StarcoderdataPython |
11287236 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode
import opengeode_io_py_model as model_io
def test_brep_cube(brep):
# Number of components
if brep.nb_corners() != 8:
raise ValueError("[Test] Number of corners is not correct" )
if brep.nb_lines() != 12:
raise ValueError("[Test] Number of lines is not correct" )
if brep.nb_surfaces() != 6:
raise ValueError("[Test] Number of surfaces is not correct" )
if brep.nb_blocks() != 1:
raise ValueError("[Test] Number of blocks is not correct" )
# Number of vertices and elements in components
for c in brep.corners():
if c.mesh().nb_vertices() != 1:
raise ValueError("[Test] Number of vertices in corners should be 1" )
for l in brep.lines():
if l.mesh().nb_vertices() != 5:
raise ValueError("[Test] Number of vertices in lines should be 5" )
if l.mesh().nb_edges() != 4:
raise ValueError("[Test] Number of edges in lines should be 4" )
for s in brep.surfaces():
if s.mesh().nb_vertices() != 29:
raise ValueError("[Test] Number of vertices in surfaces should be 29" )
if s.mesh().nb_polygons() != 40:
raise ValueError("[Test] Number of polygons in surfaces should be 40" )
for b in brep.blocks():
if b.mesh().nb_vertices() != 131:
raise ValueError("[Test] Number of vertices in blocks should be 131" )
if b.mesh().nb_polyhedra() != 364:
raise ValueError("[Test] Number of polyhedra in blocks should be 364" )
# Number of component boundaries and incidences
for c in brep.corners():
if brep.nb_boundaries( c.id() ) != 0:
raise ValueError("[Test] Number of corner boundary should be 0" )
if brep.nb_incidences( c.id() ) != 3:
raise ValueError("[Test] Number of corner incidences should be 3" )
for l in brep.lines():
if brep.nb_boundaries( l.id() ) != 2:
raise ValueError("[Test] Number of line boundary should be 2" )
if brep.nb_incidences( l.id() ) != 2:
raise ValueError("[Test] Number of line incidences should be 2" )
for s in brep.surfaces():
if brep.nb_boundaries( s.id() ) != 4:
raise ValueError("[Test] Number of surface boundary should be 4" )
if brep.nb_incidences( s.id() ) != 1:
raise ValueError("[Test] Number of surface incidences should be 1" )
for b in brep.blocks():
if brep.nb_boundaries( b.id() ) != 6:
raise ValueError("[Test] Number of block boundary should be 6" )
if brep.nb_incidences( b.id() ) != 0:
raise ValueError("[Test] Number of block incidences should be 0" )
def test_brep_cone(brep):
# Number of components
if brep.nb_corners() != 6:
raise ValueError("[Test] Number of corners is not correct")
if brep.nb_lines() != 13:
raise ValueError("[Test] Number of lines is not correct")
if brep.nb_surfaces() != 12:
raise ValueError("[Test] Number of surfaces is not correct")
if brep.nb_blocks() != 4:
raise ValueError("[Test] Number of blocks is not correct")
# Number of vertices and elements in components
for c in brep.corners():
if c.mesh().nb_vertices() != 1:
raise ValueError("[Test] Number of vertices in corners should be 1")
for l in brep.lines():
if l.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in lines should not be null")
if l.mesh().nb_edges() == 0:
raise ValueError("[Test] Number of edges in lines should not be null")
for s in brep.surfaces():
if s.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in surfaces should not be null")
if s.mesh().nb_polygons() == 0:
raise ValueError("[Test] Number of polygons in surfaces should not be null")
for b in brep.blocks():
if b.mesh().nb_vertices() == 0:
raise ValueError("[Test] Number of vertices in blocks should not be null")
if b.mesh().nb_polyhedra() == 0:
raise ValueError("[Test] Number of polyhedra in blocks should not be null")
# Number of component boundaries and incidences
for c in brep.corners():
if brep.nb_boundaries( c.id() ) != 0:
raise ValueError("[Test] Number of corner boundary should be 0" )
if brep.nb_incidences( c.id() ) != 4 and brep.nb_incidences( c.id() ) != 5:
raise ValueError("[Test] Number of corner incidences should be 4 or 5" )
for l in brep.lines():
if brep.nb_boundaries( l.id() ) != 2:
raise ValueError("[Test] Number of line boundary should be 2" )
if brep.nb_incidences( l.id() ) < 2 or brep.nb_incidences( l.id() ) > 4:
raise ValueError("[Test] Number of line incidences should be 2, 3 or 4" )
for s in brep.surfaces():
if brep.nb_boundaries( s.id() ) != 3:
raise ValueError("[Test] Number of surface boundary should be 3" )
if brep.nb_incidences( s.id() ) != 1 and brep.nb_incidences( s.id() ) != 2:
raise ValueError("[Test] Number of surface incidences should be 1 or 2" )
for b in brep.blocks():
if brep.nb_boundaries( b.id() ) != 4:
raise ValueError("[Test] Number of block boundary should be 4" )
if brep.nb_incidences( b.id() ) != 0:
raise ValueError("[Test] Number of block incidences should be 0" )
if __name__ == '__main__':
model_io.initialize_model_io()
test_dir = os.path.dirname(__file__)
data_dir = os.path.abspath(os.path.join(test_dir, "../../../../tests/data"))
brep_cube = opengeode.load_brep( os.path.join(data_dir, "cube_v22.msh" ))
test_brep_cube(brep_cube)
opengeode.save_brep(brep_cube, "cube_v22.og_brep")
reloaded_brep_cube = opengeode.load_brep("cube_v22.og_brep")
test_brep_cube(reloaded_brep_cube)
brep_cone = opengeode.load_brep(os.path.join(data_dir, "cone_v4.msh" ))
test_brep_cone(brep_cone)
opengeode.save_brep(brep_cone, "cone_v4.og_brep")
reloaded_brep_cone = opengeode.load_brep("cone_v4.og_brep")
test_brep_cone(reloaded_brep_cone)
| StarcoderdataPython |
6649675 | <reponame>ethansaxenian/RosettaDecode
from datetime import date

# START and STOP are assumed here: the fuller Rosetta Code task from which
# this fragment comes scans the years 1900 through 2100.
START, STOP = date(1900, 1, 1), date(2101, 1, 1)

LONGMONTHS = (1, 3, 5, 7, 8, 10, 12)  # Jan Mar May Jul Aug Oct Dec

def fiveweekendspermonth2(start=START, stop=STOP):
    return [date(yr, month, 31)
            for yr in range(start.year, stop.year)
            for month in LONGMONTHS
            if date(yr, month, 31).timetuple()[6] == 6  # Sunday
            ]

dates2 = fiveweekendspermonth2()
# `dates` is produced by the first implementation in the full task, which
# is not part of this fragment, so the cross-check stays commented out:
# assert dates2 == dates
| StarcoderdataPython |
328889 | from manim import *
class s08b_Algorithms_Activity(Scene):
def construct(self):
# Actors.
title = Text("Algorithms")
subtitle = Text("(Activity)").scale(0.75)
# Positioning.
title.shift(0.50*UP)
subtitle.next_to(title, DOWN)
# Animations.
actors = [title, subtitle]
for actor in actors:
self.play(Write(actor))
self.wait(0.5)
# Cleanup.
self.wait(0.5)
self.play(*[FadeOut(actor) for actor in actors])
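
# To render this scene with the Manim community CLI (the file name below is
# illustrative):
#   manim -pql s08b_algorithms_activity.py s08b_Algorithms_Activity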
| StarcoderdataPython |
6656577 | # Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Elastic File System"
prefix = "elasticfilesystem"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
Backup = Action("Backup")
ClientMount = Action("ClientMount")
ClientRootAccess = Action("ClientRootAccess")
ClientWrite = Action("ClientWrite")
CreateAccessPoint = Action("CreateAccessPoint")
CreateFileSystem = Action("CreateFileSystem")
CreateMountTarget = Action("CreateMountTarget")
CreateTags = Action("CreateTags")
DeleteAccessPoint = Action("DeleteAccessPoint")
DeleteFileSystem = Action("DeleteFileSystem")
DeleteFileSystemPolicy = Action("DeleteFileSystemPolicy")
DeleteMountTarget = Action("DeleteMountTarget")
DeleteTags = Action("DeleteTags")
DescribeAccessPoints = Action("DescribeAccessPoints")
DescribeBackupPolicy = Action("DescribeBackupPolicy")
DescribeFileSystemPolicy = Action("DescribeFileSystemPolicy")
DescribeFileSystems = Action("DescribeFileSystems")
DescribeLifecycleConfiguration = Action("DescribeLifecycleConfiguration")
DescribeMountTargetSecurityGroups = Action("DescribeMountTargetSecurityGroups")
DescribeMountTargets = Action("DescribeMountTargets")
DescribeTags = Action("DescribeTags")
ListTagsForResource = Action("ListTagsForResource")
ModifyMountTargetSecurityGroups = Action("ModifyMountTargetSecurityGroups")
PutBackupPolicy = Action("PutBackupPolicy")
PutFileSystemPolicy = Action("PutFileSystemPolicy")
PutLifecycleConfiguration = Action("PutLifecycleConfiguration")
Restore = Action("Restore")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateFileSystem = Action("UpdateFileSystem")
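
# Hypothetical usage sketch: each object above stands in for an IAM action
# string such as "elasticfilesystem:ClientMount" when building policy
# documents; the exact rendering is defined by the BaseAction/BaseARN
# classes in the sibling .aws module (not shown here). Resource, region,
# and account values below are illustrative:
#   fs_arn = ARN(resource="file-system/fs-12345678",
#                region="us-east-1", account="123456789012")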
| StarcoderdataPython |
1850660 | <filename>libs/helpers.py
from ncclient import manager
from lxml import etree
def get_running_config(ip, port, uname, pw, device_params):
session = manager.connect(host=ip, port=port, username=uname, password=pw, device_params=device_params, hostkey_verify=False)
config = session.get_config(source='running').data_xml
config_tree = etree.fromstring(config.encode('UTF-8'))
    return config_tree
| StarcoderdataPython |
3519367 | # -*- coding:utf-8 -*-
from conf import *
from utils import *
import abc
class CNNModel(metaclass=abc.ABCMeta):
def __init__(self, param):
# input_shape = x_train.shape[1:]
self.param = param
self.train_poison = None
self.test_poison = None
self.classifier = None
def init(self, data):
self.input_shape = data.x_train.shape[1:]
self.min_ = data.min_
self.max_ = data.max_
def set_learning_phase(self, learning_phase):
K.set_learning_phase(learning_phase)
@abc.abstractmethod
def init_model(self):
pass
def predict_acc(self, x, y, is_poison, type_str):
# Evaluate the classifier on the test set
self.test_preds = np.argmax(self.classifier.predict(x), axis=1)
self.test_acc = np.sum(self.test_preds == np.argmax(y, axis=1)) / y.shape[0]
print("\n%s accuracy: %.2f%%" % (type_str, self.test_acc * 100))
# Evaluate the classifier on poisonous data in test set
# self.poison_preds = np.argmax(self.classifier.predict(x[is_poison]), axis=1)
self.poison_preds = self.test_preds[is_poison]
self.poison_acc = np.sum(self.poison_preds == np.argmax(y[is_poison], axis=1)) / max(is_poison.sum(),1)
print("\nPoisonous %s set accuracy (i.e. effectiveness of poison): %.2f%%" % (type_str, self.poison_acc * 100))
# Evaluate the classifier on clean data
# self.clean_preds = np.argmax(self.classifier.predict(x[is_poison == 0]), axis=1)
self.clean_preds = self.test_preds[is_poison==0]
self.clean_acc = np.sum(self.clean_preds == np.argmax(y[is_poison == 0], axis=1)) / y[is_poison == 0].shape[0]
print("\nClean %s set accuracy: %.2f%%" % (type_str, self.clean_acc * 100))
# when result_dict is not empty, start record experiment results
# to validate backdoor insert effectiveness
# check whether the backdoor data with poison label is predicted by the model with poison label
def predict(self, data):
# Evaluate the classifier on the train set
self.predict_acc(data.x_train, data.y_train, data.is_poison_train, 'train')
# visualize predict
# for i in range(3):
# data.visiualize_img_by_idx(np.where(np.array(data.is_poison_train) == 1)[0][i], self.poison_preds[i])
# Evaluate the classifier on the test set
self.predict_acc(data.x_test, data.y_test, data.is_poison_test, 'test')
'''
# visualize predict
for i in range(3):
print(np.where(np.array(data.is_poison_test) == 1)[0][i])
data.visiualize_img_by_idx(np.where(np.array(data.is_poison_test) == 1)[0][i], self.poison_preds[i], False)
'''
def predict_robust(self, x, y, is_poison, type_str=''):
self.test_preds = np.argmax(self.classifier.predict(x), axis=1)
self.test_acc = np.sum(self.test_preds == np.argmax(y, axis=1)) / y.shape[0]
print("\n%s accuracy: %.2f%%" % (type_str, self.test_acc * 100))
# Evaluate the classifier on poisonous data in test set
# self.poison_preds = np.argmax(self.classifier.predict(x[is_poison]), axis=1)
self.poison_preds = self.test_preds[is_poison]
self.poison_acc = np.sum(self.poison_preds == np.argmax(y[is_poison], axis=1)) / max(is_poison.sum(),1)
print("\nPoisonous %s set accuracy (i.e. effectiveness of poison): %.2f%%" % (type_str, self.poison_acc * 100))
# Evaluate the classifier on clean data
# self.clean_preds = np.argmax(self.classifier.predict(x[is_poison == 0]), axis=1)
self.clean_preds = self.test_preds[is_poison==0]
self.clean_acc = np.sum(self.clean_preds == np.argmax(y[is_poison == 0], axis=1)) / y[is_poison == 0].shape[0]
print("\nClean %s set accuracy: %.2f%%" % (type_str, self.clean_acc * 100))
def set_param(self, param):
self.classifier.param = param
self.param = param
def get_train_poison(self):
return self.train_poison
def set_train_poison(self, poison):
self.train_poison = poison
def get_test_poison(self):
return self.test_poison
def set_test_poison(self, poison):
self.test_poison = poison
def predict_instance(self, x):
return self.classifier.predict(x)[0]
def get_input_shape(self):
return self.input_shape
def set_input_shape(self, input_shape):
self.input_shape = input_shape
def get_classifier(self):
return self.classifier
def set_classifier(self, classifier):
self.classifier = classifier
def get_input_tensor(self):
return self.classifier.get_input_tensor()
def get_output_tensor(self):
return self.classifier.get_output_tensor()
@abc.abstractmethod
def get_dense_tensor(self):
pass
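
class SimpleCNN(CNNModel):
    """Minimal sketch of a concrete subclass (illustrative only).

    It assumes Keras plus the Adversarial Robustness Toolbox (ART), which
    the clip-value bookkeeping (min_/max_) above suggests; adapt the wiring
    to whatever conf/utils actually provide.
    """
    def init_model(self):
        from keras.models import Sequential
        from keras.layers import Conv2D, Flatten, Dense
        from art.classifiers import KerasClassifier
        model = Sequential([
            Conv2D(32, (3, 3), activation='relu',
                   input_shape=self.input_shape),
            Flatten(),
            Dense(10, activation='softmax'),
        ])
        model.compile(optimizer='adam', loss='categorical_crossentropy',
                      metrics=['accuracy'])
        self.model = model
        self.classifier = KerasClassifier(model=model,
                                          clip_values=(self.min_, self.max_))

    def get_dense_tensor(self):
        # penultimate-layer activations (the Flatten output in this sketch)
        return self.model.layers[-2].output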
| StarcoderdataPython |
115214 | <filename>yj_anova_test.py
#coding:utf-8
from scipy import stats
import numpy as np
from pandas import Series,DataFrame
from openpyxl import load_workbook
import math
import uuid
import os
def chart(data_ws,result_ws):
pass
def _produc_random_value(mean,stdrange):
b = np.random.uniform(*stdrange)
a = b/math.sqrt(2)
x1,x2 = mean-a, mean+a
return x1,x2,b
def _set_od_value(ws,row,x1,x2):
if row % 2 == 1:
ws['F'+str(row)]=x1
ws['F'+str(row+1)]=x2
def _get_mean_value(ws,row):
if row % 2 == 1:
return ws['G'+str(row)].value
else:
return ws['G'+str(row-1)].value
def _get_stdev_value(ws,row):
if row % 2 == 1:
return ws['H'+str(row)].value
else:
return ws['H'+str(row-1)].value
def _set_stdev_value(ws,row,stdev):
if row % 2 == 1:
ws['H'+str(row)] = stdev
def _get_one_row(ws,row):
time = ws['A'+str(row)].value
organ = ws['B'+str(row)].value
sp = ws['C'+str(row)].value
c = ws['D'+str(row)].value
rep = ws['E'+str(row)].value
od = ws['F'+str(row)].value
mean = _get_mean_value(ws,row)
stdev = _get_stdev_value(ws,row)
return Series([time,organ,sp,c,rep,float(od),float(mean),stdev],\
index=['time','organ','sp','c','rep','od','mean','stdev'])
def get_whole_dataframe(ws):
data={}
for i in range(3,ws.max_row+1):
data[i]=_get_one_row(ws,i)
return DataFrame(data).T
def _fill_data_ws(ws,stdrange):
for i in range(3,ws.max_row+1,2):
mean = _get_mean_value(ws,i)
x1,x2,b=_produc_random_value(mean,stdrange)
_set_od_value(ws,i,x1,x2)
_set_stdev_value(ws,i,b)
def _set_p_talbe_header(ws,result_ws):
for i in range(3,ws.max_row+1,10):
group = []
for j in range(i,i+10,2):
gname=ws['A'+str(j)].value+'_'+\
ws['B'+str(j)].value+'_'+\
ws['C'+str(j)].value+'_'+\
str(ws['D'+str(j)].value)
group.append(gname)
for k in range(5):
result_ws['B'+str(i+k+1)]=group[k]
result_ws[chr(ord('C')+k)+str(i)]=group[k]
# for i in range(3,ws.max_row+1,20):
# group = []
# for j in range(i,i+10,2):
# gname=ws['A'+str(j)].value+'_'+\
# ws['B'+str(j)].value+'_'+\
# ws['C'+str(j)].value+'_'+\
# ws['C'+str(j+10)].value+'_'+\
# str(ws['D'+str(j)].value)
# group.append(gname)
# for k in range(5):
# result_ws['J'+str(i+2*k+6)] = group[k]
def produce_p_table(ws,result_ws):
df = get_whole_dataframe(ws)
_set_p_talbe_header(ws,result_ws)
for (time,organ,sp),group_l1 in df.groupby(['time','organ','sp']):
group_l2 = [g for c,g in group_l1.groupby(['c'])]
i = group_l2[0].index[0]
for m in range(5):
for n in range(m+1,5):
g1 = group_l2[m]
g2 = group_l2[n]
f,p = stats.f_oneway(g1['od'],g2['od'])
result_ws[chr(ord('C')+m)+str(i+1+n)]=p
# for (time,organ,c),group_l1 in df.groupby(['time','organ','c']):
# group_l2 = [g for c,g in group_l1.groupby(['sp'])]
# i = group_l2[0].index[0]
# g1 = group_l2[0]
# g2 = group_l2[1]
# f,p = stats.f_oneway(g1['od'],g2['od'])
# result_ws['K'+str(i+6)]=p
def calc(data_ws,result_ws):
_fill_data_ws(data_ws,(0.1,0.6))
for i in range(3,data_ws.max_row+1,10):
group=[]
for j in range(i,i+10,2):
gname=data_ws['A'+str(j)].value+'_'+\
data_ws['B'+str(j)].value+'_'+\
data_ws['C'+str(j)].value+'_'+\
str(data_ws['D'+str(j)].value)
group.append([gname,Series([data_ws['F'+str(j)].value,\
data_ws['F'+str(j+1)].value])])
for k in range(5):
result_ws['B'+str(i+k+1)]=group[k][0]
result_ws[chr(ord('C')+k)+str(i)]=group[k][0]
for m in range(5):
for n in range(m,5):
args = [group[m][1],group[n][1]]
f,p = stats.f_oneway(*args)
result_ws[chr(ord('C')+m)+str(i+1+n)]=p
def main():
wb = load_workbook(filename = 'data/PODz.xlsx')
salt = wb.get_sheet_by_name('salt')
alkali = wb.get_sheet_by_name('alkali')
salt_result = wb.create_sheet(title="salt_result")
alkali_result = wb.create_sheet(title="alkali_result")
calc(salt,salt_result)
calc(alkali,alkali_result)
wb.save(filename = 'data/PODz_result.xlsx')
    print('Processing complete!')
def test(data_file,result_file):
wb = load_workbook(data_file)
sheetnames = wb.get_sheet_names()
for name in sheetnames:
sheet = wb.get_sheet_by_name(name)
result_sheet = wb.create_sheet(title='result_'+name)
        r = input(name + '-> enter standard deviation range (comma separated): ')
x,y = r.split(',')
x,y = float(x),float(y)
_fill_data_ws(sheet, (x,y))
print(name+"->填充随机值完成!")
produce_p_table(sheet, result_sheet)
print(name+"->计算P值完成!")
# salt = wb.get_sheet_by_name('salt')
# alkali = wb.get_sheet_by_name('alkali')
# salt_result = wb.create_sheet(title='salt_result')
# alkali_result = wb.create_sheet(title="alkali_result")
# _fill_data_ws(salt, stdrange)
# _fill_data_ws(alkali, stdrange)
# produce_p_table(salt, salt_result)
# produce_p_table(alkali, alkali_result)
wb.save(result_file)
def add_tags(result_file):
    # incomplete helper: loads the workbook but does not tag anything yet
    wb = load_workbook(result_file)
if __name__ == "__main__":
# main()
data_file = 'data2/ggb (copy).xlsx'
result_file = data_file.split('.')[0]+'_result('\
+str(uuid.uuid1())[:8]+').xlsx'
test(data_file,result_file)
    print(data_file + ': processing complete!')
| StarcoderdataPython |
4886512 | <gh_stars>0
""" Swagger documentation. """
INDEX = {
"responses": {
"200": {
"description": "A greeting."
}
},
}
| StarcoderdataPython |
8060658 | # -*- coding: utf-8 -*-
from django.shortcuts import HttpResponse, render_to_response
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.utils.translation import ugettext as _
from grappelli.models.bookmarks import Bookmark, BookmarkItem
from grappelli.settings import ADMIN_TITLE, ADMIN_URL
def add_bookmark(request):
"""
Add Site to Bookmarks.
"""
if request.method == 'POST':
if request.POST.get('path') and request.POST.get('title'):
next = request.POST.get('path')
try:
bookmark = Bookmark.objects.get(user=request.user)
except Bookmark.DoesNotExist:
bookmark = Bookmark(user=request.user)
bookmark.save()
try:
bookmarkitem = BookmarkItem.objects.get(bookmark=bookmark, link=request.POST.get('path'))
msg = _('Site is already bookmarked.')
except BookmarkItem.DoesNotExist:
try:
bookmarkitem = BookmarkItem(bookmark=bookmark, title=request.POST.get('title'), link=request.POST.get('path'))
bookmarkitem.save()
msg = _('Site was added to Bookmarks.')
except:
msg = _('Error: Site could not be added to Bookmarks.')
else:
msg = _('Error: Site could not be added to Bookmarks.')
next = request.POST.get('path')
else:
msg = _('Error: Site could not be added to Bookmarks.')
next = ADMIN_URL
# MESSAGE & REDIRECT
request.user.message_set.create(message=msg)
return HttpResponseRedirect(next)
add_bookmark = staff_member_required(add_bookmark)
def remove_bookmark(request):
"""
Remove Site from Bookmarks.
"""
if request.GET:
if request.GET.get('path'):
next = request.GET.get('path')
try:
bookmarkitem = BookmarkItem.objects.get(bookmark__user=request.user, link=request.GET.get('path'))
bookmarkitem.delete()
msg = _('Site was removed from Bookmarks.')
except BookmarkItem.DoesNotExist:
msg = _('Error: Site could not be removed from Bookmarks.')
else:
msg = _('Error: Site could not be removed from Bookmarks.')
next = ADMIN_URL
else:
msg = _('Error: Site could not be removed from Bookmarks.')
# MESSAGE & REDIRECT
request.user.message_set.create(message=msg)
return HttpResponseRedirect(next)
remove_bookmark = staff_member_required(remove_bookmark)
def get_bookmark(request):
"""
Get Bookmarks for the currently logged-in User (AJAX request).
"""
if request.method == 'GET':
if request.GET.get('path'):
object_list = BookmarkItem.objects.filter(bookmark__user=request.user).order_by('order')
try:
bookmark = Bookmark.objects.get(user=request.user)
except Bookmark.DoesNotExist:
bookmark = Bookmark(user=request.user)
bookmark.save()
try:
BookmarkItem.objects.get(bookmark__user=request.user, link=request.GET.get('path'))
is_bookmark = True
except BookmarkItem.DoesNotExist:
is_bookmark = False
else:
object_list = ""
is_bookmark = ""
else:
object_list = ""
is_bookmark = ""
return render_to_response('admin/includes_grappelli/bookmarks.html', {
'object_list': object_list,
'bookmark': bookmark,
'is_bookmark': is_bookmark,
'admin_title': ADMIN_TITLE,
'path': request.GET.get('path', ''),
})
get_bookmark = staff_member_required(get_bookmark)
| StarcoderdataPython |
9605754 | <reponame>mohibeyki/remoteAPI<filename>remoteAPI/exceptions.py
#!/usr/bin/env python3
from rest_framework import status
class ServiceError(Exception):
"""
Base class for microservice errors
Typically a Http response is generated from this.
"""
def __init__(self, type, message, suggested_http_status=None):
super().__init__(message)
self.type = type
self.message = message
self.suggested_http_status = suggested_http_status
class BadRequestError(ServiceError):
"""
Is raised when an invalid request comes from client
"""
def __init__(self, type, message, suggested_http_status=None):
super().__init__(type, message, suggested_http_status or status.HTTP_400_BAD_REQUEST)
class NotFoundError(ServiceError):
"""
Is raised when a requested entity does not exist
"""
def __init__(self, type, message):
super().__init__(type, message, status.HTTP_404_NOT_FOUND)
class ServerError(ServiceError):
"""
Is raised when an internal server error occurs
"""
def __init__(self,
type='server_error',
message='Unknown error; please try again later',
suggested_http_status=None):
super().__init__(type, message, suggested_http_status or status.HTTP_500_INTERNAL_SERVER_ERROR)
class ApiCallError(ServiceError):
"""
Is raised when a valid (expected) error status is returned from a remote API call.
"""
def __init__(self, type, message, status):
super().__init__(type, message, status)
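
# Hypothetical usage sketch (view and helper names are illustrative):
# def get_order(order_id):
#     order = orders.find(order_id)  # assumed data-access helper
#     if order is None:
#         raise NotFoundError('order_not_found',
#                             'Order %s does not exist' % order_id)
#     return order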
| StarcoderdataPython |
389815 | <filename>DD/IP/TEMPLATES/Session 3/propContours.py
############################################
## PROJECT CELL
## Image Processing Workshop
############################################
## Import OpenCV
import numpy
import cv2
############################################
## Read the image
img = cv2.imread('map.png')
## Do the processing
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,170,255,0)
##find the contours
## (OpenCV 3.x returns three values here; this two-value unpacking
## matches OpenCV 2.x and 4.x)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
##select any contour (index 0 is an arbitrary choice so the template runs)
i = 0
##find the co-ordinates of centroid of the contour
## (assumes the selected contour has non-zero area)
M = cv2.moments(contours[i])
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
##find the area of the contour
area = cv2.contourArea(contours[i])
cv2.drawContours(img,contours,i,(0,0,255),3)
cv2.imshow('contours',img)
############################################
############################################
## Close and exit
cv2.waitKey(0)
cv2.destroyAllWindows()
############################################
| StarcoderdataPython |
6544032 | import pandas as pd
import numpy as np
import altair as alt
import streamlit as st
import sys, argparse, logging
import json
def spell(spell_inputs):
mana = spell_inputs
x_col = st.selectbox("Select x axis for line chart", mana.columns)
xcol_string = x_col + ":O"
if st.checkbox("Show as continuous?", key="line_chart_x_is_cont"):
xcol_string = x_col + ":Q"
y_col = st.selectbox("Select y axis for line chart", mana.columns)
z_col = st.selectbox("Select z axis for line chart", mana.columns)
if st.checkbox("Show chart?", key="line_chart_show"):
chart = (
alt.Chart(mana)
.mark_line(point=True)
.encode(x=xcol_string, y=y_col, color=z_col, tooltip=list(mana.columns))
.interactive()
.properties(title="Line Chart for " + x_col + "," + y_col)
.configure_title(fontSize=20,)
.configure_axis(labelFontSize=20, titleFontSize=20)
.configure_legend(labelFontSize=20, titleFontSize=20)
)
st.altair_chart(chart, use_container_width=True)
return None, mana
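
# Hypothetical local check: in the real app the host passes `spell_inputs`
# (a DataFrame); something like the following, run via `streamlit run`,
# would exercise the widgets (column values are illustrative):
#   df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6], "z": ["a", "b", "c"]})
#   spell(df)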
| StarcoderdataPython |
1819465 | #!/usr/bin/env Python3
'''
TypeLoader backend functionality
'''
| StarcoderdataPython |
3460864 | <reponame>danmar3/twodlearn<gh_stars>0
# ***********************************************************************
# General purpose optimizer
#
# Wrote by: <NAME> (<EMAIL>)
# Modern Heuristics Research Group (MHRG)
# Virginia Commonwealth University (VCU), Richmond, VA
# http://www.people.vcu.edu/~mmanic/
#
# ***********************************************************************
from __future__ import division
from __future__ import print_function
import os
try:
import queue
except ImportError:
import Queue as queue
import shutil
import warnings
import threading
import collections
import numpy as np
from time import time
import tensorflow as tf
import twodlearn as tdl
from twodlearn import monitoring
from tqdm import tqdm
try:
from types import SimpleNamespace
except ImportError:
from argparse import Namespace as SimpleNamespace
class DataFeeder:
def __init__(self, feed_train, feed_valid=None):
self.train_feeder = feed_train
if feed_valid is None:
self.valid_feeder = None
else:
self.valid_feeder = feed_valid
def stop(self):
# self.train_feeder.stop()
# if self.valid_feeder is not None:
# self.valid_feeder.stop()
return
def __del__(self):
self.stop()
def feed_train(self):
return self.train_feeder()
def feed_valid(self):
return self.valid_feeder()
class ConstantLr(object):
def __init__(self, value):
self.placeholder = tf.placeholder(tf.float32)
self.value = value
def __call__(self, step, global_step):
return self.value
class OptimizationManager:
''' Performs a standard mini-batch training with validation evaluation '''
def _init_options(self, options):
default = {'progress/window_size': 50,
'progress/reset_multiplier': 10,
'progress/max_trials': 20}
options = tdl.core.check_defaults(options, default)
return options
def __init__(self, session, optimizer=None, step_op=None, monitor_manager=None,
n_logging=100, saver=None, options=None, optimizer_op=None):
self.session = session
self.optimizer = optimizer
self.step_op = step_op
if optimizer_op is not None:
warnings.warn('optimizer_op is deprecated, specify optimizer and '
'step_op instead')
self.optimizer = None
self.step_op = optimizer_op
self.monitor_manager = monitor_manager
self.n_logging = n_logging
self.n_steps = 0
self.saver = saver
self.options = self._init_options(options)
def check_progress(self, step, xp):
"""Check if progress was made in the last call to the optimizer
Args:
step (int): current optimizer step.
xp (list): list of outputs from the training monitors.
Returns:
bool: variables were reset.
"""
if (self.monitor_manager is None) or (self.saver is None):
return False
if len(self.monitor_manager.train.monitors) == 1:
monitor = self.monitor_manager.train.monitors[0]
xp = xp[0]
else:
# TODO: add a way to specify which monitor is measuring performance
# of the optimization process
return False
if ((self.options['progress/window_size'] < step) and
(monitor.min is not np.inf) and
(len(self.saver.checkpoints) > 1)):
mean = monitor.mean(self.options['progress/window_size'])
if (self.options['progress/reset_multiplier']*(mean - monitor.min)
< (xp - monitor.min)):
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(xp))
self.saver.restore()
return True
return False
def check_nan(self, step, xp):
"""Check if the result from the optimizer includes Nan values.
Args:
step (int): current step of the optimizer.
xp (list): list of outputs from the optimizer
Returns:
bool: True if variables were reset.
"""
if any([np.isnan(oi).any() for oi in xp
if oi is not None]):
if self.saver is None:
raise ValueError(
'Optimization returned NaN at step {}.'
'No checkpoint saver to restore state.'.format(step))
else:
print('Optimization returned NaN at step {}.'
'Restoring last checkpoint'.format(step))
self.saver.restore()
return True
return False
def run_step(self, step, ops, feed_dict):
"""Run a step of the optimizer.
Args:
step (type): Description of parameter `step`.
ops (type): Description of parameter `ops`.
feed_dict (type): Description of parameter `feed_dict`.
Returns:
type: Description of returned object.
"""
step_op, train_ops, monitor_ops = ops
n_trials = 0
while True:
out = self.session.run([step_op] + train_ops + monitor_ops,
feed_dict=feed_dict)
# check number of trials
n_trials += 1
if n_trials > self.options['progress/max_trials']:
return out
# Check for NaN
if self.check_nan(step, xp=out):
continue
# check for progress
if self.check_progress(step=step, xp=out[1:1 + len(train_ops)]):
continue
break
return out
def run(self,
n_train_steps, feed_train=None,
n_valid_steps=1, valid_eval_freq=1, feed_valid=None,
monitor_training=True):
if feed_train is None:
def feed_train(): return None
if feed_valid is None:
def feed_valid(): return None
data_feeder = DataFeeder(feed_train, feed_valid)
if self.monitor_manager:
train_monitors = self.monitor_manager.train.tf_monitors
train_ops = [m.op for m in train_monitors]
valid_monitors = self.monitor_manager.valid.tf_monitors
valid_ops = [m.op for m in valid_monitors]
else:
train_monitors = []
train_ops = []
valid_monitors = []
valid_ops = []
if monitor_training and self.monitor_manager:
monitor_monitors = self.monitor_manager.monitoring.tf_monitors
monitor_ops = [m.op for m in monitor_monitors]
else:
monitor_monitors = []
monitor_ops = []
# safer function
if self.saver is not None:
self.saver.reset()
# run optimizer
try:
for step in range(1, n_train_steps):
# Run optimization step
out = self.run_step(
step=step,
ops=(self.step_op, train_ops, monitor_ops),
feed_dict=data_feeder.feed_train())
self.n_steps += 1
# feed data to monitors
if train_ops:
train_output = out[1:1 + len(train_ops)]
for i, monitor in enumerate(train_monitors):
monitor.feed(train_output[i], self.n_steps)
if monitor_ops:
monitor_output = out[1 + len(train_ops):]
for i, monitor in enumerate(monitor_monitors):
monitor.feed(monitor_output[i], self.n_steps)
# file loggers
self.monitor_manager.train.write_data()
self.monitor_manager.monitoring.write_data()
# run validation evaluation
if valid_ops and (step % valid_eval_freq == 0):
for step_valid in range(0, n_valid_steps):
valid_output = self.session.run(
valid_ops,
feed_dict=data_feeder.feed_valid())
for i, monitor in enumerate(valid_monitors):
monitor.feed(valid_output[i], self.n_steps)
# file loggers
self.monitor_manager.valid.write_data()
# saver function
if (self.saver is not None):
self.saver.add_checkpoint(step)
# log
if (step % self.n_logging == 0) and self.monitor_manager:
# print information
train_info = [(m.name, m.mean()) for m in train_monitors]
valid_info = [(m.name, m.mean()) for m in valid_monitors]
# log information in files
# self.monitor_manager.train.write_stats()
# self.monitor_manager.valid.write_stats()
# self.monitor_manager.monitoring.write_stats()
print("{} | {} | {}".format(step, train_info, valid_info))
finally:
# clean up
data_feeder.stop()
self.monitor_manager.flush()
if self.saver is not None:
self.saver.restore()
self.saver.save()
class Optimizer(tdl.core.TdlModel):
_submodels = ['learning_rate', 'monitor_manager', 'optimizer', 'saver']
def _init_options(self, options):
default = {'progress/window_size': 50,
'progress/reset_multiplier': 10,
'progress/max_trials': 20}
options = tdl.core.check_defaults(options, default)
return options
@tdl.core.InputArgument
def session(self, value):
return (value if value is not None
else tf.get_default_session()
if tf.get_default_session() is not None
else tf.InteractiveSession())
@tdl.core.InputArgument
def log_folder(self, value):
if value is None:
if tdl.core.is_property_set(self, 'monitor_manager'):
value = self.monitor_manager.log_folder
else:
value = 'tmp/monitors/'
return value
def _monitor_from_dict(self, value):
train = (value if 'train' not in value
else value['train'] if isinstance(value['train'], dict)
else {'train': value['train']})
valid = (None if 'valid' not in value
else value['valid'] if isinstance(value['value'], dict)
else {'valid': value['valid']})
monitor = (None if 'monitoring' not in value
else value['monitoring']
if isinstance(value['monitoring'], dict)
else {'monitoring': value['monitoring']})
return monitoring.SimpleTrainingMonitor(
train_vars=train, valid_vars=valid, monitoring_vars=monitor,
log_folder=self.log_folder)
@tdl.core.Submodel
def monitor_manager(self, value):
tdl.core.assert_initialized_if_available(
self, 'monitor_manager', ['log_folder'])
if value is None:
value = {'train': {'train/loss': self.loss}}
monitor_manager = (self._monitor_from_dict(value)
if isinstance(value, dict)
else value)
        loss_monitor = list(filter(
            lambda monitor: (tf.convert_to_tensor(monitor.op) ==
                             tf.convert_to_tensor(self.loss)),
            monitor_manager.train.monitors))
if not loss_monitor:
monitor_manager.train.add_monitor(
monitoring.OpMonitor(self.loss, name=self.loss.name))
return monitor_manager
@tdl.core.LazzyProperty
def loss_monitor(self):
return list(filter(lambda monitor: monitor.op == self.loss,
self.monitor_manager.train.monitors))[0]
@tdl.core.Submodel
def learning_rate(self, value):
if value is None:
return ConstantLr(0.02)
else:
return value
@tdl.core.Submodel
def optimizer(self, value):
if value is None:
Optimizer = tf.train.AdamOptimizer
elif callable(value):
Optimizer = value
else:
return value
if hasattr(self.learning_rate, 'placeholder'):
optimizer = Optimizer(learning_rate=self.learning_rate.placeholder)
else:
optimizer = Optimizer(learning_rate=self.learning_rate)
return optimizer
@tdl.core.Submodel
def step_op(self, _):
step_op = self.optimizer.minimize(tf.convert_to_tensor(self.loss),
var_list=self.var_list)
self.reset()
return step_op
@property
def var_optim(self):
'''Variables created by the optimizer'''
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=self.scope)
for var in self.var_list:
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=var.name.split(':')[0])
vars += [vi for vi in var_list
if vi is not var]
return vars
def reset(self):
'''Reset optimizer variables (var_optim)'''
list(map(lambda x: x.initializer.run(), self.var_optim))
@tdl.core.Submodel
def saver(self, value):
tdl.core.assert_initialized(self, 'saver', ['monitor_manager'])
if value != 'default':
return value
if self.monitor_manager is None:
return None
monitor = (self.monitor_manager.valid.monitors[0]
if self.monitor_manager.valid.monitors
else self.monitor_manager.train.monitors[0])
return EarlyStopping(
monitor=monitor,
var_list=self.var_list,
logger_path=self.monitor_manager.log_folder,
session=self.session)
def __init__(self, loss, var_list, session=None, metrics=None,
n_logging=100, log_folder=None, options=None,
**kargs):
self.loss = tf.convert_to_tensor(loss)
self.var_list = (var_list if var_list is not None
else tf.trainable_variables())
self.n_logging = n_logging
self.n_steps = 0
if 'saver' not in kargs:
kargs['saver'] = 'default'
if metrics is not None and 'monitor_manager' in kargs:
raise ValueError('cannot specify metrics and monitor_manager '
'at the same time')
metrics = (kargs['monitor_manager'] if 'monitor_manager' in kargs
else metrics)
kargs = {key: val for key, val in kargs.items()
                 if key != 'monitor_manager'}
if log_folder is not None:
kargs['log_folder'] = log_folder
super(Optimizer, self).__init__(session=session,
monitor_manager=metrics,
options=options, **kargs)
def feed_train(self):
return dict()
def check_progress(self, step, xp):
"""Check if progress was made in the last call to the optimizer
Args:
step (int): current optimizer step.
xp (list): list of outputs from the training monitors.
Returns:
bool: variables were reset.
"""
if (self.monitor_manager is None) or (self.saver is None):
return False
monitor = self.loss_monitor
xp = xp[self.loss]
if ((self.options['progress/window_size'] < step) and
(monitor.min is not np.inf) and self.saver.checkpoints):
mean = monitor.mean(self.options['progress/window_size'])
if (self.options['progress/reset_multiplier']*(mean - monitor.min)
< (xp - monitor.min)):
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(xp))
self.saver.restore()
return True
return False
def check_nan(self, step, xp):
"""Check if the result from the optimizer includes Nan values.
Args:
step (int): current step of the optimizer.
xp (list): list of outputs from the optimizer
Returns:
bool: True if variables were reset.
"""
if any([np.isnan(oi).any() for oi in xp
if oi is not None]):
if self.saver is None:
print('Optimization returned NaN at step {}.'
'Re-initializing variables'.format(step))
self.session.run([v.initializer for v in self.var_list])
else:
print('Optimization returned NaN at step {}.'
'Restoring last checkpoint'.format(step))
if self.saver.checkpoints:
self.saver.restore()
else:
self.session.run([v.initializer for v in self.var_list])
return True
return False
def run_step(self, step, ops, feed_dict):
"""Run a step of the optimizer.
Args:
step (type): Description of parameter `step`.
ops (type): Description of parameter `ops`.
feed_dict (type): Description of parameter `feed_dict`.
Returns:
type: Description of returned object.
"""
if isinstance(self.learning_rate, ConstantLr):
feed_dict[self.learning_rate.placeholder] = \
self.learning_rate(step, self.n_steps)
n_trials = 0
while True:
output = self.session.run(ops, feed_dict=feed_dict)
output = {op: output[idx] for idx, op in enumerate(ops)}
# check number of trials
n_trials += 1
if n_trials > self.options['progress/max_trials']:
self.session.run([v.initializer for v in self.var_list])
output = self.session.run(ops, feed_dict=feed_dict)
output = {op: output[idx] for idx, op in enumerate(ops)}
return output
# Check for NaN
if self.check_nan(step, xp=output.values()):
continue
# check for progress
if self.check_progress(step=step, xp=output):
continue
break
return output
def run(self,
n_train_steps, feed_train=None,
n_valid_steps=1, valid_eval_freq=1, feed_valid=None,
monitor_training=True):
if feed_train is None:
def feed_train(): return dict()
if feed_valid is None:
def feed_valid(): return dict()
data_feeder = DataFeeder(feed_train, feed_valid)
if self.monitor_manager:
train_monitors = self.monitor_manager.train.tf_monitors
train_ops = [m.op for m in train_monitors]
valid_monitors = self.monitor_manager.valid.tf_monitors
valid_ops = [m.op for m in valid_monitors]
else:
train_monitors = []
train_ops = []
valid_monitors = []
valid_ops = []
if monitor_training and self.monitor_manager:
monitor_monitors = self.monitor_manager.monitoring.tf_monitors
monitor_ops = [m.op for m in monitor_monitors]
else:
monitor_monitors = []
monitor_ops = []
# saver function
if self.saver is not None:
self.saver.reset()
# run optimizer
try:
for step in tqdm(range(1, n_train_steps)):
# Run optimization step
xp = self.run_step(
step=step,
ops=[self.step_op] + train_ops + monitor_ops,
feed_dict=data_feeder.feed_train())
self.n_steps += 1
# feed data to monitors
if train_ops:
for i, monitor in enumerate(train_monitors):
monitor.feed(xp[monitor.op], self.n_steps)
if monitor_ops:
for i, monitor in enumerate(monitor_monitors):
monitor.feed(xp[monitor.op], self.n_steps)
# file loggers
self.monitor_manager.train.write_data()
self.monitor_manager.monitoring.write_data()
# run validation evaluation
if valid_ops and (step % valid_eval_freq == 0):
for step_valid in range(0, n_valid_steps):
valid_output = self.session.run(
valid_ops,
feed_dict=data_feeder.feed_valid())
for i, monitor in enumerate(valid_monitors):
monitor.feed(valid_output[i], self.n_steps)
# file loggers
self.monitor_manager.valid.write_data()
# saver function
if (self.saver is not None):
self.saver.add_checkpoint(step)
# log
if (step % self.n_logging == 0) and self.monitor_manager:
# print information
train_info = [(m.name, m.mean()) for m in train_monitors]
valid_info = [(m.name, m.mean()) for m in valid_monitors]
# log information in files
print("{} | {} | {}".format(step, train_info, valid_info))
finally:
# clean up
data_feeder.stop()
self.monitor_manager.flush()
if self.saver is not None:
if self.saver.checkpoints:
self.saver.restore()
self.saver.save()
class SimpleSaver(tdl.core.TdlObject):
@property
def checkpoints(self):
return None
def __init__(self, var_list, logger_path, session):
self.session = session
self.var_list = var_list
super(SimpleSaver, self).__init__(save={'logger_path': logger_path})
@tdl.core.EncapsulatedMethod
def save(self, locals, value):
self._saver = tf.train.Saver(var_list=self.var_list)
self._saver_id = 0
self._logger_path = os.path.join(value['logger_path'], 'optimizer')
if os.path.exists(self._logger_path):
shutil.rmtree(self._logger_path)
os.makedirs(self._logger_path)
@save.eval
def save(self, locals):
print('saving weights in {}'.format(self._logger_path))
saver_path = os.path.join(self._logger_path, 'var_checkpoint')
self._saver.save(
sess=self.session,
save_path=saver_path,
global_step=self._saver_id)
self._saver_id += 1
def add_checkpoint(self, step):
return
def reset(self):
return
def restore_file(self):
saver_path = os.path.join(self._logger_path,
'var_checkpoint-{}'.format(self._saver_id-1))
self._saver.restore(self.session, saver_path)
class EarlyStoppingV2(tdl.core.TdlObject):
@property
def checkpoints(self):
return self._checkpoints
@property
def optimizer(self):
return self._optimizer
@property
def session(self):
return self.optimizer.session
@tdl.core.Submodel
def objective(self, value):
if isinstance(value, monitoring.TrainingMonitor):
return value
else:
            valid_match = list(filter(
                lambda x: x.op == value,
                self.optimizer.monitor_manager.valid.monitors))
if valid_match:
return valid_match[0]
else:
raise ValueError('{} not found in set of valid monitors.'
''.format(value))
@tdl.core.EncapsulatedMethod
def restore(self, local, value):
local.placeholders = {var: tf.placeholder(tf.float32)
for var in self.optimizer.var_list}
assign_vars = [var.assign(local.placeholders[var])
for var in self.optimizer.var_list]
local.assign_vars = tf.group(assign_vars)
@restore.eval
def restore(self, local):
ckpt = self.checkpoints[-1]
feed_dict = {local.placeholders[var]: ckpt[var]
for var in self.optimizer.var_list}
self.session.run(local.assign_vars, feed_dict=feed_dict)
def reset(self):
self.check_progress.local.best_value = np.nan
def __init__(self, optimizer, objective, minimize=True):
self._optimizer = optimizer
        super(EarlyStoppingV2, self).__init__(objective=objective)
def __bool__(self):
return len(self.checkpoints) > 0
class EarlyStopping(tdl.core.TdlObject):
@property
def checkpoints(self):
return self._ckpts
def _init_options(self, options):
default = {'start_steps': 300,
'ckpts_dt': 5.0,
'window_size': 50}
options = tdl.core.check_defaults(options, default)
return options
def __init__(self, monitor, var_list, logger_path,
session, check_func=None, options=None):
self.monitor = monitor
self.session = session
self.var_list = var_list
self.options = self._init_options(options)
super(EarlyStopping, self).__init__(save={'logger_path': logger_path})
if check_func is not None:
raise NotImplementedError('Custom check_func not yet implemented.'
'Use None for the moment.')
self.check_func = (check_func if check_func is not None
else self.check_lower)
def check_progress(self, step, monitor):
if ((self.options['window_size'] < step) and
(monitor.min is not np.inf) and
(len(self._ckpts) > 1)):
mean = monitor.mean(self.options['window_size'])
current_value = monitor.current_value
if 10*(mean - monitor.min) < (current_value - monitor.min):
# pdb.set_trace()
print('Optimizer seems to have diverged from previous '
'sub-optimal region ({}). Resetting...'
''.format(current_value))
self.restore()
@tdl.core.EncapsulatedMethod
def check_lower(self, local, value):
local.time_last_ckpt = time()
local.best_value = np.nan
@check_lower.eval
def check_lower(self, local, step):
current_value = self.monitor.mean(self.options['window_size'])
save = (True if local.best_value is np.nan
else current_value < local.best_value)
save = ((time() - local.time_last_ckpt) > self.options['ckpts_dt']
and (step > self.options['start_steps'])
and save)
if save:
print(np.abs(local.best_value - current_value) /
(self.monitor.max - self.monitor.min))
local.time_last_ckpt = time()
local.best_value = current_value
return save
def check_greather(self, local):
current_value = self.monitor.mean(self.options['window_size'])
save = (True if local.best_value is np.nan
else local.best_value < current_value)
save = ((time() - local.time_last_ckpt) > self.options['ckpts_dt']
and save)
if save:
print(np.abs(local.best_value - current_value) /
(self.monitor.max - self.monitor.min))
local.time_last_ckpt = time()
local.best_value = current_value
return save
@tdl.core.EncapsulatedMethod
def add_checkpoint(self, local, value):
self._ckpts = collections.deque(maxlen=10)
@add_checkpoint.eval
def add_checkpoint(self, local, step):
if self.check_func(step):
print('checkpoint created')
values = self.session.run(self.var_list)
vars = {var: value for var, value in zip(self.var_list, values)}
self._ckpts.append(vars)
@tdl.core.EncapsulatedMethod
def restore(self, local, value):
local.placeholders = {var: tf.placeholder(tf.float32)
for var in self.var_list}
set_vars = [var.assign(local.placeholders[var])
for var in self.var_list]
local.set_vars = tf.group(set_vars)
@restore.eval
def restore(self, local):
ckpt = self._ckpts[-1]
feed_dict = {local.placeholders[var]: ckpt[var]
for var in self.var_list}
self.session.run(local.set_vars, feed_dict=feed_dict)
def reset(self):
self.check_lower.local.best_value = np.nan
@tdl.core.EncapsulatedMethod
def save(self, locals, value):
self._saver = tf.train.Saver(var_list=self.var_list)
self._saver_id = 0
self._logger_path = os.path.join(value['logger_path'], 'optimizer')
if os.path.exists(self._logger_path):
shutil.rmtree(self._logger_path)
os.makedirs(self._logger_path)
self._save_time = time()
@save.eval
def save(self, locals):
print('saving weights in {}'.format(self._logger_path))
saver_path = os.path.join(self._logger_path, 'var_checkpoint')
self._saver.save(
sess=self.session,
save_path=saver_path,
global_step=self._saver_id)
self._saver_id += 1
def restore_file(self):
saver_path = os.path.join(self._logger_path,
'var_checkpoint-{}'.format(self._saver_id-1))
self._saver.restore(self.session, saver_path)
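

# Hypothetical usage sketch (tensor and feed names are illustrative):
#   opt = Optimizer(loss=loss_tensor, var_list=tf.trainable_variables(),
#                   metrics={'train': {'train/loss': loss_tensor}})
#   opt.run(n_train_steps=1000,
#           feed_train=lambda: {inputs: batch_x, labels: batch_y})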
| StarcoderdataPython |
178866 | def _longest_common_subsequence(s1: str, s2: str) -> int:
"""
Let m and n be the lengths of two strings.
Build L[m+1][n+1] from the bottom up.
Note: L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1]
Runtime: O(mn)
Space Complexity: O(mn)
"""
m, n = len(s1), len(s2)
L = [[0] * (n + 1) for i in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if s1[i - 1] == s2[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
return L[m][n]
def longest_common_subsequence(s1: str, s2: str) -> int:
"""
Space-optimized version of LCS.
Let m and n be the lengths of two strings.
Runtime: O(mn)
Space Complexity: O(min(m, n))
"""
    m, n = len(s1), len(s2)
    if m < n:
        s1, s2 = s2, s1
        m, n = n, m  # keep n as the length of the shorter string after the swap
    L = [0] * (n + 1)
for a in s1:
prev_row, prev_row_col = 0, 0
for j, b in enumerate(s2):
prev_row, prev_row_col = L[j + 1], prev_row
if a == b:
L[j + 1] = prev_row_col + 1
else:
L[j + 1] = max(L[j], prev_row)
return L[-1]
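

if __name__ == "__main__":
    # Sanity check on a classic pair: the LCS of "ABCBDAB" and "BDCAB"
    # (e.g. "BCAB") has length 4, and both versions must agree.
    s1, s2 = "ABCBDAB", "BDCAB"
    assert _longest_common_subsequence(s1, s2) == 4
    assert longest_common_subsequence(s1, s2) == 4
    print(longest_common_subsequence(s1, s2))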
| StarcoderdataPython |
1845176 | <gh_stars>0
import asterid as ad
import dendropy
def asterid_dm_to_dendropy_dm(D, ts):
pdm = dendropy.PhylogeneticDistanceMatrix()
pdm.taxon_namespace = dendropy.TaxonNamespace()
pdm._mapped_taxa = set()
for i in range(len(ts)):
        for j in range(len(ts)):
si = ts[i]
sj = ts[j]
dij = D[i, j]
xi = pdm.taxon_namespace.get_taxon(si)
if not xi:
xi = dendropy.Taxon(si)
pdm.taxon_namespace.add_taxon(xi)
pdm._mapped_taxa.add(xi)
pdm._taxon_phylogenetic_distances[xi] = {}
xj = pdm.taxon_namespace.get_taxon(sj)
if not xj:
xj = dendropy.Taxon(sj)
pdm.taxon_namespace.add_taxon(xj)
pdm._mapped_taxa.add(xj)
pdm._taxon_phylogenetic_distances[xj] = {}
dij = float(dij)
pdm._taxon_phylogenetic_distances[xi][xj] = dij
return pdm
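
# Hypothetical usage sketch (labels and matrix are illustrative; D is an
# asterid distance matrix indexed as D[i, j]):
#   ts = ["A", "B", "C"]
#   pdm = asterid_dm_to_dendropy_dm(D, ts)
#   tree = pdm.nj_tree()  # dendropy neighbor joining on the converted matrix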
| StarcoderdataPython |
3254314 | <filename>cogs/misc.py
import datetime
import asyncio
import strawpy
import random
import re
import sys
import subprocess
from PythonGists import PythonGists
from appuselfbot import bot_prefix
from discord.ext import commands
from cogs.utils.checks import *
'''Module for miscellaneous commands'''
class Misc:
def __init__(self, bot):
self.bot = bot
self.regionals = {'a': '\N{REGIONAL INDICATOR SYMBOL LETTER A}', 'b': '\N{REGIONAL INDICATOR SYMBOL LETTER B}', 'c': '\N{REGIONAL INDICATOR SYMBOL LETTER C}',
'd': '\N{REGIONAL INDICATOR SYMBOL LETTER D}', 'e': '\N{REGIONAL INDICATOR SYMBOL LETTER E}', 'f': '\N{REGIONAL INDICATOR SYMBOL LETTER F}',
'g': '\N{REGIONAL INDICATOR SYMBOL LETTER G}', 'h': '\N{REGIONAL INDICATOR SYMBOL LETTER H}', 'i': '\N{REGIONAL INDICATOR SYMBOL LETTER I}',
'j': '\N{REGIONAL INDICATOR SYMBOL LETTER J}', 'k': '\N{REGIONAL INDICATOR SYMBOL LETTER K}', 'l': '\N{REGIONAL INDICATOR SYMBOL LETTER L}',
'm': '\N{REGIONAL INDICATOR SYMBOL LETTER M}', 'n': '\N{REGIONAL INDICATOR SYMBOL LETTER N}', 'o': '\N{REGIONAL INDICATOR SYMBOL LETTER O}',
'p': '\N{REGIONAL INDICATOR SYMBOL LETTER P}', 'q': '\N{REGIONAL INDICATOR SYMBOL LETTER Q}', 'r': '\N{REGIONAL INDICATOR SYMBOL LETTER R}',
's': '\N{REGIONAL INDICATOR SYMBOL LETTER S}', 't': '\N{REGIONAL INDICATOR SYMBOL LETTER T}', 'u': '\N{REGIONAL INDICATOR SYMBOL LETTER U}',
'v': '\N{REGIONAL INDICATOR SYMBOL LETTER V}', 'w': '\N{REGIONAL INDICATOR SYMBOL LETTER W}', 'x': '\N{REGIONAL INDICATOR SYMBOL LETTER X}',
'y': '\N{REGIONAL INDICATOR SYMBOL LETTER Y}', 'z': '\N{REGIONAL INDICATOR SYMBOL LETTER Z}', '0': '0⃣', '1': '1⃣', '2': '2⃣', '3': '3⃣',
'4': '4⃣', '5': '5⃣', '6': '6⃣', '7': '7⃣', '8': '8⃣', '9': '9⃣'}
@commands.command(pass_context=True)
async def about(self, ctx):
"""Links to the bot's github page."""
if embed_perms(ctx.message) and ctx.message.content[7:] != 'short':
em = discord.Embed(color=0xad2929, title='\ud83e\udd16 Appu\'s Discord Selfbot', description='**Features:**\n- Custom commands/reactions\n- Save last x images in a channel to your computer\n- Keyword notifier\n'
'- Set/cycle your game status and your avatar\n- Google web and image search\n- MyAnimeList search\n- Spoiler tagging\n'
'- Server info commands\n- Quoting, calculator, creating polls, and much more')
em.add_field(name='\ud83d\udd17 Link to download', value='[Github link](https://github.com/appu1232/Discord-Selfbot/tree/master)')
em.add_field(name='\ud83c\udfa5Quick examples:', value='[Simple commands](http://i.imgur.com/3H9zpop.gif)')
em.set_footer(text='Made by appu1232#2569', icon_url='https://i.imgur.com/RHagTDg.png')
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
await self.bot.send_message(ctx.message.channel, 'https://github.com/appu1232/Selfbot-for-Discord')
await self.bot.delete_message(ctx.message)
@commands.group(aliases=['status'], pass_context=True)
async def stats(self, ctx):
"""Bot stats."""
uptime = (datetime.datetime.now() - self.bot.uptime)
hours, rem = divmod(int(uptime.total_seconds()), 3600)
minutes, seconds = divmod(rem, 60)
days, hours = divmod(hours, 24)
if days:
time = '%s days, %s hours, %s minutes, and %s seconds' % (days, hours, minutes, seconds)
else:
time = '%s hours, %s minutes, and %s seconds' % (hours, minutes, seconds)
try:
game = self.bot.game
except:
game = 'None'
if embed_perms(ctx.message):
em = discord.Embed(title='Bot Stats', color=0x32441c)
em.add_field(name=u'\U0001F553 Uptime', value=time, inline=False)
em.add_field(name=u'\U0001F4E4 Messages sent', value=str(self.bot.icount))
            em.add_field(name=u'\U0001F4E5 Messages received', value=str(self.bot.message_count))
em.add_field(name=u'\u2757 Mentions', value=str(self.bot.mention_count))
em.add_field(name=u'\u2694 Servers', value=str(len(self.bot.servers)))
em.add_field(name=u'\u270F Keywords logged', value=str(self.bot.keyword_log))
em.add_field(name=u'\U0001F3AE Game', value=game)
mem_usage = '{:.2f} MiB'.format(__import__('psutil').Process().memory_full_info().uss / 1024**2)
em.add_field(name=u'\U0001F4BE Memory usage:', value=mem_usage)
em.set_footer(text='Selfbot made by appu1232#2569')
try:
g = git.cmd.Git(working_dir=os.getcwd())
g.execute(["git", "fetch", "origin", "master"])
version = g.execute(["git", "rev-list", "--right-only", "--count", "master...origin/master"])
commits = g.execute(["git", "rev-list", "--max-count=%s" % version, "origin/master"])
if version == '0':
status = 'Up to date.'
else:
latest = g.execute(["git", "log", "--pretty=oneline", "--abbrev-commit", "--stat", "--pretty", "-%s" % version, "origin/master"])
gist_latest = PythonGists.Gist(description='Latest changes for the selfbot.', content=latest, name='latest.txt')
if version == '1':
status = 'Behind by 1 release. [Latest update.](%s)' % gist_latest
else:
status = '%s releases behind. [Latest updates.](%s)' % (version, gist_latest)
em.add_field(name=u'\U0001f4bb Update status:', value=status)
            except:
                # if the git check fails (git missing, no remote, etc.), skip the update field instead of aborting the command
                pass
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
            msg = '**Bot Stats:** ```Uptime: %s\nMessages Sent: %s\nMessages Received: %s\nMentions: %s\nServers: %s\nKeywords logged: %s\nGame: %s```' % (time, str(self.bot.icount), str(self.bot.message_count), str(self.bot.mention_count), str(len(self.bot.servers)), str(self.bot.keyword_log), game)
await self.bot.send_message(ctx.message.channel, bot_prefix + msg)
await self.bot.delete_message(ctx.message)
# Embeds the message
@commands.command(pass_context=True)
async def embed(self, ctx):
"""Embed given text. Ex: Do >embed for more help"""
if ctx.message.content[6:].strip():
if embed_perms(ctx.message):
msg = ctx.message.content[6:].strip()
title = description = image = thumbnail = color = footer = author = None
embed_values = msg.split('|')
for i in embed_values:
if i.strip().lower().startswith('title='):
title = i.strip()[6:].strip()
elif i.strip().lower().startswith('description='):
description = i.strip()[12:].strip()
elif i.strip().lower().startswith('desc='):
description = i.strip()[5:].strip()
elif i.strip().lower().startswith('image='):
image = i.strip()[6:].strip()
elif i.strip().lower().startswith('thumbnail='):
thumbnail = i.strip()[10:].strip()
elif i.strip().lower().startswith('colour='):
color = i.strip()[7:].strip()
elif i.strip().lower().startswith('color='):
color = i.strip()[6:].strip()
elif i.strip().lower().startswith('footer='):
footer = i.strip()[7:].strip()
elif i.strip().lower().startswith('author='):
author = i.strip()[7:].strip()
if color:
if not color.startswith('0x'):
color = '0x' + color
if color:
em = discord.Embed(title=title, description=description, color=int(color, 16))
else:
em = discord.Embed(title=title, description=description)
for i in embed_values:
if i.strip().lower().startswith('field='):
field_inline = True
                        field = i.strip()[6:]  # drop the 'field=' prefix; lstrip('field=') would strip characters, not the prefix
field_name, field_value = field.split('value=')
if 'inline=' in field_value:
field_value, field_inline = field_value.split('inline=')
if 'false' in field_inline.lower() or 'no' in field_inline.lower():
field_inline = False
                        field_name = field_name.strip()
                        if field_name.lower().startswith('name='):
                            field_name = field_name[5:]
em.add_field(name=field_name, value=field_value.strip(), inline=field_inline)
if author:
if 'icon=' in author:
text, icon = author.split('icon=')
em.set_author(name=text.strip()[5:], icon_url=icon)
else:
em.set_author(name=author)
if image:
em.set_image(url=image)
if thumbnail:
em.set_thumbnail(url=thumbnail)
if footer:
if 'icon=' in footer:
text, icon = footer.split('icon=')
em.set_footer(text=text.strip()[5:], icon_url=icon)
else:
em.set_footer(text=footer)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No embed permissions in this channel.')
else:
msg = '**How to use the >embed command:**\n**Example:** >embed title=test this | description=some words | color=3AB35E | field=name=test value=test\n\n**You do NOT need to specify every property, only the ones you want.**\n**All properties and the syntax:**\ntitle=words\ndescription=words\ncolor=hexvalue\nimage=url_to_image (must be https)\nthumbnail=url_to_image\nauthor=words **OR** author=name=words icon=url_to_image\nfooter=words **OR** footer=name=words icon=url_to_image\nfield=name=words value=words (you can add as many fields as you want)\n\n**NOTE:** After the command is sent, the bot will delete your message and replace it with the embed. Make sure you have it saved or else you\'ll have to type it all again if the embed isn\'t how you want it.'
await self.bot.send_message(ctx.message.channel, bot_prefix + msg)
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def game(self, ctx):
"""Set playing status. Ex: >game napping >help game for more info
Your game status will not show for yourself, only other people can see it. This is a limitation of how the client works and how the api interacts with the client.
To set a rotating game status, do >game game1 | game2 | game3 | etc.
It will then prompt you with an interval in seconds to wait before changing the game and after that the order in which to change (in order or random)
Ex: >game with matches | sleeping | watching anime"""
if ctx.message.content[6:]:
game = str(ctx.message.clean_content[6:])
# Cycle games if more than one game is given.
if ' | ' in ctx.message.content[6:]:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Input interval in seconds to wait before changing to the next game (``n`` to cancel):')
def check(msg):
return msg.content.isdigit() or msg.content.lower().strip() == 'n'
def check2(msg):
return msg.content == 'random' or msg.content.lower().strip() == 'r' or msg.content.lower().strip() == 'order' or msg.content.lower().strip() == 'o'
reply = await self.bot.wait_for_message(author=ctx.message.author, check=check, timeout=60)
if not reply:
return
if reply.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled')
elif reply.content.strip().isdigit():
interval = int(reply.content.strip())
if interval >= 10:
self.bot.game_interval = interval
games = game.split(' | ')
if len(games) != 2:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Change game in order or randomly? Input ``o`` for order or ``r`` for random:')
s = await self.bot.wait_for_message(author=ctx.message.author, check=check2, timeout=60)
if not s:
return
if s.content.strip() == 'r' or s.content.strip() == 'random':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Game set. Game will randomly change every ``%s`` seconds' % reply.content.strip())
loop_type = 'random'
else:
loop_type = 'ordered'
else:
loop_type = 'ordered'
if loop_type == 'ordered':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Game set. Game will change every ``%s`` seconds' % reply.content.strip())
games = {'games': game.split(' | '), 'interval': interval, 'type': loop_type}
with open('settings/games.json', 'w') as g:
json.dump(games, g, indent=4)
self.bot.game = game.split(' | ')[0]
else:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled. Interval is too short. Must be at least 10 seconds.')
# Set game if only one game is given.
else:
self.bot.game_interval = None
self.bot.game = game
games = {'games': str(self.bot.game), 'interval': '0', 'type': 'none'}
with open('settings/games.json', 'w') as g:
json.dump(games, g, indent=4)
await self.bot.change_presence(game=discord.Game(name=game))
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Game set as: ``Playing %s``' % ctx.message.content[6:])
# Remove game status.
else:
self.bot.game_interval = None
self.bot.game = None
await self.bot.change_presence(game=None)
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Set playing status off')
if os.path.isfile('settings/games.json'):
os.remove('settings/games.json')
@commands.group(aliases=['avatars'], pass_context=True)
async def avatar(self, ctx):
"""Rotate avatars. See README for more info."""
if ctx.invoked_subcommand is None:
with open('settings/avatars.json', 'r+') as a:
avi_config = json.load(a)
if avi_config['password'] == '':
                return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cycling avatars requires you to input your password. Your password will not be sent anywhere and no one will have access to it. Enter your password with ``>avatar password <password>``. Make sure you are in a private channel where no one can see!')
if avi_config['interval'] != '0':
self.bot.avatar = None
self.bot.avatar_interval = None
avi_config['interval'] = '0'
with open('settings/avatars.json', 'w') as avi:
json.dump(avi_config, avi, indent=4)
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Disabled cycling of avatars.')
else:
if os.listdir('avatars'):
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Enabled cycling of avatars. Input interval in seconds to wait before changing avatars (``n`` to cancel):')
def check(msg):
return msg.content.isdigit() or msg.content.lower().strip() == 'n'
def check2(msg):
return msg.content == 'random' or msg.content.lower().strip() == 'r' or msg.content.lower().strip() == 'order' or msg.content.lower().strip() == 'o'
interval = await self.bot.wait_for_message(author=ctx.message.author, check=check, timeout=60)
if not interval:
return
if interval.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled.')
elif int(interval.content) < 1800:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Cancelled. Interval is too short. Must be at least 1800 seconds (30 minutes).')
else:
avi_config['interval'] = int(interval.content)
if len(os.listdir('avatars')) != 2:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Change avatars in order or randomly? Input ``o`` for order or ``r`` for random:')
cycle_type = await self.bot.wait_for_message(author=ctx.message.author, check=check2, timeout=60)
if not cycle_type:
return
if cycle_type.content.strip() == 'r' or cycle_type.content.strip() == 'random':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Avatar cycling enabled. Avatar will randomly change every ``%s`` seconds' % interval.content.strip())
loop_type = 'random'
else:
loop_type = 'ordered'
else:
loop_type = 'ordered'
avi_config['type'] = loop_type
if loop_type == 'ordered':
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Avatar cycling enabled. Avatar will change every ``%s`` seconds' % interval.content.strip())
with open('settings/avatars.json', 'r+') as avi:
avi.seek(0)
avi.truncate()
json.dump(avi_config, avi, indent=4)
self.bot.avatar_interval = interval.content
self.bot.avatar = random.choice(os.listdir('avatars'))
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No images found under ``avatars``. Please add images (.jpg .jpeg and .png types only) to that folder and try again.')
@avatar.command(aliases=['pass', 'pw'], pass_context=True)
async def password(self, ctx, *, msg):
"""Set your discord acc password to rotate avatars. See README for more info."""
with open('settings/avatars.json', 'r+') as a:
avi_config = json.load(a)
avi_config['password'] = msg.strip().strip('"').lstrip('<').rstrip('>')
a.seek(0)
a.truncate()
json.dump(avi_config, a, indent=4)
await self.bot.delete_message(ctx.message)
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Password set. Do ``>avatar`` to toggle cycling avatars.')
@commands.command(pass_context=True)
async def choose(self, ctx, *, choices: str):
"""Choose randomly from the options you give. >choose this | that"""
await self.bot.send_message(ctx.message.channel, bot_prefix + 'I choose: ``{}``'.format(random.choice(choices.split("|"))))
@commands.command(pass_context=True)
async def emoji(self, ctx, *, msg):
"""Get url of emoji (across any server). Ex: >emoji :smug:"""
url = None
exact_match = False
for server in self.bot.servers:
for emoji in server.emojis:
if msg.strip().lower() in str(emoji):
url = emoji.url
if msg.strip() == str(emoji).split(':')[1]:
url = emoji.url
exact_match = True
break
if exact_match:
break
if embed_perms(ctx.message) and url:
em = discord.Embed()
em.set_image(url=url)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
elif not embed_perms(ctx.message) and url:
await self.bot.send_message(ctx.message.channel, url)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Could not find emoji.')
return await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def ping(self, ctx):
"""Get response time."""
        msgtime = datetime.datetime.now()
await self.bot.send_message(ctx.message.channel, bot_prefix + ' pong')
now = datetime.datetime.now()
ping = now - msgtime
if embed_perms(ctx.message):
pong = discord.Embed(title='Response Time:', description=str(ping), color=0x7A0000)
pong.set_thumbnail(url='http://odysseedupixel.fr/wp-content/gallery/pong/pong.jpg')
await self.bot.send_message(ctx.message.channel, content=None, embed=pong)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + '``Response Time: %s``' % str(ping))
@commands.command(pass_context=True)
async def quote(self, ctx, *, msg: str = None):
"""Quote the last message sent in the channel. >help quote for more info.
>quote - quotes the last message sent in the channel.
>quote <words> - tries to search for a message sent recently that contains the given words and quotes it.
>quote <message_id> - quotes the given message. (Enable developer mode to copy message ids)."""
result = None
if msg:
length = len(self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id])
if length < 201:
size = length
else:
size = 200
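            # scan the channel log backwards (newest first), ignoring other >quote invocations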
for i in range(length-2, length-size, -1):
search = self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id][i]
if ctx.message.clean_content[6:].lower().strip() in search[0].clean_content.lower() and (search[0].author != ctx.message.author or search[0].content[:7] != '>quote '):
result = [search[0], search[0].author, search[0].timestamp]
break
if ctx.message.clean_content[6:].strip() == search[0].id:
result = [search[0], search[0].author, search[0].timestamp]
break
else:
search = self.bot.all_log[ctx.message.channel.id + ' ' + ctx.message.server.id][-2]
result = [search[0], search[0].author, search[0].timestamp]
if result:
await self.bot.delete_message(ctx.message)
if embed_perms(ctx.message) and result[0].content:
em = discord.Embed(description=result[0].content, timestamp=result[2], color=0xbc0b0b)
em.set_author(name=result[1].name, icon_url=result[1].avatar_url)
await self.bot.send_message(ctx.message.channel, embed=em)
else:
await self.bot.send_message(ctx.message.channel, '%s - %s```%s```' % (result[1].name, result[2], result[0].content))
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No quote found.')
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def poll(self, ctx, *, msg):
"""Create a strawpoll. Ex: >poll Favorite color = Blue | Red | Green"""
try:
options = [op.strip() for op in msg.split('|')]
if '=' in options[0]:
title, options[0] = options[0].split('=')
options[0] = options[0].strip()
else:
title = 'Poll by %s' % ctx.message.author.name
except:
return await self.bot.send_message(ctx.message.channel, bot_prefix + 'Invalid Syntax. Example use: ``>poll Favorite color = Blue | Red | Green | Purple``')
poll = strawpy.create_poll(title.strip(), options)
await self.bot.send_message(ctx.message.channel, bot_prefix + poll.url)
@commands.command(pass_context=True)
async def calc(self, ctx, *, msg):
"""Simple calculator. Ex: >calc 2+2"""
equation = msg.strip().replace('^', '**')
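        # '^' is rewritten to Python's exponent operator; the expression is then evaluated with eval()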
if '=' in equation:
left = eval(equation.split('=')[0])
right = eval(equation.split('=')[1])
answer = str(left == right)
else:
answer = str(eval(equation))
if embed_perms(ctx.message):
em = discord.Embed(color=0xD3D3D3, title='Calculator')
em.add_field(name='Input:', value=msg.replace('**', '^'), inline=False)
em.add_field(name='Output:', value=answer, inline=False)
await self.bot.send_message(ctx.message.channel, content=None, embed=em)
await self.bot.delete_message(ctx.message)
else:
await self.bot.send_message(ctx.message.channel, bot_prefix + answer)
@commands.command(pass_context=True)
async def l2g(self, ctx, *, msg: str):
"""Creates a googleitfor.me link. Ex: >l2g how do i become cool."""
lmgtfy = 'http://googleitfor.me/?q='
words = msg.lower().strip().split(' ')
for word in words:
lmgtfy += word + '+'
await self.bot.send_message(ctx.message.channel, bot_prefix + lmgtfy[:-1])
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def d(self, ctx):
"""Deletes the last message sent or n messages sent. Ex: >d 5"""
# If number of seconds/messages are specified
if len(ctx.message.content.lower().strip()) > 2:
if ctx.message.content[3] == '!':
killmsg = self.bot.self_log[ctx.message.channel.id][len(self.bot.self_log[ctx.message.channel.id]) - 2]
timer = int(ctx.message.content[4:].lower().strip())
# Animated countdown because screw rate limit amirite
destroy = await self.bot.edit_message(ctx.message, bot_prefix + 'The above message will self-destruct in:')
msg = await self.bot.send_message(ctx.message.channel, '``%s |``' % timer)
                # one message edit per second, cycling spinner glyphs until the timer hits zero
                spinner = ['|', '/', '-', '\\ ']
                for step in range(1, timer + 1):
                    remaining = timer - step
                    if remaining == 0:
                        await self.bot.delete_message(destroy)
                        msg = await self.bot.edit_message(msg, '``0``')
                        break
                    msg = await self.bot.edit_message(msg, '``%s %s``' % (remaining, spinner[(step - 1) % 4]))
                    await asyncio.sleep(1)
await self.bot.edit_message(msg, ':bomb:')
await asyncio.sleep(.5)
await self.bot.edit_message(msg, ':fire:')
await self.bot.edit_message(killmsg, ':fire:')
await asyncio.sleep(.5)
await self.bot.delete_message(msg)
await self.bot.delete_message(killmsg)
else:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
for i in range(0, int(ctx.message.content[3:])):
try:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
except:
pass
# If no number specified, delete message immediately
else:
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
await self.bot.delete_message(self.bot.self_log[ctx.message.channel.id].pop())
@commands.command(pass_context=True)
async def spoiler(self, ctx, *, msg : str):
"""Spoiler tag. Ex: >spoiler Some book | They get married."""
try:
if " | " in msg:
spoiled_work, spoiler = msg.lower().split(" | ", 1)
else:
spoiled_work, _, spoiler = msg.lower().partition(" ")
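            # ROT13-encode the spoiler body: shift each lowercase letter 13 places, leaving other characters untouched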
await self.bot.edit_message(ctx.message, bot_prefix + 'Spoiler for `' + spoiled_work + '`: \n`'
+ ''.join(map(lambda c: chr(ord('a') + (((ord(c) - ord('a')) + 13) % 26)) if c >= 'a' and c <= 'z' else c, spoiler))
+ '`\n' + bot_prefix + 'Use http://rot13.com to decode')
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Could not encrypt spoiler.')
@commands.group(pass_context=True)
async def gist(self, ctx):
"""Posts to gist"""
if ctx.invoked_subcommand is None:
url = PythonGists.Gist(description='Created in channel: {} in server: {}'.format(ctx.message.channel, ctx.message.server), content=ctx.message.content[6:], name='Output')
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Gist output: ' + url)
await self.bot.delete_message(ctx.message)
@gist.command(pass_context=True)
async def file(self, ctx, *, msg):
"""Create gist of file"""
try:
with open(msg) as fp:
output = fp.read()
url = PythonGists.Gist(description='Created in channel: {} in server: {}'.format(ctx.message.channel, ctx.message.server), content=output, name=msg.replace('/', '.'))
await self.bot.send_message(ctx.message.channel, bot_prefix + 'Gist output: ' + url)
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'File not found.')
finally:
await self.bot.delete_message(ctx.message)
@commands.command(pass_context=True)
async def regional(self, ctx, *, msg):
"""Replace letters with regional indicator emojis"""
await self.bot.delete_message(ctx.message)
msg = list(msg)
        regional_list = [self.regionals[x.lower()] if x.lower() in self.regionals else x for x in msg]
regional_output = ' '.join(regional_list)
await self.bot.send_message(ctx.message.channel, regional_output)
@commands.command(pass_context=True)
async def space(self, ctx, *, msg):
"""Add n spaces between each letter. Ex: >space 2 thicc"""
await self.bot.delete_message(ctx.message)
if msg.split(' ', 1)[0].isdigit():
spaces = int(msg.split(' ', 1)[0]) * ' '
msg = msg.split(' ', 1)[1].strip()
else:
spaces = ' '
msg = list(msg)
spaced_message = '{}'.format(spaces).join(msg)
await self.bot.send_message(ctx.message.channel, spaced_message)
@commands.command(pass_context=True)
async def react(self, ctx, msg: str, id: int = None):
"""Add letter(s) as reaction to previous message. Ex: >react hot"""
await self.bot.delete_message(ctx.message)
reactions = []
if id:
limit = 25
else:
limit = 1
        for i in msg:
            # fall back to the raw character when there is no regional-indicator mapping
            reactions.append(self.regionals.get(i.lower(), i))
async for message in self.bot.logs_from(ctx.message.channel, limit=limit):
if (not id and message.id != ctx.message.id) or (str(id) == message.id):
for i in reactions:
await self.bot.add_reaction(message, i)
def setup(bot):
bot.add_cog(Misc(bot))
| StarcoderdataPython |
1786879 | <filename>polyA/fill_consensus_position_matrix.py
from typing import Dict, List, Tuple
from .matrices import ConsensusMatrixContainer
from .performance import timeit
@timeit()
def fill_consensus_position_matrix(
row_count: int,
column_count: int,
start_all: int,
subfams: List[str],
chroms: List[str],
starts: List[int],
stops: List[int],
consensus_starts: List[int],
strands: List[str],
) -> ConsensusMatrixContainer:
"""
Fills matrix that holds the consensus position for each subfam at that
position in the alignment. Walks along the alignments one nucleotide at a time adding
the consensus position to the matrix.
At same time, fills ActiveCells.
input:
column_count: number of columns in alignment matrix - will be same number of
columns in consensus_matrix
row_count: number of rows in matrices
start_all: min start position on chromosome/target sequences for whole alignment
subfams: actual subfamily/consensus sequences from alignment
chroms: actual target/chromosome sequences from alignment
starts: start positions for all competing alignments (on target)
stops: stop positions for all competing alignments (on target)
consensus_starts: where alignment starts in the subfam/consensus sequence
strands: what strand each of the alignments are on - reverse strand will count down instead of up
output:
ConsensusMatrixContainer
>>> subs = ["", ".AA", "TT-"]
>>> chrs = ["", ".AA", "TTT"]
>>> strts = [0, 1, 0]
>>> stps = [0, 2, 2]
>>> con_strts = [-1, 0, 10]
>>> strandss = ["", "+", "-"]
>>> active, con_mat = fill_consensus_position_matrix(3, 3, 0, subs, chrs, strts, stps, con_strts, strandss)
>>> con_mat
{(1, 2): 0, (1, 3): 1, (2, 1): 10, (2, 2): 9, (2, 3): 9, (0, 0): 0, (0, 1): 0, (0, 2): 0}
>>> active
{2: [0, 1, 2], 3: [0, 1, 2], 1: [0, 2], 0: [0]}
"""
active_cells: Dict[int, List[int]] = {}
consensus_matrix: Dict[Tuple[int, int], int] = {}
# start at 1 to ignore 'skip state'
for row_index in range(1, row_count):
if strands[row_index] == "+":
consensus_pos = consensus_starts[row_index] - 1
col_index: int = starts[row_index] - start_all + 1
seq_index: int = 0
while col_index < stops[row_index] + 1 - start_all + 1:
# consensus pos only advances when there is not a gap in the subfam seq
if subfams[row_index][seq_index] != "-":
consensus_pos += 1
consensus_matrix[row_index, col_index] = consensus_pos
# matrix position only advances when there is not a gap in the chrom seq
if chroms[row_index][seq_index] != "-":
if col_index in active_cells:
active_cells[col_index].append(row_index)
else:
active_cells[col_index] = [0, row_index]
col_index += 1
seq_index += 1
else: # reverse strand
consensus_pos2 = consensus_starts[row_index] + 1
col_index2: int = starts[row_index] - start_all + 1
seq_index2: int = 0
while col_index2 < stops[row_index] + 1 - start_all + 1:
if subfams[row_index][seq_index2] != "-":
consensus_pos2 -= 1
consensus_matrix[row_index, col_index2] = consensus_pos2
if chroms[row_index][seq_index2] != "-":
if col_index2 in active_cells:
active_cells[col_index2].append(row_index)
else:
active_cells[col_index2] = [0, row_index]
col_index2 += 1
seq_index2 += 1
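    # the skip state (row 0) is marked active in every column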
for i in range(column_count):
consensus_matrix[0, i] = 0
if i not in active_cells:
active_cells[i] = [0]
return ConsensusMatrixContainer(active_cells, consensus_matrix)
| StarcoderdataPython |
5089534 | <reponame>lelechen63/idinvert_pytorch
import numpy as np
import cv2, PIL.Image
# show image in Jupyter Notebook (works inside loops)
from io import BytesIO
from IPython.display import display, Image
def show_img_arr(arr, bgr_mode = False):
if bgr_mode is True:
arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
im = PIL.Image.fromarray(arr)
bio = BytesIO()
im.save(bio, format='png')
display(Image(bio.getvalue(), format='png'))
# show depth array in Jupyter Notebook (works inside loops)
def show_depth_arr(depth_map):
depth_max = np.max(depth_map)
depth_min = np.min(depth_map)
depth_map = (depth_map - depth_min)/(depth_max - depth_min)*255
show_img_arr(depth_map.astype(np.uint8))
# rotate verts along y axis
def rotate_verts_y(verts, y):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
    angle = y * np.pi / 180
R = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
# rotate verts along x axis
def rotate_verts_x(verts, x):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
    angle = x * np.pi / 180
R = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
# rotate verts along z axis
def rotate_verts_z(verts, z):
verts_mean = np.mean(verts, axis = 0)
verts = verts - verts_mean
    angle = z * np.pi / 180
R = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
verts = np.tensordot(R, verts.T, axes = 1).T + verts_mean
return verts
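
# Minimal usage sketch (illustrative only, not part of the original module): a
# rotation about the centroid composed with its inverse should give back the
# original vertices up to floating-point error.
if __name__ == '__main__':
    pts = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    back = rotate_verts_y(rotate_verts_y(pts, 90.0), -90.0)
    assert np.allclose(pts, back)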
| StarcoderdataPython |
8016443 | <filename>caldavclientlibrary/protocol/url.py
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import urllib
class URL(object):
eAbsolute = 0
eRelative = 1
eLastPath = 2
URLEscape = '%'
URLReserved = "/?:@&="
URLUnreserved = ( # Allowable URL chars
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLCharacter = ( # Allowable URL chars -- all
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLXCharacter = ( # Allowable URL chars (all)
# RFC2732 uses '[...]' for IPv6 addressing - [] are now allowed
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, # 48 - 63
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLSchemeDoubleSlash = ("http", "https", "webcal",)
def __init__(self, url=None, scheme=None, server=None, path=None, extended=None, decode=False):
self.scheme = ""
self.server = ""
self.path = ""
self.extended = ""
if not url:
self.scheme = scheme
self.server = server
self.path = path
if self.path and decode:
self.path = urllib.unquote(self.path)
self.extended = extended
if self.extended and decode:
self.extended = urllib.unquote_plus(self.extended)
else:
self._parse(url, decode)
def __str__(self):
return "URL: %s" % (self.toString(),)
def __repr__(self):
return "URL: %s" % (self.toString(),)
def __cmp__(self, other):
return cmp(self.toString(), other.toString())
def absoluteURL(self):
return self.toString()
def relativeURL(self):
return self.toString(conversion=URL.eRelative)
def toString(self, conversion=eAbsolute, encode=True):
result = ""
# Add scheme & host if not relative
if conversion == URL.eAbsolute and self.scheme and self.server:
result += self.scheme + ":"
if self.scheme in URL.URLSchemeDoubleSlash:
result += "//"
result += self.server
# Get path (or last part of it if required)
if self.path and conversion == URL.eLastPath:
path = self.path[self.path.rfind("/"):]
else:
path = self.path
# Now encode if required
if path:
result += (urllib.quote(path) if encode else path)
if self.extended:
result += (urllib.quote_plus(self.extended, "?&=") if encode else self.extended)
return result
def equal(self, comp):
# Compare each component
if self.scheme != comp.scheme:
return False
if self.server != comp.server:
return False
# Ignore trailing slash
if self.path.rstrip("/") != comp.path.rstrip("/"):
return False
return True
def equalRelative(self, comp):
# Must be relative
if comp.server:
return False
if not self.path and not comp.path:
return True
if not self.path or not comp.path:
return False
# Just compare paths, ignore trailing slash
return self.path.rstrip("/") == comp.path.rstrip("/")
def dirname(self):
if self.path:
newpath = os.path.dirname(self.path.rstrip("/")) + "/"
return URL(scheme=self.scheme, server=self.server, path=newpath)
def _parse(self, url, decode=False):
# Strip off main scheme
if url.lower().startswith("url:"):
url = url[4:]
# Special - if it starts with "/" its a relative HTTP url
if url[0] == '/':
self.scheme = "http"
self.server = None
self._parsePath(url, decode)
else:
# Get protocol scheme
self.scheme = url[:url.find(":")].lower()
url = url[len(self.scheme):]
if self.scheme in URL.URLSchemeDoubleSlash:
assert(url.startswith("://"))
# Look for server
splits = url[3:].split("/", 1)
self.server = splits[0]
if len(splits) == 2:
self._parsePath("/" + splits[1], decode)
elif self.scheme in ("mailto", "urn",):
assert(url.startswith(":"))
# Look for server
self.server = url[1:]
def _parsePath(self, path, decode=False):
# Look for extended bits
splits = path.split("?", 1)
self.path = splits[0]
if decode:
self.path = urllib.unquote(self.path)
if len(splits) == 2:
self.extended = "?" + splits[1]
if decode:
self.extended = urllib.unquote_plus(self.extended)
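
# Minimal usage sketch (illustrative only, not part of the original module; the
# example URL is a made-up value):
if __name__ == "__main__":
    u = URL(url="http://example.com/calendars/user%20name/?tag=1", decode=True)
    print u.scheme, u.server, u.path    # -> http example.com /calendars/user name/
    print u.relativeURL()               # -> /calendars/user%20name/?tag=1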
| StarcoderdataPython |
3525205 | <filename>prepare_verbs.py
import jsonpickle as jp
from utils import open_file, write_file, collator
jp.set_encoder_options('simplejson', sort_keys=True, indent=4, ensure_ascii=False)
content = open_file('input/monlam_verbs.json')
json = jp.decode(content)
dadrag = open_file('input/dadrag_syllables.txt').strip().split('\n')
entries = []
for inflected, context in json.items():
# a few entries don't have any content in monlam_verbs.json and are filtered here
# like : ལྷོགས་ | ༡བྱ་ཚིག 1. ༡བརྡ་རྙིང་། རློགས། 2. ཀློགས། that parses into "ལྷོགས": []
if context == []:
continue
possible_verbs = []
for verb in context:
# inflected verbs
if 'བྱ་ཚིག' in verb.keys():
possible_verbs.append(verb['བྱ་ཚིག'])
# non-inflected verbs (གཟུགས་མི་འགྱུར་བ།)
else:
possible_verbs.append(inflected)
# de-duplicate the verbs
possible_verbs = list(set(possible_verbs))
# add an entry for every possible verb
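# syllables carrying a da drag take back the suffix letter ད before being written out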
if inflected in dadrag:
for verb in possible_verbs:
entries.append((inflected+'ད', '/'+verb))
else:
for verb in possible_verbs:
if inflected == verb:
entries.append((inflected, '='))
else:
entries.append((inflected, '/'+verb))
tib_sorted = sorted(entries, key=lambda x: collator.getSortKey(x[0]))
lines = ['{} {}'.format(inflected, lemma) for inflected, lemma in tib_sorted]
write_file('output/parsed_verbs.txt', '\n'.join(lines))
| StarcoderdataPython |