Unnamed: 0 (int64, 0 - 15.9k) | cleaned_code (stringlengths 67 - 124k, ⌀) | cleaned_prompt (stringlengths 168 - 30.3k, ⌀)
---|---|---|
14,900 | <ASSISTANT_TASK:>
Python Code:
from ipyparallel import Client
import numpy as np  # needed for np.arange / np.zeros / np.mean used below
c = Client()
c.ids
dview = c[:]
dview
def fahrenheit(T):
return 9 / 5 * T + 32
temp = np.arange(0, 110, 10)
temp
F = map(fahrenheit, temp)
F, list(F)
def create_prime(primes, n):
for p in primes:
if n % p == 0:
return primes
primes.append(n)
return primes
from functools import reduce
reduce(create_prime, np.arange(2, 100), [2])
def pyprimes(kmax): # don't worry about the details; just treat this as a somewhat heavy prime-generating function
p = np.zeros(1000)
result = []
if kmax > 1000:
kmax = 1000
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
i = i + 1
if i == k:
p[k] = n
k = k + 1
result.append(n)
n = n + 1
return result
%time result = map(pyprimes, range(700, 1000)) # inside Docker this and the parallel run below will time about the same; on a real multi-engine server the one below should differ
%time parallel_result = dview.map_sync(pyprimes, range(700, 1000)) # blocks: if even one of the engines hasn't answered yet, nothing is returned until they have all finished
parallel_result == result
async_result = dview.map_async(pyprimes, range(700, 1000)) # returns control immediately even before the work is done; you monitor progress yourself
async_result.progress #tells you how many engines have finished
async_result.get()[0][-10:]
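# A minimal monitoring sketch (my addition, not in the original notebook), using only
# the AsyncMapResult API already shown above (ready(), progress, get()): poll until
# every engine has reported back, then collect the results.
import time
while not async_result.ready():
    print("engines finished so far:", async_result.progress)
    time.sleep(1)
parallel_primes = async_result.get()  # one result list per input value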
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
news = fetch_20newsgroups(subset="all")
n_samples = 3000
X_train = news.data[:n_samples]
y_train = news.target[:n_samples]
model = Pipeline([
('vect', TfidfVectorizer(stop_words="english", token_pattern=r"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b")),  # raw string so \b is a regex word boundary, not a backspace
('svc', SVC()),
])
from sklearn.externals import joblib
import os
from sklearn.cross_validation import KFold, cross_val_score
def persist_cv_splits(X, y, K=3, name="data", suffix="_cv_%03d.pkl"): # split the data into K folds and persist each fold to disk
cv_split_filenames = []
cv = KFold(n_samples, K, shuffle=True, random_state=0)
for i, (train, test) in enumerate(cv):
cv_fold = ([X[k] for k in train], y[train],
[X[k] for k in test], y[test])
cv_split_filename = name + suffix % i
cv_split_filename = os.path.abspath(cv_split_filename)
joblib.dump(cv_fold, cv_split_filename)
cv_split_filenames.append(cv_split_filename)
return cv_split_filenames
cv_filenames = persist_cv_splits(X_train, y_train, name="news")
cv_filenames
def compute_evaluation(cv_split_filename, model, params):
from sklearn.externals import joblib
X_train_, y_train_, X_test_, y_test_ = joblib.load(cv_split_filename, mmap_mode="c")
model.set_params(**params)
model.fit(X_train_, y_train_)
test_scores = model.score(X_test_, y_test_)
return test_scores
from sklearn.grid_search import ParameterGrid
def parallel_grid_search(lb_view, model, cv_split_filenames, param_grid): # lb_view is a load-balanced view over the engines
all_tasks = []
all_parameters = list(ParameterGrid(param_grid))
for i, params in enumerate(all_parameters):
task_for_params = []
for j, cv_split_filename in enumerate(cv_split_filenames):
t = lb_view.apply(compute_evaluation, cv_split_filename, model, params) # similar to map, but apply schedules a single call; it runs on an engine, not locally
task_for_params.append(t)
all_tasks.append(task_for_params)
return all_parameters, all_tasks
import datetime
def print_progress(tasks):
progress = np.mean([task.ready() for task_group in tasks for task in task_group])
print("{0}:{1}%".format(datetime.datetime.now(), progress * 100.0))
return int(progress * 100.0)
from ipyparallel import Client
client = Client()
print(client.ids)
lb_view = client.load_balanced_view()
from sklearn.grid_search import GridSearchCV
parameters = {
"svc__gamma": np.logspace(-2, 1, 4),
"svc__C": np.logspace(-1, 1, 3),
}
all_parameters, all_tasks = parallel_grid_search(lb_view, model, cv_filenames, parameters)
import time
start_time = datetime.datetime.now()
while True:
progress = print_progress(all_tasks)
if progress >= 100:
break
time.sleep(1)
print("finish")
end_time = datetime.datetime.now()
print((end_time - start_time).total_seconds())
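# A possible follow-up once every task is done (my addition, not part of the original
# notebook): average the per-fold scores returned by compute_evaluation and pick the
# best parameter combination; t.get() fetches the score computed on an engine.
mean_scores = [np.mean([t.get() for t in task_group]) for task_group in all_tasks]
best_index = int(np.argmax(mean_scores))
print(all_parameters[best_index], mean_scores[best_index])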
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Map / Reduce
Step2: Parallel Map
Step3: Saving the model
|
14,901 | <ASSISTANT_TASK:>
Python Code:
from functools import wraps
def debug(func):
msg = func.__name__
# wraps is used to keep the metadata of the original function
@wraps(func)
def wrapper(*args, **kwargs):
print(msg)
return func(*args, **kwargs)
return wrapper
@debug
def add(x,y):
return x+y
add(2,3)
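# Quick sanity check (my addition): thanks to functools.wraps, the decorated function
# keeps its own metadata instead of reporting the inner wrapper's.
print(add.__name__)  # -> 'add', not 'wrapper'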
def add(x,y):
return x+y
debug(add)
debug(add)(2,3)
def debug_with_args(prefix=''):
def decorate(func):
msg = prefix + func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
print(msg)
return func(*args, **kwargs)
return wrapper
return decorate
@debug_with_args(prefix='***')
def mul(x,y):
return x*y
mul(2,3)
def mul(x,y):
return x*y
debug_with_args(prefix='***')
debug_with_args(prefix='***')(mul)
debug_with_args(prefix='***')(mul)(2,3)
from functools import wraps, partial
def debug_with_args2(func=None, prefix=''):
if func is None: # no function was passed
return partial(debug_with_args2, prefix=prefix)
msg = prefix + func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
print(msg)
return func(*args, **kwargs)
return wrapper
@debug_with_args2(prefix='***')
def div(x,y):
return x / y
div(4,2)
def div(x,y):
return x / y
debug_with_args2(prefix='***')
debug_with_args2(prefix='***')(div)
debug_with_args2(prefix='***')(div)(4,2)
f = debug_with_args2(prefix='***')
def div(x,y):
return x / y
debug_with_args2(prefix='***')(div)
def debug_with_args_nonpartial(func, prefix=''):
msg = prefix + func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
print(msg)
return func(*args, **kwargs)
return wrapper
def plus1(x):
return x+1
debug_with_args_nonpartial(plus1, prefix='***')(23)
@debug_with_args_nonpartial
def plus1(x):
return x+1
plus1(23)
@debug_with_args_nonpartial(prefix='***')
def plus1(x):
return x+1
def debug_with_args3(*args, **kwargs):
def inner(func, **kwargs):
if 'prefix' in kwargs:
msg = kwargs['prefix'] + func.__name__
else:
msg = func.__name__
print(msg)
return func
# decorator without arguments
if len(args) == 1 and callable(args[0]):
func = args[0]
return inner(func)
# decorator with keyword arguments
else:
return partial(inner, prefix=kwargs['prefix'])
def plus2(x):
return x+2
debug_with_args3(plus2)(23)
debug_with_args3(prefix='***')(plus2)(23)
@debug_with_args3 # WRONG: this shouldn't print anything during creation
def plus2(x):
return x+2
plus2(12) # WRONG: this should print the function name and the prefix
@debug_with_args3(prefix='###') # WRONG: this shouldn't print anything during creation
def plus2(x):
return x+2
plus2(12) # WRONG: this should print the function name and the prefix
def debugmethods(cls):
for name, val in vars(cls).items():
if callable(val):
setattr(cls, name, debug(val))
return cls
@debugmethods
class Spam(object):
def foo(self):
pass
def bar(self):
pass
s = Spam()
s.foo()
s.bar()
def debugattr(cls):
orig_getattribute = cls.__getattribute__
def __getattribute__(self, name):
print('Get:', name)
return orig_getattribute(self, name)
cls.__getattribute__ = __getattribute__
return cls
@debugattr
class Ham(object):
def foo(self):
pass
def bar(self):
pass
h = Ham()
h.foo()
h.bar
class debugmeta(type):
def __new__(cls, clsname, bases, clsdict):
clsobj = super(debugmeta, cls).__new__(cls, clsname, bases, clsdict)
clsobj = debugmethods(clsobj)
return clsobj
# class Base(metaclass=debugmeta): # won't work in Python 2.7
# pass
# class Bam(Base):
# pass
# cf. minute 27
class Spam:
pass
s = Spam()
from copy import deepcopy
current_vars = deepcopy(globals())
for var in current_vars:
if callable(current_vars[var]):
print var,
frozendict
for var in current_vars:
cls = getattr(current_vars[var], '__class__')
if cls:
print var, cls
print current_vars['Spam']
type(current_vars['Spam'])
callable(Spam)
callable(s)
isinstance(Spam, classobj)
__name__
sc = s.__class__
type('Foo', (), {})
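# For context (my addition): the three-argument form of type() builds a class
# dynamically from a name, a tuple of bases and an attribute dict; this is the same
# machinery a metaclass like debugmeta ultimately hooks into.
Foo = type('Foo', (object,), {'x': 1, 'hello': lambda self: 'hi'})
f = Foo()
print(f.x)
print(f.hello())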
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decorators with arguments
Step2: Decorators with arguments
Step3: Debug with arguments
Step4: Decorators with arguments
Step5: Class decorators
Step6: Class decoration
Step7: Debug all the classes?
Step8: Can we inject the debugging code into all known classes?
|
14,902 | <ASSISTANT_TASK:>
Python Code:
%%R
# I had to import foreign to get access to read.dta
library("foreign")
kidiq <- read.dta("../../ARM_Data/child.iq/kidiq.dta")
# I won't attach kidiq-- i generally don't attach to avoid confusion(s)
#attach(kidiq)
%%R
library("arm")
%%R
fit.3 <- lm(kidiq$kid_score ~ kidiq$mom_hs + kidiq$mom_iq)
%%R
display(fit.3)
%%R
print(fit.3)
%%R
summary(fit.3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the arm library-- see the Chapter 3.1 notebook if you need help.
Step2: Regression-- to demonstrate reports of fit, Pg38
Step3: Display, Pg 38
Step4: Print, Pg 39
Step5: Summary, Pg 38
|
14,903 | <ASSISTANT_TASK:>
Python Code:
!pip install nose
%%file type_util.py
class TypeUtil:
@classmethod
def is_iterable(cls, obj):
"""Determines if obj is iterable.
Useful when writing functions that can accept multiple types of
input (list, tuple, ndarray, iterator). Pairs well with
convert_to_list.
"""
try:
iter(obj)
return True
except TypeError:
return False
@classmethod
def convert_to_list(cls, obj):
"""Converts obj to a list if it is not a list and it is iterable,
else returns the original obj.
"""
if not isinstance(obj, list) and cls.is_iterable(obj):
obj = list(obj)
return obj
%%file tests/test_type_util.py
from nose.tools import assert_equal
from ..type_util import TypeUtil
class TestUtil():
def test_is_iterable(self):
assert_equal(TypeUtil.is_iterable('foo'), True)
assert_equal(TypeUtil.is_iterable(7), False)
def test_convert_to_list(self):
assert_equal(isinstance(TypeUtil.convert_to_list('foo'), list), True)
assert_equal(isinstance(TypeUtil.convert_to_list(7), list), False)
!nosetests tests/test_type_util.py -v
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Create the Code
Step4: Create the Nose Tests
Step5: Run the Nose Tests
|
14,904 | <ASSISTANT_TASK:>
Python Code:
# import the appropriate libraries
import xml.etree.ElementTree as ET # xml processing
# read the XML file
tree = ET.parse('input/menu.xml')
print('tree element:\t', tree)
# get the root of the tree
root = tree.getroot()
print('root element:\t ', root)
# here is the name of the root element
root.tag
# get the children of breakfast_menu, the next level down the tree
children = root.getchildren()
for child in children:
print (child.tag)
# for each child (node), get it's children and print out their names (tags)
for child in children:
grand_children = child.getchildren()
print (child.tag, '\t', child.attrib)
for grand_child in grand_children:
print (grand_child.tag)
# make a list of all the <food> tags
food_tags = root.findall('food')
print ('number of food tags = ', len(food_tags))
# print the <food> tags - it's not what you would expect
print (food_tags)
# access the enties in the list
first_food_item = food_tags[0]
print ('the first child node is:\t', first_food_item)
# here's how we can view a child node's content
ET.dump(first_food_item)
section = 'food'
tag = 'price'
node = root.find(section)
subnode = node.find(tag)
print ("Path to Price subnode of Food node:")
print ("Root:", str(root), " Node: ", node, "Subnode: ", subnode)
#node = root.find('food')
#subnode = node.find('price')
#subsubnode = subnode.find('curr')
#Specify the path to the 'name' attribute of the 'food' node
node = root.find(section)
attribute = node.attrib['name']
print ("Path to Name attribute of Food node:")
print ("Root:", str(root), " Node: ", node, "Attribute: ", attribute)
#Find the attributes of each food node
print "All nodes, subnodes and attributes:"
for node in root:
print (node.tag, node.attrib)
for subnode in node:
print (subnode.tag, subnode.text)
#Add a new attribute to each food tag
for node in tree.iter(tag='food'):
node.set('category', 'breakfast')
# you can search by name
name = 'Belgian Waffles'
for selected_name in root.findall("./food[@name='%s']" % name):
#print the description associated with the selected name
print "Found Belgian Waffles!"
print name, ":", selected_name.find('description').text
#find a specific node
#and update a subnode
for node in tree.iter(tag='food'):
if node.attrib['name'] == 'French Toast':
subnode = node.find('price')
print ("Subnode text: ", subnode.text)
subnode.text = '$6.50'
print ("Modified subnode text: ", subnode.text)
#Add a new subelement to the root
new_name = 'Three-Egg Omlette'
new_price = '$7.95'
new_description = 'three-egg omlette with your choice of meat, cheese and vegetables'
new_calories = '900'
food_node = ET.SubElement(root, 'food', {'name':new_name})
price_subnode = ET.SubElement(food_node, 'price')
price_subnode.text = new_price
description_subnode = ET.SubElement(food_node, 'description')
description_subnode.text = new_description
calories_subnode = ET.SubElement(food_node, 'calories')
calories_subnode.text = new_calories
#<el name='x'> 4 </el>
#<price amount='5.5' cur='$$'/>
#Write out the modified xml
tree.write('output/outputMenu.xml')
import xmltodict
with open('input/menu.xml') as fd:
doc = xmltodict.parse(fd.read())
print(doc)
import untangle
obj = untangle.parse('input/menu.xml')
obj.breakfast_menu.food[0].calories
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use xmltodict
Step2: Use untangle
|
14,905 | <ASSISTANT_TASK:>
Python Code:
from scipy import sparse
V = sparse.random(10, 10, density = 0.05, format = 'coo', random_state = 42)
x = 100
y = 99
V = V.copy()
V.data += x
V.eliminate_zeros()
V.data += y
V.eliminate_zeros()
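# A quick check of the effect (my addition): only the explicitly stored non-zero
# entries were shifted by x + y; the implicit zeros are untouched, which is what
# distinguishes this from a dense V + x + y.
print(V.nnz)        # number of stored entries remaining
print(V.toarray())  # dense view: zeros stay zero, stored entries are shifted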
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,906 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-ll', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
14,907 | <ASSISTANT_TASK:>
Python Code:
# Imports
import matplotlib.pyplot as plt
import numpy
import pandas
import scipy
import scipy.stats
import os
# Using os.listdir to show the current directory
os.listdir("./")
# Using os.listdir to show the output directory
os.listdir("output")[0:5]
import glob
# Using glob to list the output directory
glob.glob("output/run-*")[0:5]
run_directory = os.listdir("output")[0]
print(run_directory)
print(os.path.join(run_directory,
"parameters.csv"))
print(run_directory)
print(os.path.basename(run_directory))
# Create "complete" data frames
run_data = []
all_timeseries_data = pandas.DataFrame()
all_interaction_data = pandas.DataFrame()
# Iterate over all directories
for run_directory in glob.glob("output/run*"):
# Get the run ID from our directory name
run_id = os.path.basename(run_directory)
# Load parameter and reshape
run_parameter_data = pandas.read_csv(os.path.join(run_directory, "parameters.csv"))
run_parameter_data.index = run_parameter_data["parameter"]
# Load timeseries and interactions
run_interaction_data = pandas.read_csv(os.path.join(run_directory, "interactions.csv"))
run_interaction_data["run"] = run_id
run_ts_data = pandas.read_csv(os.path.join(run_directory, "timeseries.csv"))
run_ts_data["run"] = run_id
# Flatten parameter data into interaction and TS data
for parameter_name in run_parameter_data.index:
run_ts_data.loc[:, parameter_name] = run_parameter_data.loc[parameter_name, "value"]
if run_interaction_data.shape[0] > 0:
for parameter_name in run_parameter_data.index:
run_interaction_data.loc[:, parameter_name] = run_parameter_data.loc[parameter_name, "value"]
# Store raw run data
run_data.append({"parameters": run_parameter_data,
"interactions": run_interaction_data,
"timeseries": run_ts_data})
# Update final steps
all_timeseries_data = all_timeseries_data.append(run_ts_data)
all_interaction_data = all_interaction_data.append(run_interaction_data)
# let's see how many records we have.
print(all_timeseries_data.shape)
print(all_interaction_data.shape)
# Let's see what the data looks like.
all_timeseries_data.head()
all_interaction_data.head()
%matplotlib inline
# let's use groupby to find some information.
last_step_data = all_timeseries_data.groupby("run").tail(1)
# Simple plot
f = plt.figure()
plt.scatter(last_step_data["min_subsidy"],
last_step_data["num_infected"],
alpha=0.5)
plt.xlabel("Subsidy")
plt.ylabel("Number infected")
plt.title("Subsidy vs. number infected")
# Let's use groupby with **multiple** variables now.
mean_infected_by_subsidy = all_timeseries_data.groupby(["run", "min_subsidy", "min_prob_hookup"])["num_infected"].mean()
std_infected_by_subsidy = all_timeseries_data.groupby(["run", "min_subsidy", "min_prob_hookup"])["num_infected"].std()
infected_by_subsidy = pandas.concat((mean_infected_by_subsidy,
std_infected_by_subsidy), axis=1)
infected_by_subsidy.columns = ["mean", "std"]
infected_by_subsidy.head()
# Plot a distribution
f = plt.figure()
_ = plt.hist(last_step_data["num_infected"].values,
color="red",
alpha=0.5)
plt.xlabel("Number infected")
plt.ylabel("Frequency")
plt.title("Distribution of number infected")
# Perform distribution tests for no subsidy vs. some subsidy
no_subsidy_data = last_step_data.loc[last_step_data["min_subsidy"] == 0,
"num_infected"]
some_subsidy_data = last_step_data.loc[last_step_data["min_subsidy"] > 0,
"num_infected"]
# Plot a distribution
f = plt.figure()
_ = plt.hist(no_subsidy_data.values,
color="red",
alpha=0.25)
_ = plt.hist(some_subsidy_data.values,
color="blue",
alpha=0.25)
plt.xlabel("Number infected")
plt.ylabel("Frequency")
plt.title("Distribution of number infected")
# Test for normality
print(scipy.stats.shapiro(no_subsidy_data))
print(scipy.stats.shapiro(some_subsidy_data))
# Test for equal variances
print(scipy.stats.levene(no_subsidy_data, some_subsidy_data))
# Perform t-test
print(scipy.stats.ttest_ind(no_subsidy_data, some_subsidy_data))
# Perform rank-sum test
print(scipy.stats.ranksums(no_subsidy_data, some_subsidy_data))
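# A possible complement to the significance tests above (my addition): also report a
# simple effect size, e.g. the raw mean difference and a pooled-sd Cohen's d
# (equal-variance approximation).
mean_diff = no_subsidy_data.mean() - some_subsidy_data.mean()
pooled_sd = numpy.sqrt((no_subsidy_data.var() + some_subsidy_data.var()) / 2.0)
print("mean difference:", mean_diff, "Cohen's d:", mean_diff / pooled_sd)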
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Model Results
Step2: Using os.path.join and os.path.basename
Step3: Iterating through model run directories
|
14,908 | <ASSISTANT_TASK:>
Python Code:
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
# Make plots inline
%matplotlib inline
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.
grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=grid_size, num_dims=1
)
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 1 if smoke_test else 30
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
# Put model & likelihood into eval mode
model.eval()
likelihood.eval()
# Initalize plot
f, ax = plt.subplots(1, 1, figsize=(4, 3))
# The gpytorch.settings.fast_pred_var flag activates LOVE (for fast variances)
# See https://arxiv.org/abs/1803.06058
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
prediction = likelihood(model(test_x))
mean = prediction.mean
# Get lower and upper predictive bounds
lower, upper = prediction.confidence_region()
# Plot the training data as black stars
def ax_plot():
if smoke_test: return # this is for running the notebook in our testing framework
ax.plot(train_x.detach().numpy(), train_y.detach().numpy(), 'k*')
# Plot predictive means as blue line
ax.plot(test_x.detach().numpy(), mean.detach().numpy(), 'b')
# Plot confidence bounds as lightly shaded region
ax.fill_between(test_x.detach().numpy(), lower.detach().numpy(), upper.detach().numpy(), alpha=0.5)
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
ax_plot()
# We make an nxn grid of training points spaced every 1/(n-1) on [0,1]x[0,1]
n = 40
train_x = torch.zeros(pow(n, 2), 2)
for i in range(n):
for j in range(n):
train_x[i * n + j][0] = float(i) / (n-1)
train_x[i * n + j][1] = float(j) / (n-1)
# True function is sin( 2*pi*(x0+x1))
train_y = torch.sin((train_x[:, 0] + train_x[:, 1]) * (2 * math.pi)) + torch.randn_like(train_x[:, 0]).mul(0.01)
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
grid_size = gpytorch.utils.grid.choose_grid_size(train_x)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=grid_size, num_dims=2
)
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def train():
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
%time train()
# Set model and likelihood into evaluation mode
model.eval()
likelihood.eval()
# Generate nxn grid of test points spaced on a grid of size 1/(n-1) in [0,1]x[0,1]
n = 10
test_x = torch.zeros(int(pow(n, 2)), 2)
for i in range(n):
for j in range(n):
test_x[i * n + j][0] = float(i) / (n-1)
test_x[i * n + j][1] = float(j) / (n-1)
with torch.no_grad(), gpytorch.settings.fast_pred_var():
observed_pred = likelihood(model(test_x))
pred_labels = observed_pred.mean.view(n, n)
# Calc abosolute error
test_y_actual = torch.sin(((test_x[:, 0] + test_x[:, 1]) * (2 * math.pi))).view(n, n)
delta_y = torch.abs(pred_labels - test_y_actual).detach().numpy()
# Define a plotting function
def ax_plot(f, ax, y_labels, title):
if smoke_test: return # this is for running the notebook in our testing framework
im = ax.imshow(y_labels)
ax.set_title(title)
f.colorbar(im)
# Plot our predictive means
f, observed_ax = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax, pred_labels, 'Predicted Values (Likelihood)')
# Plot the true values
f, observed_ax2 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax2, test_y_actual, 'Actual Values (Likelihood)')
# Plot the absolute errors
f, observed_ax3 = plt.subplots(1, 1, figsize=(4, 3))
ax_plot(f, observed_ax3, delta_y, 'Absolute Error Surface')
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
# We're setting Kronecker structure to False because we're using an additive structure decomposition
grid_size = gpytorch.utils.grid.choose_grid_size(train_x, kronecker_structure=False)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.AdditiveStructureKernel(
gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.RBFKernel(), grid_size=128, num_dims=1
)
), num_dims=2
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
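# Training and prediction for this additive-structure model would proceed exactly as
# in the 2D example above (my addition, assuming the same train_x/train_y are reused):
model.train(); likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
train()  # reuses the training loop defined earlier, which reads these globals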
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: KISS-GP for 1D Data
Step2: Set up the model
Step3: Train the model hyperparameters
Step4: Make predictions
Step5: KISS-GP for 2D-4D Data
Step6: The model
Step7: Train the model hyperparameters
Step8: Make predictions with the model
Step9: KISS-GP for higher dimensional data w/ Additive Structure
|
14,909 | <ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
"""Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
return 0.1 + (image_data - 0) * (0.9 - 0.1) / (255 - 0)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros(10))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
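# Optional, numerically more stable alternative (my addition, not part of the original
# lab): let TensorFlow fuse softmax and cross entropy instead of computing
# log(softmax) explicitly, which can underflow for very confident logits.
stable_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
stable_loss = tf.reduce_mean(stable_cross_entropy)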
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 5
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: (figure: Mean Variance - Image.png)
Step6: Checkpoint
Step7: Problem 2
Step8: (figure: Learn Rate Tune - Image.png)
Step9: Test
|
14,910 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import dowhy
from dowhy import CausalModel
from dowhy import causal_estimators
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'': {
'level': 'WARN',
},
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
# Disabling warnings output
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
#The covariates data has 46 features
x = pd.read_csv("https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/TWINS/twin_pairs_X_3years_samesex.csv")
#The outcome data contains mortality of the lighter and heavier twin
y = pd.read_csv("https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/TWINS/twin_pairs_Y_3years_samesex.csv")
#The treatment data contains weight in grams of both the twins
t = pd.read_csv("https://raw.githubusercontent.com/AMLab-Amsterdam/CEVAE/master/datasets/TWINS/twin_pairs_T_3years_samesex.csv")
#_0 denotes features specific to the lighter twin and _1 denotes features specific to the heavier twin
lighter_columns = ['pldel', 'birattnd', 'brstate', 'stoccfipb', 'mager8',
'ormoth', 'mrace', 'meduc6', 'dmar', 'mplbir', 'mpre5', 'adequacy',
'orfath', 'frace', 'birmon', 'gestat10', 'csex', 'anemia', 'cardiac',
'lung', 'diabetes', 'herpes', 'hydra', 'hemo', 'chyper', 'phyper',
'eclamp', 'incervix', 'pre4000', 'preterm', 'renal', 'rh', 'uterine',
'othermr', 'tobacco', 'alcohol', 'cigar6', 'drink5', 'crace',
'data_year', 'nprevistq', 'dfageq', 'feduc6', 'infant_id_0',
'dlivord_min', 'dtotord_min', 'bord_0',
'brstate_reg', 'stoccfipb_reg', 'mplbir_reg']
heavier_columns = [ 'pldel', 'birattnd', 'brstate', 'stoccfipb', 'mager8',
'ormoth', 'mrace', 'meduc6', 'dmar', 'mplbir', 'mpre5', 'adequacy',
'orfath', 'frace', 'birmon', 'gestat10', 'csex', 'anemia', 'cardiac',
'lung', 'diabetes', 'herpes', 'hydra', 'hemo', 'chyper', 'phyper',
'eclamp', 'incervix', 'pre4000', 'preterm', 'renal', 'rh', 'uterine',
'othermr', 'tobacco', 'alcohol', 'cigar6', 'drink5', 'crace',
'data_year', 'nprevistq', 'dfageq', 'feduc6',
'infant_id_1', 'dlivord_min', 'dtotord_min', 'bord_1',
'brstate_reg', 'stoccfipb_reg', 'mplbir_reg']
# Since the data comes in twin pairs, split each pair into two rows so that each twin can be treated as a separate instance
data = []
for i in range(len(t.values)):
    # keep the pair only if both twins weigh less than 2 kg (2000 g)
if t.iloc[i].values[1]>=2000 or t.iloc[i].values[2]>=2000:
continue
this_instance_lighter = list(x.iloc[i][lighter_columns].values)
this_instance_heavier = list(x.iloc[i][heavier_columns].values)
#adding weight
this_instance_lighter.append(t.iloc[i].values[1])
this_instance_heavier.append(t.iloc[i].values[2])
#adding treatment, is_heavier
this_instance_lighter.append(0)
this_instance_heavier.append(1)
#adding the outcome
this_instance_lighter.append(y.iloc[i].values[1])
this_instance_heavier.append(y.iloc[i].values[2])
data.append(this_instance_lighter)
data.append(this_instance_heavier)
cols = [ 'pldel', 'birattnd', 'brstate', 'stoccfipb', 'mager8',
'ormoth', 'mrace', 'meduc6', 'dmar', 'mplbir', 'mpre5', 'adequacy',
'orfath', 'frace', 'birmon', 'gestat10', 'csex', 'anemia', 'cardiac',
'lung', 'diabetes', 'herpes', 'hydra', 'hemo', 'chyper', 'phyper',
'eclamp', 'incervix', 'pre4000', 'preterm', 'renal', 'rh', 'uterine',
'othermr', 'tobacco', 'alcohol', 'cigar6', 'drink5', 'crace',
'data_year', 'nprevistq', 'dfageq', 'feduc6',
'infant_id', 'dlivord_min', 'dtotord_min', 'bord',
'brstate_reg', 'stoccfipb_reg', 'mplbir_reg','wt','treatment','outcome']
df = pd.DataFrame(columns=cols,data=data)
df.head()
df = df.astype({"treatment":'bool'}, copy=False) #explicitly assigning treatment column as boolean
df.fillna(value=df.mean(),inplace=True) #filling the missing values
df.fillna(value=df.mode().loc[0],inplace=True)
data_1 = df[df["treatment"]==1]
data_0 = df[df["treatment"]==0]
print(np.mean(data_1["outcome"]))
print(np.mean(data_0["outcome"]))
print("ATE", np.mean(data_1["outcome"])- np.mean(data_0["outcome"]))
#The causal model has "treatment = is_heavier", "outcome = mortality" and "gestat10 = gestational weeks before birth"
model=CausalModel(
data = df,
treatment='treatment',
outcome='outcome',
common_causes='gestat10'
)
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression", test_significance=True
)
print(estimate)
print("ATE", np.mean(data_1["outcome"])- np.mean(data_0["outcome"]))
print("Causal Estimate is " + str(estimate.value))
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_matching"
)
print("Causal Estimate is " + str(estimate.value))
print("ATE", np.mean(data_1["outcome"])- np.mean(data_0["outcome"]))
refute_results=model.refute_estimate(identified_estimand, estimate,
method_name="random_common_cause")
print(refute_results)
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute",
num_simulations=20)
print(res_placebo)
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9,
num_simulations=20)
print(res_subset)
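# Aside (not part of the original notebook): DoWhy ships several other backdoor
# estimators. As an extra sanity check one could, for example, compare the estimates
# above against propensity score stratification (method name assumed from the DoWhy
# API; adjust if your version differs).
estimate_strat = model.estimate_effect(identified_estimand,
                                       method_name="backdoor.propensity_score_stratification")
print("Causal Estimate (stratification) is " + str(estimate_strat.value))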
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font size="4">Load the Data</font>
Step2: <font size="4">1. Model</font>
Step3: <font size="4">2. Identify</font>
Step4: <font size="4">3. Estimate Using Various Methods</font>
Step5: <font size="3">3.2 Using Propensity Score Matching</font>
Step6: <font size="4">4. Refute</font>
Step7: <font size="3">4.2 Using a placebo treatment</font>
Step8: <font size="3">4.3 Using a data subset refuter</font>
|
14,911 | <ASSISTANT_TASK:>
Python Code:
report_file = '/Users/bking/IdeaProjects/LanguageModelRNN/reports/encdec_noing_250_512_025dr.json'
log_file = '/Users/bking/IdeaProjects/LanguageModelRNN/logs/encdec_noing_250_512_025dr_logs.json'
import json
import matplotlib.pyplot as plt
with open(report_file) as f:
report = json.loads(f.read())
with open(log_file) as f:
logs = json.loads(f.read())
print 'Encoder: \n\n', report['architecture']['encoder']
print 'Decoder: \n\n', report['architecture']['decoder']
print('Train Perplexity: ', report['train_perplexity'])
print('Valid Perplexity: ', report['valid_perplexity'])
print('Test Perplexity: ', report['test_perplexity'])
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)')
plt.title('Loss v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)')
plt.title('Perplexity v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Perplexity')
plt.legend()
plt.show()
def print_sample(sample):
enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>'])
gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>'])
print('Input: '+ enc_input + '\n')
print('Gend: ' + sample['generated'] + '\n')
print('True: ' + gold + '\n')
print('\n')
for sample in report['train_samples']:
print_sample(sample)
for sample in report['valid_samples']:
print_sample(sample)
for sample in report['test_samples']:
print_sample(sample)
print 'Overall Score: ', report['bleu']['score'], '\n'
print '1-gram Score: ', report['bleu']['components']['1']
print '2-gram Score: ', report['bleu']['components']['2']
print '3-gram Score: ', report['bleu']['components']['3']
print '4-gram Score: ', report['bleu']['components']['4']
npairs_generated = report['n_pairs_bleu_generated']
npairs_gold = report['n_pairs_bleu_gold']
print 'Overall Score (Generated): ', npairs_generated['score'], '\n'
print '1-gram Score: ', npairs_generated['components']['1']
print '2-gram Score: ', npairs_generated['components']['2']
print '3-gram Score: ', npairs_generated['components']['3']
print '4-gram Score: ', npairs_generated['components']['4']
print '\n'
print 'Overall Score: (Gold)', npairs_gold['score'], '\n'
print '1-gram Score: ', npairs_gold['components']['1']
print '2-gram Score: ', npairs_gold['components']['2']
print '3-gram Score: ', npairs_gold['components']['3']
print '4-gram Score: ', npairs_gold['components']['4']
print 'Average Generated Score: ', report['average_alignment_generated']
print 'Average Gold Score: ', report['average_alignment_gold']
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Perplexity on Each Dataset
Step2: Loss vs. Epoch
Step3: Perplexity vs. Epoch
Step4: Generations
Step5: BLEU Analysis
Step6: N-pairs BLEU Analysis
Step7: Alignment Analysis
|
14,912 | <ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
odd_20 = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
i = 0
odd_20 = []
while i <= 20:
if i % 2 == 1:
odd_20.append(i)
i += 1
print(odd_20)
odd_20 = []
for i in range(21):
if i % 2 == 1:
odd_20.append(i)
print(odd_20)
[1, 3, 5, 7, 9, 11, ..., 99999999]
def odd_number(num):
L=[]
for i in range(num):
if i%2 == 1:
L.append(i)
return L
odd_number(20)
odd_100M = odd_number(100000000)
print(odd_100M[:20])
import time
start_time = time.clock()
odd_100M = odd_number(100000000)
end_time = time.clock()
print(end_time - start_time, "seconds")
odd_100M = [x for x in range(100000001) if x % 2 == 1]
odd_100M[:10]
odd_100M_square = [x**2 for x in range(100000000) if x % 2== 1]
odd_100M_square[:10]
odd_100M_square = [x**2 for x in odd_100M]
odd_100M_square[:10]
odd_100M2 = [2 * x + 1 for x in range(50000000)]
odd_100M2[:10]
odd_100M2 = []
for x in range(50000000):
odd_100M2.append(2*x+1)
odd_100M2[:10]
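# Aside (not in the original notebook): a generator expression yields the same
# numbers lazily, one at a time, so the full list of 50 million values never has
# to be held in memory.
odd_gen = (2 * x + 1 for x in range(50000000))
print([next(odd_gen) for _ in range(10)])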
%matplotlib inline
import matplotlib.pyplot as plt
### Graph setup: start ###
# From here down to the three-hash marker below is just setup code for drawing the graph.
# You do not need to understand it yet; simply remember the pattern.
# It prepares the "canvas" on which the figure will be drawn.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Place the x-axis at the bottom and the y-axis at the center of the figure.
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
# Remove the box surrounding the graph.
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
### Graph setup: end ###
# Provide the lists of x- and y-coordinate values.
# Here we use a list comprehension.
xs = [x for x in range(-10, 11, 5)]
ys = [x**2 for x in xs]
# Now call the plot() function to draw the graph.
plt.plot(xs, ys)
plt.show()
### Graph setup: start ###
# From here down to the three-hash marker below is just setup code for drawing the graph.
# You do not need to understand it yet; simply remember the pattern.
# It prepares the "canvas" on which the figure will be drawn.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Place the x-axis at the bottom and the y-axis at the center of the figure.
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
# Remove the box surrounding the graph.
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
### Graph setup: end ###
# Provide the lists of x- and y-coordinate values.
# Here we use a list comprehension.
xs = [x for x in range(-10, 11)]
ys = [x**2 for x in xs]
# Now call the plot() function to draw the graph.
plt.plot(xs, ys)
plt.show()
from math import exp
[exp(n) for n in range(10) if n % 2 == 1]
[exp(3*n) for n in range(1,6)]
about_python = 'Python is a general-purpose programming language. \
It is becoming more and more popular \
for doing data science.'
words = about_python.split()
words
L =[]
for x in words:
L.append((x.upper(), len(x)))
L
[(x.upper(), len(x)) for x in words]
[(x.upper(), len(x)) for x in words[:5]]
[(words[n].upper(), len(words[n])) for n in range(len(words)) if n < 5]
[(x.strip('.').upper(), len(x.strip('.'))) for x in words]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Main topics
Step2: Alternatively, a loop can be used.
Step3: The for loop
Step4: Example
Step5: On the other hand, using a loop is always possible.
Step6: The list of odd numbers between 0 and 20 is as follows.
Step7: Now let's generate the list of odd numbers between 0 and 100 million.
Step8: It takes quite a while.
Step9: Appendix
Step10: Now let's ask the question a little differently.
Step11: Example
Step12: Of course, the odd_100M list created earlier can be reused.
Step13: Example
Step14: This approach looks a bit simpler, because there is no if clause.
Step15: Solving today's main example
Step16: Because the module name matplotlib.pyplot is long, it is usually abbreviated as plt.
Step17: To draw a graph, we first have to plot as many points as needed.
Step18: Plotting more points gives a smoother graph.
Step19: Exercises
Step20: Practice
Step21: Practice
Step22: To analyze the lengths of the words used in the sentence above, first split the sentence into words.
Step23: We want to build a list of tuples, each containing a word from the words list converted to upper case together with the length of that word.
Step24: With a list comprehension this can be written more concisely, as below.
Step25: To handle only the first five words, do it as below.
Step26: It is also possible to constrain the indices as below, i.e., by using an additional if clause.
Step27: Question
|
14,913 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# If you have a GPU, execute the following lines to restrict the amount of VRAM used:
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print("Using GPU {}".format(gpus[0]))
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
else:
print("Using CPU")
import os
import random
import itertools
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda, Dot
from tensorflow.keras.layers import Conv2D, MaxPool2D, GlobalAveragePooling2D, Flatten, Dropout
from tensorflow.keras import optimizers
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
PATH = "lfw/lfw-deepfunneled/"
USE_SUBSET = True
dirs = sorted(os.listdir(PATH))
if USE_SUBSET:
dirs = dirs[:500]
name_to_classid = {d:i for i,d in enumerate(dirs)}
classid_to_name = {v:k for k,v in name_to_classid.items()}
num_classes = len(name_to_classid)
print("number of classes: "+str(num_classes))
# read all directories
img_paths = {c:[directory + "/" + img for img in sorted(os.listdir(PATH+directory))]
for directory,c in name_to_classid.items()}
# retrieve all images
all_images_path = []
for img_list in img_paths.values():
all_images_path += img_list
# map to integers
path_to_id = {v:k for k,v in enumerate(all_images_path)}
id_to_path = {v:k for k,v in path_to_id.items()}
# build mappings between images and class
classid_to_ids = {k:[path_to_id[path] for path in v] for k,v in img_paths.items()}
id_to_classid = {v:c for c,imgs in classid_to_ids.items() for v in imgs}
from skimage.io import imread
from skimage.transform import resize
def resize100(img):
return resize(img, (100, 100), preserve_range=True, mode='reflect', anti_aliasing=True)[20:80,20:80,:]
def open_all_images(id_to_path):
all_imgs = []
for path in id_to_path.values():
all_imgs += [np.expand_dims(resize100(imread(PATH+path)),0)]
return np.vstack(all_imgs)
all_imgs = open_all_images(id_to_path)
mean = np.mean(all_imgs, axis=(0,1,2))
all_imgs -= mean
all_imgs.shape, str(all_imgs.nbytes / 1e6) + "Mo"
def build_pos_pairs_for_id(classid, max_num=50):
imgs = classid_to_ids[classid]
if len(imgs) == 1:
return []
pos_pairs = list(itertools.combinations(imgs, 2))
random.shuffle(pos_pairs)
return pos_pairs[:max_num]
def build_positive_pairs(class_id_range):
listX1 = []
listX2 = []
for class_id in class_id_range:
pos = build_pos_pairs_for_id(class_id)
for pair in pos:
listX1 += [pair[0]]
listX2 += [pair[1]]
perm = np.random.permutation(len(listX1))
return np.array(listX1)[perm], np.array(listX2)[perm]
split_num = int(num_classes * 0.8)
Xa_train, Xp_train = build_positive_pairs(range(0, split_num))
Xa_test, Xp_test = build_positive_pairs(range(split_num, num_classes-1))
# Gather the ids of all images that are used for train and test
all_img_train_idx = list(set(Xa_train) | set(Xp_train))
all_img_test_idx = list(set(Xa_test) | set(Xp_test))
Xa_train.shape, Xp_train.shape
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip 50% of the images
])
class TripletGenerator(tf.keras.utils.Sequence):
def __init__(self, Xa_train, Xp_train, batch_size, all_imgs, neg_imgs_idx):
self.cur_img_index = 0
self.cur_img_pos_index = 0
self.batch_size = batch_size
self.imgs = all_imgs
self.Xa = Xa_train # Anchors
self.Xp = Xp_train
self.cur_train_index = 0
self.num_samples = Xa_train.shape[0]
self.neg_imgs_idx = neg_imgs_idx
def __len__(self):
return self.num_samples // self.batch_size
def __getitem__(self, batch_index):
low_index = batch_index * self.batch_size
high_index = (batch_index + 1) * self.batch_size
imgs_a = self.Xa[low_index:high_index] # Anchors
imgs_p = self.Xp[low_index:high_index] # Positives
imgs_n = random.sample(self.neg_imgs_idx, imgs_a.shape[0]) # Negatives
imgs_a = seq.augment_images(self.imgs[imgs_a])
imgs_p = seq.augment_images(self.imgs[imgs_p])
imgs_n = seq.augment_images(self.imgs[imgs_n])
        # We also return a null vector as a placeholder for the output, but it won't be needed:
return ([imgs_a, imgs_p, imgs_n], np.zeros(shape=(imgs_a.shape[0])))
batch_size = 128
gen = TripletGenerator(Xa_train, Xp_train, batch_size, all_imgs, all_img_train_idx)
len(all_img_test_idx), len(gen)
[xa, xp, xn], y = gen[0]
xa.shape, xp.shape, xn.shape
plt.figure(figsize=(16, 9))
for i in range(5):
plt.subplot(3, 5, i + 1)
plt.title("anchor")
plt.imshow((xa[i] + mean) / 255)
plt.axis('off')
for i in range(5):
plt.subplot(3, 5, i + 6)
plt.title("positive")
plt.imshow((xp[i] + mean) / 255)
plt.axis('off')
for i in range(5):
plt.subplot(3, 5, i + 11)
plt.title("negative")
plt.imshow((xn[i] + mean) / 255)
plt.axis('off')
plt.show()
gen_test = TripletGenerator(Xa_test, Xp_test, 32, all_imgs, all_img_test_idx)
len(gen_test)
# Build a loss which doesn't take into account the y_true, as
# we'll be passing only 0
def identity_loss(y_true, y_pred):
return K.mean(y_pred - 0 * y_true)
# The real loss is here
def cosine_triplet_loss(X, margin=0.5):
positive_sim, negative_sim = X
# batch loss
losses = K.maximum(0.0, negative_sim - positive_sim + margin)
return K.mean(losses)
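# Quick numeric illustration (not from the original notebook) of the hinge above:
# with margin=0.5, a well-separated triplet (pos_sim=0.9, neg_sim=0.2) contributes 0,
# while a poorly separated one (pos_sim=0.4, neg_sim=0.3) contributes 0.4, so the
# mean loss over these two triplets is 0.2.
demo_pos = np.array([0.9, 0.4])
demo_neg = np.array([0.2, 0.3])
print(np.maximum(0.0, demo_neg - demo_pos + 0.5).mean())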
class SharedConv(tf.keras.Model):
def __init__(self):
super().__init__(self, name="sharedconv")
self.conv1 = Conv2D(16, 3, activation="relu", padding="same")
self.conv2 = Conv2D(16, 3, activation="relu", padding="same")
self.pool1 = MaxPool2D((2,2)) # 30,30
self.conv3 = Conv2D(32, 3, activation="relu", padding="same")
self.conv4 = Conv2D(32, 3, activation="relu", padding="same")
self.pool2 = MaxPool2D((2,2)) # 15,15
self.conv5 = Conv2D(64, 3, activation="relu", padding="same")
self.conv6 = Conv2D(64, 3, activation="relu", padding="same")
self.pool3 = MaxPool2D((2,2)) # 8,8
self.conv7 = Conv2D(64, 3, activation="relu", padding="same")
self.conv8 = Conv2D(32, 3, activation="relu", padding="same")
self.flatten = Flatten()
self.dropout1 = Dropout(0.2)
self.fc1 = Dense(40, activation="tanh")
self.dropout2 = Dropout(0.2)
self.fc2 = Dense(64)
def call(self, inputs):
x = self.pool1(self.conv2(self.conv1(inputs)))
x = self.pool2(self.conv4(self.conv3(x)))
x = self.pool3(self.conv6(self.conv5(x)))
x = self.flatten(self.conv8(self.conv7(x)))
x = self.fc1(self.dropout1(x))
return self.fc2(self.dropout2(x))
shared_conv = SharedConv()
class TripletNetwork(tf.keras.Model):
def __init__(self, shared_conv):
super().__init__(self, name="tripletnetwork")
# TODO
def call(self, inputs):
pass # TODO
model_triplet = TripletNetwork(shared_conv)
model_triplet.compile(loss=identity_loss, optimizer="rmsprop")
# %load solutions/triplet.py
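# A minimal sketch of one possible TripletNetwork implementation (illustrative only,
# not the solution file loaded above): embed anchor/positive/negative with the shared
# convnet, take cosine similarities via a normalizing Dot layer, and return the
# triplet loss so that identity_loss can simply pass it through.
class TripletNetworkSketch(tf.keras.Model):
    def __init__(self, shared_conv):
        super().__init__(name="tripletnetworksketch")
        self.shared_conv = shared_conv
        self.cosine = Dot(axes=-1, normalize=True)  # cosine similarity of embeddings

    def call(self, inputs):
        anchor, positive, negative = inputs
        anchor_emb = self.shared_conv(anchor)
        positive_emb = self.shared_conv(positive)
        negative_emb = self.shared_conv(negative)
        pos_sim = self.cosine([anchor_emb, positive_emb])
        neg_sim = self.cosine([anchor_emb, negative_emb])
        return cosine_triplet_loss([pos_sim, neg_sim])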
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
best_model_fname = "triplet_checkpoint_b2.h5"
best_model_cb = ModelCheckpoint(best_model_fname, monitor='val_loss',
save_best_only=True, verbose=1)
history = model_triplet.fit(gen,
epochs=10,
validation_data = gen_test,
callbacks=[best_model_cb])
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylim(0, 0.5)
plt.legend(loc='best')
plt.title('Loss');
model_triplet.load_weights("triplet_checkpoint_b2.h5")
# You may load this model
# Trained on triplets but with larger dataset
# Far from perfect !
# model_triplet.load_weights("triplet_pretrained.h5")
emb = shared_conv.predict(all_imgs)
emb = emb / np.linalg.norm(emb, axis=-1, keepdims=True)
pixelwise = np.reshape(all_imgs, (all_imgs.shape[0], 60*60*3))
def most_sim(idx, topn=5, mode="cosine"):
x = emb[idx]
if mode == "cosine":
x = x / np.linalg.norm(x)
sims = np.dot(emb, x)
ids = np.argsort(sims)[::-1]
return [(id,sims[id]) for id in ids[:topn]]
elif mode == "euclidean":
dists = np.linalg.norm(emb - x, axis=-1)
ids = np.argsort(dists)
return [(id,dists[id]) for id in ids[:topn]]
else:
dists = np.linalg.norm(pixelwise - pixelwise[idx], axis=-1)
ids = np.argsort(dists)
return [(id,dists[id]) for id in ids[:topn]]
def display(img):
img = img.astype('uint8')
plt.imshow(img)
plt.axis('off')
plt.show()
interesting_classes = list(filter(lambda x: len(x[1])>4, classid_to_ids.items()))
class_idx = random.choice(interesting_classes)[0]
print(class_idx)
img_idx = random.choice(classid_to_ids[class_idx])
for id, sim in most_sim(img_idx):
display(all_imgs[id] + mean)
print((classid_to_name[id_to_classid[id]], id, sim))
test_ids = []
for class_id in range(split_num, num_classes-1):
img_ids = classid_to_ids[class_id]
if len(img_ids) > 1:
test_ids += img_ids
print(len(test_ids))
len([len(classid_to_ids[x]) for x in list(range(split_num, num_classes-1)) if len(classid_to_ids[x])>1])
def recall_k(k=10, mode="embedding"):
num_found = 0
for img_idx in test_ids:
image_class = id_to_classid[img_idx]
found_classes = []
if mode == "embedding":
found_classes = [id_to_classid[x] for (x, score) in most_sim(img_idx, topn=k+1)[1:]]
elif mode == "random":
found_classes = [id_to_classid[x] for x in random.sample(
list(set(all_img_test_idx + all_img_train_idx) - {img_idx}), k)]
elif mode == "image":
found_classes = [id_to_classid[x] for (x, score) in most_sim(img_idx, topn=k+1, mode="image")[1:]]
if image_class in found_classes:
num_found += 1
return num_found / len(test_ids)
recall_k(k=10), recall_k(k=10, mode="random")
# Naive way to compute all similarities between all images. May be optimized!
def build_similarities(conv, all_imgs):
embs = conv.predict(all_imgs)
embs = embs / np.linalg.norm(embs, axis=-1, keepdims=True)
all_sims = np.dot(embs, embs.T)
return all_sims
def intersect(a, b):
return list(set(a) & set(b))
def build_negatives(anc_idxs, pos_idxs, similarities, neg_imgs_idx, num_retries=20):
# If no similarities were computed, return a random negative
if similarities is None:
return random.sample(neg_imgs_idx,len(anc_idxs))
final_neg = []
# for each positive pair
for (anc_idx, pos_idx) in zip(anc_idxs, pos_idxs):
anchor_class = id_to_classid[anc_idx]
#positive similarity
sim = similarities[anc_idx, pos_idx]
# find all negatives which are semi(hard)
possible_ids = np.where((similarities[anc_idx] + 0.25) > sim)[0]
possible_ids = intersect(neg_imgs_idx, possible_ids)
appended = False
for iteration in range(num_retries):
if len(possible_ids) == 0:
break
idx_neg = random.choice(possible_ids)
if id_to_classid[idx_neg] != anchor_class:
final_neg.append(idx_neg)
appended = True
break
if not appended:
final_neg.append(random.choice(neg_imgs_idx))
return final_neg
class HardTripletGenerator(tf.keras.utils.Sequence):
def __init__(self, Xa_train, Xp_train, batch_size, all_imgs, neg_imgs_idx, conv):
self.batch_size = batch_size
self.imgs = all_imgs
self.Xa = Xa_train
self.Xp = Xp_train
self.num_samples = Xa_train.shape[0]
self.neg_imgs_idx = neg_imgs_idx
if conv:
print("Pre-computing similarities...", end=" ")
self.similarities = build_similarities(conv, self.imgs)
print("Done!")
else:
self.similarities = None
def __len__(self):
return self.num_samples // self.batch_size
def __getitem__(self, batch_index):
low_index = batch_index * self.batch_size
high_index = (batch_index + 1) * self.batch_size
imgs_a = self.Xa[low_index:high_index]
imgs_p = self.Xp[low_index:high_index]
imgs_n = build_negatives(imgs_a, imgs_p, self.similarities, self.neg_imgs_idx)
imgs_a = seq.augment_images(self.imgs[imgs_a])
imgs_p = seq.augment_images(self.imgs[imgs_p])
imgs_n = seq.augment_images(self.imgs[imgs_n])
return ([imgs_a, imgs_p, imgs_n], np.zeros(shape=(imgs_a.shape[0])))
batch_size = 128
gen_hard = HardTripletGenerator(Xa_train, Xp_train, batch_size, all_imgs, all_img_train_idx, shared_conv)
len(gen_hard)
[xa, xp, xn], y = gen_hard[0]
xa.shape, xp.shape, xn.shape
plt.figure(figsize=(16, 9))
for i in range(5):
plt.subplot(3, 5, i + 1)
plt.title("anchor")
plt.imshow((xa[i] + mean) / 255)
plt.axis('off')
for i in range(5):
plt.subplot(3, 5, i + 6)
plt.title("positive")
plt.imshow((xp[i] + mean) / 255)
plt.axis('off')
for i in range(5):
plt.subplot(3, 5, i + 11)
plt.title("negative")
plt.imshow((xn[i] + mean) / 255)
plt.axis('off')
plt.show()
class SharedConv2(tf.keras.Model):
    """Improved version of SharedConv."""
def __init__(self):
super().__init__(self, name="sharedconv2")
self.conv1 = Conv2D(16, 3, activation="relu", padding="same")
self.conv2 = Conv2D(16, 3, activation="relu", padding="same")
self.pool1 = MaxPool2D((2,2)) # 30,30
self.conv3 = Conv2D(32, 3, activation="relu", padding="same")
self.conv4 = Conv2D(32, 3, activation="relu", padding="same")
self.pool2 = MaxPool2D((2,2)) # 15,15
self.conv5 = Conv2D(64, 3, activation="relu", padding="same")
self.conv6 = Conv2D(64, 3, activation="relu", padding="same")
self.pool3 = MaxPool2D((2,2)) # 8,8
self.conv7 = Conv2D(64, 3, activation="relu", padding="same")
self.conv8 = Conv2D(32, 3, activation="relu", padding="same")
self.flatten = Flatten()
self.dropout1 = Dropout(0.2)
self.fc1 = Dense(64)
def call(self, inputs):
x = self.pool1(self.conv2(self.conv1(inputs)))
x = self.pool2(self.conv4(self.conv3(x)))
x = self.pool3(self.conv6(self.conv5(x)))
x = self.flatten(self.conv8(self.conv7(x)))
return self.fc1(self.dropout1(x))
tf.random.set_seed(1337)
shared_conv2 = SharedConv2()
model_triplet2 = TripletNetwork(shared_conv2)
opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model_triplet2.compile(loss=identity_loss, optimizer=opt)
gen_test = TripletGenerator(Xa_test, Xp_test, 32, all_imgs, all_img_test_idx)
len(gen_test)
# At first epoch we don't generate hard triplets so that our model can learn the easy examples first
gen_hard = HardTripletGenerator(Xa_train, Xp_train, batch_size, all_imgs, all_img_train_idx, None)
loss, val_loss = [], []
best_model_fname_hard = "triplet_checkpoint_hard.h5"
best_val_loss = float("inf")
nb_epochs = 10
for epoch in range(nb_epochs):
print("built new hard generator for epoch " + str(epoch))
history = model_triplet2.fit(
gen_hard,
epochs=1,
validation_data = gen_test)
loss.extend(history.history["loss"])
val_loss.extend(history.history["val_loss"])
if val_loss[-1] < best_val_loss:
print("Saving best model")
model_triplet2.save_weights(best_model_fname_hard)
gen_hard = HardTripletGenerator(Xa_train, Xp_train, batch_size, all_imgs, all_img_train_idx, shared_conv2)
plt.plot(loss, label='train')
plt.plot(val_loss, label='validation')
plt.ylim(0, 0.5)
plt.legend(loc='best')
plt.title('Loss');
emb = shared_conv2.predict(all_imgs)
emb = emb / np.linalg.norm(emb, axis=-1, keepdims=True)
recall_k(k=10), recall_k(k=10, mode="random")
shared_conv2_nohard = SharedConv2()
model_triplet2_nohard = TripletNetwork(shared_conv2_nohard)
opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model_triplet2_nohard.compile(loss=identity_loss, optimizer=opt)
gen_nohard = HardTripletGenerator(Xa_train, Xp_train, batch_size, all_imgs, all_img_train_idx, None)
history = model_triplet2_nohard.fit_generator(
generator=gen_nohard,
epochs=10,
validation_data=gen_test)
plt.plot(loss, label='train (hardmining)')
plt.plot(val_loss, label='validation (hardmining)')
plt.plot(history.history["loss"], label="train")
plt.plot(history.history["val_loss"], label="validation")
plt.ylim(0, 0.5)
plt.legend(loc='best')
plt.title('Loss hardmining vs no hardmining');
emb = shared_conv2_nohard.predict(all_imgs)
emb = emb / np.linalg.norm(emb, axis=-1, keepdims=True)
recall_k(k=10), recall_k(k=10, mode="random")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Processing the dataset
Step2: In each directory, there are one or more images corresponding to the identity. We map each image path to an integer id, then build a few dictionaries
Step3: The following histogram shows the number of images per class
Step4: The following function builds a large number of positives/negatives pairs (train and test)
Step5: We end up with 1177 different pairs, which we'll append with a random sample (as negative) in the generator
Step6: As you can see, choosing the negatives randomly can be inefficient. For example, it's reasonable to think an old man will be too easy a negative if the anchor is a young woman.
Step7: Triplet Model
Step8: Shared Convolutional Network
Step9: Triplet Model
Step10: Warning
Step11: Exercise
Step12: Displaying similar images
Step13: Test Recall@k model
Step15: Hard Negative Mining
Step16: Note that we are re-creating a HardTripletGenerator at each epoch. By doing so, we re-compute the new hard negatives with the newly updated model. On larger scale this operation can take a lot of time, and could be done every X epochs (X > 1).
Step17: You should see that the train loss is barely improving while the validation loss is decreasing. Remember that we are feeding the hardest triplets to the model!
Step18: Let's run the improved convnet SharedConv2 without negative hardmining in order to have a fair comparison
|
14,914 | <ASSISTANT_TASK:>
Python Code:
import means
import urllib
__ = urllib.urlretrieve("http://www.ebi.ac.uk/biomodels/models-main/publ/"
"BIOMD0000000010/BIOMD0000000010.xml.origin",
filename="autoreg.xml")
# Requires: libsbml
autoreg_model, autoreg_parameters, autoreg_initial_conditions \
= means.io.read_sbml('autoreg.xml')
autoreg_model
print autoreg_parameters[:3], '.. snip ..', autoreg_parameters[-3:]
print autoreg_initial_conditions
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Biomodels repository hosts a number of published models.
Step2: This model can be parsed into MEANS Model object using means.io.read_sbml function. When parsing the SBML format, compartments of species are neglected, as the species names are assumed to be compartment-specific.
Step3: To view the model, simply output it
Step4: Note that a set of parameters and initial conditions are also parsed from the SBML file directly, let's view them
|
14,915 | <ASSISTANT_TASK:>
Python Code:
# import NumPy and pandas
import numpy as np
import pandas as pd
# set some pandas options
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows',10)
# create a DataFrame from a 2-d array
pd.DataFrame(np.array([[10,11],[20,21]]))
# create a DataFrame from a list of Series objects
df1 = pd.DataFrame([pd.Series(np.arange(10,15)),pd.Series(np.arange(15,20))])
df1
# what is the shape of the data frame
df1.shape
# specify column names
df = pd.DataFrame(np.array([[10,11],[20,21]]), columns=['a','b'])
df
# what are the names of the columns
df.columns
# retrieve just the names of the columns by position
"{0},{1}".format(df.columns[0],df.columns[1])
# rename the columns
df.columns = ['c','d']
df
# create a dataframe with named rows and columns
df = pd.DataFrame(np.array([[10,11],[20,21]]),columns=['col1','col2'],index=['row1','row2'])
df
df.index
# create a dataframe with 2 series objects
# and a dictionary
s1 = pd.Series(np.arange(1,6,1))
s2 = pd.Series(np.arange(6,11,1))
pd.DataFrame({'c1':s1,'c2':s2})
# demonstrate alignment during creation
s3 = pd.Series(np.arange(12,14),index=[1,2])
df = pd.DataFrame({'c1':s1,'c2':s2,'c3':s3})
df
# show the first 3 lines of the file
!head -n 3 ../../data/sp500.csv
sp500 = pd.read_csv("../../data/sp500.csv",index_col='Symbol',usecols=[0,2,3,7])
# first five rows
sp500.head()
# last 5 rows
sp500.tail()
# how many rows of data
len(sp500)
# examine the index
sp500.index
# get the columns
sp500.columns
# second dataset
!head -n 3 ../../data/omh.csv
# read in the data
one_mon_hist = pd.read_csv("../../data/omh.csv")
one_mon_hist[:3]
# get first and second columns by position or location
sp500.columns
type(sp500)
sp500[sp500.columns[1]].head()
df = sp500['Price']
df
# create a new dataframe with integers as the columns names
# make sure to use .copy() or change will be in-place
df = sp500.copy()
df.columns = [0,1,2]
df.head()
# because the column names are actually integers
# and therefore [1] is found as a column
df[1]
df.columns
# get price column by name
# result is a series
sp500['Price']
# get price and sector columns
# since a list is passed, the result is a DataFrame
sp500[['Price','Sector']]
# attribute access of the column by name
sp500.Price
loc = sp500.columns.get_loc('Price')
loc
# first five rows
sp500[:5]
sp500['ABT':'ACN']
sp500.loc['ACN']
sp500.loc[['MMM','MSFT']]
# get rows in locations 0 and 2
sp500.iloc[[0,2]]
# get the location of MMM and A in the index
i1 = sp500.index.get_loc('MMM')
i2 = sp500.index.get_loc('A')
"{0},{1}".format(i1,i2)
# and get the rows
sp500.iloc[[i1,i2]]
# by label
sp500.ix[['MSFT','ZTS']]
# by label in both the index and column
sp500.at['MMM','Price']
# by location. Row 0, column 1
sp500.iat[0,1]
# what rows have a price < 100 ?
sp500.Price < 100
# get only the Price where price is < 10 and > 0
r = sp500[(sp500.Price < 10) & (sp500.Price > 0)] [['Price']]
r
# rename the Book Value colun to not have a space
# this returns a copy with the column renamed
df = sp500.rename(columns={'Book Value':'BookValue'})
# print first 2 rows
df[:2]
df.columns
# old dataframe remains intact
sp500.columns
# this changes the column in-place
sp500.rename(columns={'Book Value':'BookValue'},inplace=True)
sp500.columns
sp500.BookValue[:5]
# make a copy
copy = sp500.copy()
copy['TwicePrice'] = sp500.Price * 2
copy[:2]
copy = sp500.copy()
copy.insert(2,'TwicePrice',sp500.Price*2)
copy[:2]
# extract the first 4 rows and Price column
rcopy = sp500[0:3][['Price']].copy()
rcopy
# create a new series to merge as a column
# one label (MMM) exists in rcopy and the other (MSFT) does not
s = pd.Series({'MMM':'Is in the DataFrame','MSFT':'Is not in the DataFrame'})
s
rcopy['Comment'] = s
rcopy
# replace the Price column data with the new values
# instead of adding a new column
copy = sp500.copy()
copy.Price = sp500.Price * 2
copy[:5]
# copy all 500 rows
copy = sp500.copy()
prices = sp500.iloc[[3,1,0]].Price.copy()
# examine the extracted prices
prices
# now replace the Price column with prices
copy.Price = prices
# it is not really simple insertion, it is alignment
# values are put in the correct place according to labels
copy
# Example of using del to delete a column
# make a copy of a subset of the data frame
copy = sp500[:2].copy()
copy
del copy['BookValue']
copy
# Example of using pop to remove a column from a DataFrame
# first make a copy of a subset of the data frame
# pop works in-place
copy = sp500[:2].copy()
popped = copy.pop('Sector')
copy
# and we have the Sector column as the result of the pop
popped
# Example of using drop to remove a column
# make a copy of a subset of the DataFrame
copy = sp500[:2].copy()
afterdrop = copy.drop(['Sector'],axis=1)
afterdrop
# copy the first three rows of sp500
df1 = sp500.iloc[0:3].copy()
# copy the 10th and 11th rows
df2 = sp500.iloc[[10,11,2]]
# append df1 and df2
appended = df1.append(df2)
# the result is the rows of the first followed by those in the second
appended
# DataFrame using df1.index and just a PER column
# also a good example of using a scalar value
# to initialize multiple rows.
df3 = pd.DataFrame(0.0,index=df1.index,columns=['PER'])
df3
# append df1 and df3
# each has three rows, so 6 rows is the result
# df1 had no PER column, so NaN for those rows
# df3 had no BookValue, Price or Sector, so NaN values
df1.append(df3)
# ignore index labels, create default index
df1.append(df3,ignore_index=True)
# copy the first 3 rows of sp500
df1 = sp500.iloc[0:3].copy()
# copy 10th and 11th rows
df2 = sp500.iloc[[10,11,2]]
## pass them as a list
pd.concat([df1,df2])
# copy df2
df2_2 = df2.copy()
# add column to df2_2 that is not in df1
df2_2.insert(3,'Foo',pd.Series(0,index=df2.index))
df2_2
# now concatenate
pd.concat([df1,df2_2])
# specify keys
r = pd.concat([df1,df2_2],keys=['df1','df2'])
r
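# Aside (not in the original text): because keys=... added an outer index level,
# the rows that came from each source object can be pulled back out with .loc
r.loc['df2']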
# first three rows, columns 0 and 1
# causing error => df3 = sp500[:3][[0,1]]
df3 = sp500[:3][['Price','Sector']]
df3
df4 = sp500[:3][['BookValue']]
df4
# put them back together
pd.concat([df3,df4],axis=1)
# make a copy of df4
df4_2 = df4.copy()
# add a column to df4_2, that is also in df3
df4_2.insert(1,'Sector',pd.Series(1,index=df4_2.index))
df4_2
# demonstrate duplicate columns
pd.concat([df3,df4_2],axis=1)
# first three rows and first two columns
df5 = sp500[:3][['Sector','Price']]
df5
# rows 2 through 4 and the first two columns
df6 = sp500[2:5][['Sector','Price']]
df6
# inner join on index labels will return in only one row
pd.concat([df5,df6],join='inner',axis=1)
# get a small subset of the sp500
# make sure to copy the slice so we get an actual copy
ss = sp500[:3].copy()
# create a new row with index lable FOO
# and assign some values to the columns via a list
ss.loc['FOO'] = ['the sector',100,110]
ss
# copy of the subset/ slice
ss = sp500[:3].copy()
# add the new column initialized to 0
ss.loc[:,'PER'] = 0
ss
# get a copy of the first 5 rows of sp500
ss = sp500[:5].copy()
ss
# drop rows with labels ABT and ACN
afterdrop = ss.drop(['ABT','ACN'])
afterdrop
# note that ss is not modified
ss
# determine the rows where Price > 300
selection = sp500.Price > 300
# to make the output shorter, report the # of rows returned (500),
# and the sum of those where Price > 300 (which is 10)
"{0} {1}".format(len(selection),selection.sum())
# select the complement
withPriceLessThan300 = sp500[~selection]
withPriceLessThan300
# get only the first 3 rows
onlyfirstthree = sp500[:3]
onlyfirstthree
# first three but a copy of them
# get only the first 3 rows
onlyfirstthree = sp500[:3].copy()
onlyfirstthree
# get a subset / copy of the data
subset = sp500[:3].copy()
subset
# change scalar by label on row and column
subset.ix['MMM','Price'] = 0
subset
# subset of first three rows
subset = sp500[:3].copy()
# get the location of the Price column
price_loc = sp500.columns.get_loc('Price')
# get the location of the MMM row
abt_row_loc = sp500.index.get_loc('ABT')
# change the price
subset.iloc[abt_row_loc,price_loc] = 1000
subset
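# Aside (not in the original text): a rough illustration of why positional access
# can be faster than label access -- .loc first has to translate labels into
# locations. Exact timings depend on the pandas version and data size.
%timeit sp500.loc['ABT', 'Price']
%timeit sp500.iloc[abt_row_loc, price_loc]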
# set the seed to allow replicatable results
np.random.seed(123456)
# create the data frame
df = pd.DataFrame(np.random.randn(5,4), columns=['A','B','C','D'])
df
# multiply everything by 2
df * 2
# get first row
s = df.iloc[0]
# subtract first row from every row of the dataframe
diff = df - s
diff
# subtract dataframe from series
diff = s - df
diff
# B, C
s2 = s[1:3]
# add E
s2['E'] = 0
# see how alignment is applied in math
df + s2
# get rows 1 through three and only B,C columns
subframe = df[1:4][['B','C']]
# we have extracted a little square in the middle of the df
subframe
# demonstrate the alignment of the subtraction
df - subframe
# get the A column
a_col = df['A']
df.sub(a_col, axis=0)
# reset the index, moving it into a column
reset_sp500 = sp500.reset_index()
reset_sp500
reset_sp500.set_index('Symbol')
# get first four rows
subset = sp500[:4].copy()
subset
# reindex to have MMM,ABBV and FOO index labels
reindexed = subset.reindex(index=['MMM','ABBV','FOO'])
# not that ABT and ACN are dropped and FOO has NaN values
reindexed
# reindex columns
subset.reindex(columns=['Price','BookValue','New_Column'])
# first push symbol into a column
reindexed = sp500.reset_index()
# and now index sp500 by sector and symbol
multi_fi = reindexed.set_index(['Sector','Symbol'])
multi_fi
# the index is a multiindex
# examine the index
type(multi_fi.index)
# examine the index
multi_fi.index
# this has 2 levels
len(multi_fi.index.levels)
# each index level is an index
multi_fi.index.levels[0]
# values of the index level 0
multi_fi.index.get_level_values(0)
# get all the stocks that are industrials
# note the result drops level 0 of the index
multi_fi.xs('Industrials')
# select rows where level 1 is ALLE
# note that the sector level is dropped from the result
multi_fi.xs('ALLE', level=1)
# Industrials, without dropping the level
multi_fi.xs('Industrials', drop_level=False)
# drill through the levels
multi_fi.xs('Industrials').xs('UPS')
# drill through using tuples
multi_fi.xs(('Industrials','UPS'))
# calc the mean of the values in each column
one_mon_hist.mean()
# calc the mean of the values in each row
one_mon_hist.mean(axis=1)
# calc the variance of the values in each column
one_mon_hist.var()
# calc the median of the values in each column
one_mon_hist.median()
# min price for both stocks
one_mon_hist[['MSFT','AAPL']].min()
# and the max price
one_mon_hist[['MSFT','AAPL']].max()
# location of the min price for both stocks
one_mon_hist[['MSFT','AAPL']].idxmin()
one_mon_hist[['MSFT','AAPL']].idxmax()
# find the mode of the Series
s = pd.Series([1,2,3,3,5])
s.mode()
# there can be more than one mode
s = pd.Series([1,2,3,3,5,1])
s.mode()
# calculate a cumulative product
pd.Series([1,2,3,4]).cumprod()
# calculate a cumulative sum
pd.Series([1,2,3,4]).cumsum()
# summary statistics
one_mon_hist.describe()
# get summary statistics on non-numeric data
s = pd.Series(['a','a','b','c',np.NaN])
s.describe()
# count of non-NaN values in the non-numeric data
s.count()
# return a list of unique items
s.unique()
# number of occurences of each unique value
s.value_counts()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A DataFrame also performs automatic alignment of the data for each Series passed in by a dictionary. For example, the following code adds a third column in the DataFrame initialisation.
Step2: In the above example, first two Series did not have an index specified so they were indexed with default labels 0..4. The third Series has index values, and therefore the values for those indexes are placed in DataFrame in the row with the matching index from the previous columns.
Step3: Selecting Columns of a DataFrame
Step4: Selecting rows and values of a DataFrame using the index
Step5: Selecting rows by index label and/or location
Step6: Scalar lookup by label or location using .at[] and iat[]
Step7: Selecting rows of a DataFrame by Boolean selection
Step8: Modifying the structure and content of DataFrame
Step9: Adding and Inserting Columns
Step10: It is important to remember that this is not simply inserting a column into the DataFrame. The alignment process used here is performing a left join of the DataFrame and the Series by their index labels and then creating the column and populating data in the appropriate cell in the DataFrame.
Step11: Replacing the contents of a column
Step12: Deleting Columns in a DataFrame
Step13: Using pop
Step14: Using drop
Step15: Adding rows to a DataFrame
Step16: The set of columns of the DataFrame objects being appended does not need to be the same. The resulting DataFrame will consist of the union of the columns in both, and where either did not have a column, NaN will be used as the value.
Step17: To append without forcing the index to be taken from either DataFrame, you can use the ignore_index=True parameter.
Step18: Concatenating DataFrame objects with pd.concat()
Step19: Actually, pandas calculates the sorted union of distinct column names across all supplied objects and uses those as the columns and then appends data along the rows for each object in the order given in the list.
Step20: Using the keys parameter, it is possible to differentiate the pandas objects from which the rows originated. The following code adds a level to the index which represents the source object.
Step21: We can change the axis of the concatenation to work along the columns by specifying axis = 1, which will calculate the sorted union of the distinct index labels from the rows and then append columns and their data from the specified objects.
Step22: We can further examine this operation by adding a column to the second DataFrame that has a name duplicating a column in the first. The result will have duplicate columns, as they are blindly appended.
Step23: pandas is performing an outer join along the labels of the specified axis. An inner join can be specified using the join='inner' parameter.
Step24: Adding rows (and columns) via setting with enlargement
Step25: Note that the change is made in-place. If FOO already exists as an index label, then the column data would be replaced. This is one of the means of updating data in a DataFrame in-place as .loc not only retrieves row(s), but also lets you modify the results that are returned.
Step26: Removing rows from a DataFrame
Step27: Removing rows using Boolean selection
Step28: Removing rows using a slice
Step29: Changing scalar values in a DataFrame
Step30: .loc may suffer from lower performance as compared to .iloc due to the possibility of needing to map the label values into locations.
Step31: Arithmetic on a DataFrame
Step32: When performing an operation between a DataFrame and a Series, pandas will align the Series index along the DataFrame columns, performing what is referred to as a row-wise broadcast.
Step33: The set of columns returned will be the union of the labels in the index of both the series and columns index of the DataFrame object. If a label representing the result column is not found in either the Series of the DataFrame object, then the values will be NaN filled.
Step34: Resetting and Reindexing
Step35: One or more columns can also be moved into the index. We can use the set_index() method for this.
Step36: Reindexing can also be done upon the columns.
Step37: Hierarchical Indexing
Step38: Values of the index, at a specific level for every row, can be retrieved by the .get_level_values() method
Step39: Summarized data and descriptive statistics
Step40: Some pandas statistical methods are referred to as indirect statistics, for example, .idxmin() and .idxmax() return the index location where the minimum and maximum values exist respectively.
Step41: Accumulations in pandas are statistical methods that determine a value by continuously applying the next value in a Series to the current result.
Step42: Non-numerical will result in a slightly different set of summary statistics.
|
14,916 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact
plt.style.use('classic')
def p(x,a):
if abs(x)<a:
return 1.
else:
return 0.
pulso = np.vectorize(p)  # vectorize the pulse function p
x = np.linspace(-10,10,1000)
k = np.linspace(-10,10,1000)
def p(a=1):
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
#fig,ej=subplots(1,2,figsize=(14,5))
plt.plot(x,pulso(x,a), lw = 2)
plt.xlim(-10,10)
plt.ylim(-.1,1.1)
plt.grid(True)
plt.xlabel(r'$x$',fontsize=15)
plt.ylabel(r'$f(x)$',fontsize=15)
plt.subplot(1,2,2)
plt.plot(k,2*(np.sin(k*a)/k), lw = 2)
plt.xlim(-10,10)
plt.grid(True)
plt.xlabel('$k$',fontsize=15)
plt.ylabel('$\\tilde{f}(k)$',fontsize=15)
#p(5)
#plt.savefig('fig-transformada-Fourier-pulso-cuadrado.pdf')
interact(p, a=(1,10))
def gaussina(alpha=1):
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(x,np.exp(-alpha*x**2), lw=2)
plt.xlim(-3,3)
plt.grid(True)
plt.xlabel('$x$',fontsize=15)
plt.ylabel('$f(x)$',fontsize=15)
plt.subplot(1,2,2)
plt.plot(k,np.sqrt(np.pi/alpha)*np.exp(-k**2/(4.*alpha)), lw=2)
plt.xlim(-10,10)
plt.ylim(0,2)
plt.grid(True)
plt.xlabel('$k$',fontsize=15)
plt.ylabel('$\\tilde{f}(k)$',fontsize=15)
interact(gaussina, alpha=(1,50))
#gaussina(5)
#plt.savefig('fig-transformada-Fourier-gaussiana.pdf')
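# Aside (not part of the original notebook): a quick numerical check of the analytic
# result plotted above. Approximating F(k) = integral of f(x) * exp(-i k x) dx for the
# Gaussian with a trapezoid rule should reproduce sqrt(pi/alpha) * exp(-k**2 / (4*alpha)).
alpha_chk, k_chk = 2.0, 1.5
x_chk = np.linspace(-20, 20, 4001)
F_num = np.trapz(np.exp(-alpha_chk * x_chk**2) * np.exp(-1j * k_chk * x_chk), x_chk)
F_exact = np.sqrt(np.pi / alpha_chk) * np.exp(-k_chk**2 / (4 * alpha_chk))
print(F_num.real, F_exact)  # the two values should agree closely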
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Square pulse
Step2: We define 1000 points in the interval $[-\pi,\pi]$
Step3: Gaussian function
|
14,917 | <ASSISTANT_TASK:>
Python Code:
import re
from gensim import models
from scipy import spatial
import numpy as np
import os.path
import urllib
import gzip
import json
import pandas as pd
def search_tags(entity, search):
    """This function searches through all the 'tags' (semantic content) of a data set
    and returns True if the search expression is found. Case insensitive."""
all_tags = '; '.join([str(x) for x in entity['tags'].values()])
return bool(re.search(search, all_tags, flags=re.IGNORECASE))
def gunzipFile(inFileName, outFileName):
inF = gzip.open(inFileName, 'rb')
outF = open(outFileName, 'wb')
outF.write( inF.read() )
inF.close()
outF.close()
# the idea for this code comes from this blog post:
# http://sujitpal.blogspot.nl/2015/09/sentence-similarity-using-word2vec-and.html
def sentenceDistance(sent1, sent2, stoplist):
# remove all non-alphanumeric characters
sent1 = re.sub('[^0-9a-zA-Z]+', ' ', sent1)
sent2 = re.sub('[^0-9a-zA-Z]+', ' ', sent2)
# split up the sentences into tokens, convert to lower case, and remove stopwords
tokens1 = [word for word in sent1.lower().split() if word not in stoplist]
tokens2 = [word for word in sent2.lower().split() if word not in stoplist]
# get unique tokens
tokens1 = list(set(tokens1))
tokens2 = list(set(tokens2))
# Need to get the shortest distances from all words in sent1 to a word in sent2
# If there are matching words, then the distance is 0
# If a synonym was found, then the distance should be small
# The sum of these shortest distances for all words in sent1 is then returned as totalDist
totalDist = 9999
for token1 in tokens1:
if model.vocab.has_key(token1):
minDist = 9999
for token2 in tokens2:
if model.vocab.has_key(token2):
lv = model[token1]
rv = model[token2]
dist = spatial.distance.cosine(lv, rv)
# instead of cosine distance can also try euclidean distance
#dist = spatial.distance.euclidean(lv, rv)
if dist < minDist:
minDist = dist
if minDist < 9999:
if totalDist == 9999:
totalDist = minDist
else:
totalDist = totalDist + minDist
return(totalDist)
stopWordsFile = "en.txt"
with open(stopWordsFile) as f:
stoplist = [x.strip('\n') for x in f.readlines()]
if os.path.isfile("GoogleNews-vectors-negative300.bin.gz") == False:
# This is the direct download link for GoogleNews-vectors-negative300.bin.gz
# If the link changes, just search for the filename as this is a file often used for word2vec
downloadURL = 'https://doc-0g-8s-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/dhu4deogg9hg0tkm9tdann504ue0vp91/1461232800000/06848720943842814915/*/0B7XkCwpI5KDYNlNUTTlSS21pQmM?e=download'
urllib.urlretrieve (downloadURL, "GoogleNews-vectors-negative300.bin.gz")
if os.path.isfile("GoogleNews-vectors-negative300.bin") == False:
gunzipFile('GoogleNews-vectors-negative300.bin.gz', 'GoogleNews-vectors-negative300.bin')
model = models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
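# Aside (not in the original notebook): a tiny sanity check of sentenceDistance on
# two made-up phrases -- a lower total distance means a closer semantic match.
# Exact values depend on the pretrained Google News vectors.
print sentenceDistance('softwood lumber boards', 'pine timber planks', stoplist)
print sentenceDistance('softwood lumber boards', 'stainless steel pipes', stoplist)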
# http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
# need this to deal with unicode errors
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
gunzipFile('../catalogs/gabi_2016_professional-database-2016.json.gz',
'../catalogs/gabi_2016_professional-database-2016.json')
gunzipFile('../catalogs/uslci_ecospold.json.gz',
'../catalogs/uslci_ecospold.json')
with open('../catalogs/gabi_2016_professional-database-2016.json') as data_file:
gabi = json.load(data_file, encoding='utf-8')
with open('../catalogs/uslci_ecospold.json') as data_file:
uslci = json.load(data_file, encoding='utf-8')
gabi = byteify(gabi)
uslci = byteify(uslci)
roundwood = [flow for flow in uslci['flows'] if search_tags(flow,'roundwood, softwood')]
roundwoodExample = roundwood[0]
# number of top scores to show
numTopScores = 10
flowNames = []
distValues = []
for flow in gabi['archives'][0]['flows']:
name = flow['tags']['Name']
flowNames.append(name)
dist = sentenceDistance(roundwoodExample['tags']['Name'], name, stoplist)
distValues.append(dist)
len(flowNames)
# figure out top scores
arr = np.array(distValues)
topIndices = arr.argsort()[0:numTopScores]
topScores = np.array(distValues)[topIndices]
print 'Process name to match:'
print roundwoodExample['tags']['Name']
print 'Matches using Word2Vec:'
for i, s in zip(topIndices, topScores):
if s < 9999:
print(flowNames[i],s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Load in the stopwords file. These are common words which we wish to exclude when performing comparisons (a, an, the, etc). Every word is separated by a new line.
Step3: We need to check whether we already have the word2vec model that has been pre-trained on the Google News corpus. The vectors have 300 dimensions, and the model was trained on a corpus of over 100 billion words.
Step4: Unzip the file. This may take several minutes because of the Python gzip library; it may be quicker to do this from the command line or via a system call. (A minimal stand-in for the gunzip helper used above is sketched after this step list.)
Step5: Create a model using this pre-trained data set
Step6: Load in the data from the catalog
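Editor's aside: the gunzipFile helper called in the code above is defined earlier in the notebook and is not shown in this excerpt. As a rough, hypothetical stand-in (the real helper may differ), a minimal gzip-decompression function built only on the standard library could look like this:

import gzip
import shutil

def gunzip_file(gz_path, out_path):
    # Stream-decompress a .gz archive to a plain file without reading it all into memory.
    with gzip.open(gz_path, 'rb') as f_in, open(out_path, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)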
|
14,918 | <ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import scipy
class AnomalyDetection():
def __init__(self, multi_variate=False):
# if multi_variate is True, we will use multivariate Gaussian distribution
# to estimate the probabilities
self.multi_variate = multi_variate
self.mu = None
self.sigma2 = None
self.best_epsilon = 0
self.best_f1_score = 0
def _fit_gaussian(self, X_train):
# fit the parameters of the Gaussian Distribution
# if not using the multivariate Gaussian Distribution, we will estimate
# mu and sigma for each single feature distribution separately
if self.multi_variate is False:
self.mu = np.mean(X_train, axis=0)
self.sigma2 = np.var(X_train, axis=0)
# if using the multivariate Gaussian Distribution, we estimate the vector
# of mu and variance/covariance matrix of sigma
else:
m = X_train.shape[0]
self.mu = np.mean(X_train, axis=0)
self.sigma2 = 1.0 / m * (X_train - self.mu).T.dot(X_train - self.mu)
def _prob_calc(self, X):
# helper function to calculate the probability of each instance
# in the cross-validation set
if self.multi_variate is False:
p = np.prod(np.exp(-(X - self.mu) ** 2 / (2.0 * self.sigma2))
/ np.sqrt(2.0 * math.pi * self.sigma2), axis=1)
else:
n = X.shape[1]
p = 1.0 / ((2 * math.pi) ** (n / 2.0) * (np.linalg.det(self.sigma2) ** 0.5)) \
* np.diag(np.exp(-0.5 * ((X - self.mu).dot(np.linalg.inv(self.sigma2))) \
.dot((X - self.mu).T)))
return p
def _fit_epsilon(self, X_val, y_val):
# this is the second step of model fitting
# the input is the cross-validation set
# the output is the threshold that will maximizes the f1-score
# of the positive class (anomalies) in the CV set
p_val = self._prob_calc(X_val)
p_min = np.array(p_val).min()
        p_max = np.array(p_val).max()
step = (p_max - p_min) / 100.0
for epsilon in np.arange(p_min, p_max + step, step):
y_predict = (p_val < epsilon).reshape((len(y_val), 1))
TP = np.sum([1 if y_predict[i] == 1 and y_val[i] == 1 else 0 \
for i in range(len(y_val))])
PP = np.sum((y_predict == 1))
AP = np.sum((y_val == 1))
if PP == 0 or AP == 0:
continue
precision = float(TP) / PP
recall = float(TP) / AP
f1_score = 2.0 * precision * recall / (precision + recall)
if f1_score > self.best_f1_score:
self.best_f1_score = f1_score
self.best_epsilon = epsilon
def fit(self, X_train, X_val, y_val):
# fit the anomaly detection model
# step 1 - fit mu and sigma based on the training set (all 0s)
# step 2 - fit epsilon based on validation set (0s and 1s)
self._fit_gaussian(X_train)
self._fit_epsilon(X_val, y_val)
def predict(self, X_test):
# predict using fitted model
p_test = self._prob_calc(X_test)
y_test = (p_test < self.best_epsilon).astype(int)
return y_test
from sklearn.datasets import load_iris
iris = load_iris()
X = iris['data']
y = iris['target']
print(X.shape)
print(y.shape)
print("Number of Classes 0: {}".format(np.sum(y == 0)))
print("Number of Classes 1: {}".format(np.sum(y == 1)))
print("Number of Classes 2: {}".format(np.sum(y == 2)))
y_new = y
y_new[y == 0] = 0
y_new[y == 1] = 0
y_new[y == 2] = 1
print "Number of Classes 0: {}".format(np.sum(y_new == 0))
print "Number of Classes 1: {}".format(np.sum(y_new == 1))
X_normal = X[y_new == 0]
y_normal = y_new[y_new == 0]
X_abnormal = X[y_new == 1]
y_abnormal = y_new[y_new == 1]
from sklearn.cross_validation import train_test_split
X_normal_train_val, X_normal_test, y_normal_train_val, y_normal_test = \
train_test_split(X_normal, y_normal, test_size=0.2, random_state=26)
X_normal_train, X_normal_val, y_normal_train, y_normal_val = \
train_test_split(X_normal_train_val, y_normal_train_val, test_size=0.25, random_state=26)
X_abnormal_val, X_abnormal_test, y_abnormal_val, y_abnormal_test = \
train_test_split(X_abnormal, y_abnormal, test_size=0.5, random_state=26)
X_train = X_normal_train
y_train = y_normal_train
X_val = np.r_[X_normal_val, X_abnormal_val]
y_val = np.r_[y_normal_val, y_abnormal_val]
X_test = np.r_[X_normal_test, X_abnormal_test]
y_test = np.r_[y_normal_test, y_abnormal_test]
print(X_train.shape)
print(X_val.shape)
print(X_test.shape)
ad = AnomalyDetection(multi_variate=False)
ad.fit(X_train, X_val, y_val)
y_predict = ad.predict(X_test)
print "True Values: {}".format(y_test)
print "Predicted Values: {}".format(y_predict)
print "Prediction Accuracy: {:.2%}".format(np.mean((y_predict == y_test).astype(float)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's load the Iris Dataset for a demo.
Step2: Let's just assume class 2 as the anomaly for test purposes.
Step3: Now we have 100 normal examples, and 50 abnormal cases.
Step4: Then we can split the dataset into train (with all normal examples), validation (normal & abnormal examples), and test (the rest of normal & abnormal examples).
Step5: Finally, we can create an AnomalyDetection() object, use the train and validation data to fit the Gaussian distribution parameters and find the proper value for $\epsilon$. With the fitted model, we can predict the anomaly cases on the test dataset and check the performance.
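As an illustrative aside (not part of the original notebook), the same train/validate/predict flow can be re-run with the multivariate Gaussian estimate simply by flipping the constructor flag; this sketch assumes the AnomalyDetection class and the X_train, X_val, y_val, X_test, y_test arrays defined in the code above are in scope:

# Hypothetical follow-up: contrast the per-feature model with the multivariate variant.
ad_mv = AnomalyDetection(multi_variate=True)
ad_mv.fit(X_train, X_val, y_val)      # mu/sigma2 from normal-only data, epsilon from the CV split
y_predict_mv = ad_mv.predict(X_test)
print("Multivariate prediction accuracy: {:.2%}".format(
    np.mean((y_predict_mv == y_test).astype(float))))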
|
14,919 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import graphlab
sales = graphlab.SFrame.read_csv('Philadelphia_Crime_Rate_noNA.csv/')
sales
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="CrimeRate", y="HousePrice")
crime_model = graphlab.linear_regression.create(sales, target='HousePrice', features=['CrimeRate'],validation_set=None,verbose=False)
import matplotlib.pyplot as plt
plt.plot(sales['CrimeRate'],sales['HousePrice'],'.',
sales['CrimeRate'],crime_model.predict(sales),'-')
sales_noCC = sales[sales['MilesPhila'] != 0.0]
sales_noCC.show(view="Scatter Plot", x="CrimeRate", y="HousePrice")
crime_model_noCC = graphlab.linear_regression.create(sales_noCC, target='HousePrice', features=['CrimeRate'],validation_set=None, verbose=False)
plt.plot(sales_noCC['CrimeRate'],sales_noCC['HousePrice'],'.',
        sales_noCC['CrimeRate'],crime_model_noCC.predict(sales_noCC),'-')
crime_model.get('coefficients')
crime_model_noCC.get('coefficients')
sales_nohighend = sales_noCC[sales_noCC['HousePrice'] < 350000]
crime_model_nohighend = graphlab.linear_regression.create(sales_nohighend, target='HousePrice', features=['CrimeRate'],validation_set=None, verbose=False)
crime_model_noCC.get('coefficients')
crime_model_nohighend.get('coefficients')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load some house value vs. crime rate data
Step2: Exploring the data
Step3: Fit the regression model using crime as the feature
Step4: Let's see what our fit looks like
Step5: Above
Step6: Refit our simple regression model on this modified dataset
Step7: Look at the fit
Step8: Compare coefficients for full-data fit versus no-Center-City fit
Step9: Above
Step10: Do the coefficients change much?
|
14,920 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
x = np.linspace(0, 10, 30)
y = np.sin(x)
plt.plot(x, y, 'o', color='black');
rng = np.random.RandomState(0)
for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']:
plt.plot(rng.rand(5), rng.rand(5), marker,
label="marker='{0}'".format(marker))
plt.legend(numpoints=1)
plt.xlim(0, 1.8);
plt.plot(x, y, '-ok');
plt.plot(x, y, '-p', color='gray',
markersize=15, linewidth=4,
markerfacecolor='white',
markeredgecolor='gray',
markeredgewidth=2)
plt.ylim(-1.2, 1.2);
plt.scatter(x, y, marker='o');
rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(100)
plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
cmap='viridis')
plt.colorbar(); # show color scale
from sklearn.datasets import load_iris
iris = load_iris()
features = iris.data.T
plt.scatter(features[0], features[1], alpha=0.2,
s=100*features[3], c=iris.target, cmap='viridis')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1]);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scatter Plots with plt.plot
Step2: The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as '-', '--' to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of plt.plot, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here
Step3: For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them
Step4: Additional keyword arguments to plt.plot specify a wide range of properties of the lines and markers
Step5: This type of flexibility in the plt.plot function allows for a wide variety of possible visualization options.
Step6: The primary difference of plt.scatter from plt.plot is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.
Step7: Notice that the color argument is automatically mapped to a color scale (shown here by the colorbar() command), and that the size argument is given in points squared.
|
14,921 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from getpass import getpass
import pandas as pd
from skdaccess.framework.param_class import *
from skdaccess.geo.era_interim.cache import DataFetcher as EDF
date_list = pd.date_range('2015-06-06 00:00:00', '2015-06-06 06:00:00', freq='6H')
username='Enter username'
password = getpass()
edf = EDF(date_list=date_list, data_names=['Geopotential','Temperature'],
username=username, password=password)
edw = edf.output()
iterator = edw.getIterator()
geo_label, geo_data = next(iterator)
temp_label, temp_data = next(iterator)
plt.figure(figsize=(5,3.75));
plt.plot(temp_data[0,:,75,350], temp_data['pressure']);
plt.gca().invert_yaxis();
plt.ylabel('Pressure');
plt.xlabel('Temperature');
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify list of dates
Step2: Enter Research Data Archive (NCAR) credentials
Step3: Create data fetcher
Step4: Access data
Step5: Plot temperature data
|
14,922 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import geopandas as gpd
df = gpd.read_file("communes-20181110.shp")
!head test.csv
# https://gis.stackexchange.com/questions/114066/handling-kml-csv-with-geopandas-drivererror-unsupported-driver-ucsv
df_tracks = pd.read_csv("test.csv", skiprows=3)
df_tracks.head()
df_tracks.columns
from shapely.geometry import Point
df_tracks['geometry'] = df_tracks.apply(lambda pt: Point(pt["Longitude (deg)"], pt["Latitude (deg)"]), axis=1)
df_tracks = gpd.GeoDataFrame(df_tracks)
# https://stackoverflow.com/questions/38961816/geopandas-set-crs-on-points
df_tracks.crs = {'init' :'epsg:4326'}
df_tracks.head()
df_tracks.plot()
communes_list = [
"78160", # Chevreuse
"78575", # Saint-Rémy-lès-Chevreuse
]
df = df.loc[df.insee.isin(communes_list)]
df
ax = df_tracks.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
ax = df.plot(ax=ax, alpha=0.5, edgecolor='k')
#df.plot(ax=ax)
df_tracks_wm = df_tracks.to_crs(epsg=3857)
df_wm = df.to_crs(epsg=3857)
df_tracks_wm
ax = df_tracks_wm.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
import contextily as ctx
def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
xmin, xmax, ymin, ymax = ax.axis()
basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
ax.imshow(basemap, extent=extent, interpolation='bilinear')
# restore original x/y limits
ax.axis((xmin, xmax, ymin, ymax))
ax = df_tracks_wm.plot(figsize=(16, 16), alpha=0.5, edgecolor='k')
ax = df_wm.plot(ax=ax, alpha=0.5, edgecolor='k')
#add_basemap(ax, zoom=13, url=ctx.sources.ST_TONER_LITE)
add_basemap(ax, zoom=14)
ax.set_axis_off()
import fiona
fiona.supported_drivers
!rm tracks.geojson
df_tracks.to_file("tracks.geojson", driver="GeoJSON")
!ls -lh tracks.geojson
df = gpd.read_file("tracks.geojson")
df
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TODO
Step2: Convert the data to Web Mercator
Step3: Contextily helper function
Step4: Add background tiles to plot
Step5: Save selected departments into a GeoJSON file
|
14,923 | <ASSISTANT_TASK:>
Python Code:
import os
import json
from nltk.corpus import gutenberg
import corpushash as ch
import base64
import hashlib
import random
import nltk
#nltk.download('gutenberg') # comment (uncomment) if you have (don't have) the data
gutenberg.fileids()
base_path = os.getcwd()
base_path
corpus_path = os.path.join(base_path, 'guten_test')
corpus_path
excerpt = gutenberg.raw('austen-emma.txt')[50:478]
print(excerpt)
print(ch.text_split(excerpt))
%%time
guten_list = []
for document_name in gutenberg.fileids():
document = gutenberg.raw(document_name)
split_document = ch.text_split(document)
guten_list.append(split_document)
document = random.choice(guten_list)
print(document[:10])
%time hashed_guten = ch.CorpusHash(guten_list, corpus_path)
entries = random.sample(list(hashed_guten.encode_dictionary.keys()), k=5)
for entry in entries:
print("token >> {:^20} | hashed_token >> '{}'".format(entry, hashed_guten.encode_dictionary[entry]))
entries = random.sample(list(hashed_guten.decode_dictionary.keys()), k=5)
for entry in entries:
print("hashed_token >> '{}' | (token >> '{}', salt >> '{}'".format(entry, hashed_guten.decode_dictionary[entry][0], hashed_guten.decode_dictionary[entry][1][:4])) # cutting off some bytes for aesthetic reasons
print(excerpt)
for element in ch.walk_nested_list(ch.text_split(excerpt)):
print(element)
limit = 10 # showing first ten entries
document = random.randint(0, len(gutenberg.fileids()))
print('document {} corresponds to {}.'.format(document, gutenberg.fileids()[document]))
document_path = os.path.join(hashed_guten.public_path, "{}.json".format(document))
with open(document_path, mode="rt") as fp:
encoded_document = json.load(fp)
print("original token >> encoded token")
for ix, tokens in enumerate(zip(ch.walk_nested_list(guten_list[document]), ch.walk_nested_list(encoded_document))):
print("'{}' >> '{}'".format(tokens[0], tokens[1]))
if ix > limit:
break
for document in hashed_guten.read_hashed_corpus():
print(document[0])
break
hashed_guten.public_path
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: we'll use the gutenberg corpus as test data, which is available through the nltk library.
Step2: files in test data
Step3: creating test corpus path, where hashed documents will be stored as .json files
Step4: function to split text into nested list
Step5: every paragraph and sentence is its own list
Step6: Input
Step7: excerpt
Step8: processing using corpushash
Step9: Output
Step10: Decode dictionary
Step11: hashed .json files
Step12: we can use this function to see what hashcodecs has done to the corpus.
Step13: note
Step14: or using corpushash's read_hashed_corpus method, which yields the corpus' documents in order
Step15: alternatively, one can check the corpus_path directory and read the output files using one's favorite text editor.
|
14,924 | <ASSISTANT_TASK:>
Python Code:
from math import sin, cos
def func(x):
y = x
for i in range(30):
y = sin(x + y)
return y
from sympy import diff, Symbol, sin
from __future__ import print_function
x = Symbol('x')
dexp = diff(func(x), x)
print(dexp)
xpt = 0.1
dfdx = dexp.subs(x, xpt)
print('dfdx =', dfdx)
from algopy import UTPM, sin
x_algopy = UTPM.init_jacobian(xpt)
y_algopy = func(x_algopy)
dfdx = UTPM.extract_jacobian(y_algopy)
print('dfdx =', dfdx)
def funcad(x):
xd = 1.0
yd = xd
y = x
for i in range(30):
yd = (xd + yd)*cos(x + y)
y = sin(x + y)
return yd
dfdx = funcad(xpt)
print('dfdx =', dfdx)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can compute a derivative symbolically, but it is of course horrendous (see below). Think of how much worse it would be if we chose a function with products, more dimensions, or iterated more than 20 times.
Step2: We can now evaluate the expression.
Step3: Let's compare with automatic differentiation using operator overloading
Step4: Let's also compare to AD using a source code transformation method (I used Tapenade in Fortran)
|
14,925 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
sys.path.append(os.path.pardir)
%matplotlib inline
import numpy as np
from fa_kit import FactorAnalysis
from fa_kit import plotting as fa_plotting
def make_random_data(n_samp=10000, n_feat=100):
    """Make some random data with correlated features."""
data = np.random.randn(n_samp, n_feat)
signal_width = 10
signal_overlap = 2
step_size = signal_width - signal_overlap
for i in range(0, data.shape[1], step_size):
shared_signal = 0.3*np.random.randn(n_samp, 1)
data[:, i:(i+signal_width)] += shared_signal
return data
data = make_random_data()
def run_pipeline(data, retain_method='broken_stick',
rotation_method='varimax', **kwargs):
# Set up the factor analysis object, indiate how to calculate the
# correlation matrix out of this input data.
fa = FactorAnalysis.load_data_samples(
data,
preproc_demean=True,
preproc_scale=True
)
# Extract the components
fa.extract_components()
# Calculate how many components to retain
# You can use any of these methods:
# 'top_n', 'top_pct', 'kaiser', 'broken_stick'
fa.find_comps_to_retain(
method=retain_method,
**kwargs
)
# Once you know how many to retain, re-extract with PAF
fa.reextract_using_paf()
# Apply factor rotation
# Right now there are both 'varimax' and 'quartimax'
fa.rotate_components(
method=rotation_method
)
# Plot summary figure
fig_summary = fa_plotting.graph_summary(fa)
return fig_summary
fig_topn = run_pipeline(data, retain_method='top_n', num_keep=5)
fig_toppct = run_pipeline(data, retain_method='top_pct', pct_keep=0.2)
fig_kaiser = run_pipeline(data, retain_method='kaiser')
fig_bs = run_pipeline(data, retain_method='broken_stick', rotation_method='varimax')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Synthesizing fake data
Step3: Setting up a factor analysis pipeline
Step4: Demo
Step5: Demo
Step6: Demo
Step7: Demo
|
14,926 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import statsmodels.formula.api as smf
import pandas as pd
import scipy as sp
%matplotlib notebook
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import matplotlib.pyplot as plt
# Define true statistics relating x and y
N_points = 10
true_beta0 = 0
true_beta1 = 2
noise_stdev = 1
# Set random seed
np.random.seed(0)
# Generate correlated data
x = np.random.randn(N_points) + 2
y = true_beta0 + true_beta1*x + np.random.randn(N_points)*noise_stdev
print('x=', x)
print('y=', y)
# Plot x and y
plt.figure(figsize=(4,4))
plt.plot(x, y, 'k.', ms=12)
plt.xlabel('x',size=15)
plt.ylabel('y',size=15)
# Fit line to data
A = np.vstack([x, np.ones(len(x))]).T
m, b = np.linalg.lstsq(A, y)[0]
print('True statistics: y =', true_beta1, '*x +', true_beta0)
print('Estimated stats: y =', m, '*x +', b)
print('R squared (fraction of variance explained) =',np.round(sp.stats.pearsonr(x,y)[0],2))
# Plot fitted line
plt.figure(figsize=(4,4))
plt.plot(x, y, 'k.', ms=12)
plt.plot([0,5], [true_beta1*x+true_beta0 for x in [0,5]], 'k--',label='True correlation')
plt.plot([0,5], [m*x+b for x in [0,5]], 'r--',label='Estimated correlation')
plt.xlabel('x',size=15)
plt.ylabel('y',size=15)
plt.xlim((0,5))
plt.legend(loc='best')
# Simulate data with non-normal distribution of error
np.random.seed(1)
N_points = 100
x = np.random.randn(N_points) + 2
y = true_beta0 + true_beta1*x + np.random.randn(N_points)**2
# Fit line to data
A = np.vstack([x, np.ones(len(x))]).T
m, b = np.linalg.lstsq(A, y)[0]
print('True statistics: y =', true_beta1, '*x +', true_beta0)
print('Estimated stats: y =', m, '*x +', b)
print('R squared (fraction of variance explained) =',np.round(sp.stats.pearsonr(x,y)[0],2))
# Plot fitted line
plt.figure(figsize=(4,4))
plt.plot(x, y, 'k.', ms=8)
plt.plot([0,5], [true_beta1*x+true_beta0 for x in [0,5]], 'k--',label='True correlation')
plt.plot([0,5], [m*x+b for x in [0,5]], 'r--', label='Estimated correlation')
plt.xlabel('x',size=15)
plt.ylabel('y',size=15)
plt.xlim((0,5))
plt.legend(loc='best')
plt.figure(figsize=(8,3))
errors = y - [m*xi+b for xi in x]
hist2 = plt.hist(np.random.randn(100000)*np.std(errors),np.arange(-8,8,.5),color='r', normed=True, alpha=.5,label='normal')
hist = plt.hist(errors,np.arange(-8,8,.5),color='k', normed=True, alpha=.3,label='True error')
plt.legend(loc='best')
plt.xlabel('Estimate error')
plt.ylabel('Probability')
plt.yticks(np.arange(0,1.2,.2),np.arange(0,.6,.1))
# Burrito information
np.random.seed(0)
burrito1_cost = 6 + np.random.randn(50)
burrito1_stars = 3.5 + np.random.randn(50)*.8
burrito_new_cost = 4
burrito_new_stars = np.arange(4,5.1,.1)
# Define cost and stars arrays
c = np.append(np.ones(len(burrito_new_stars))*burrito_new_cost,burrito1_cost)
s = np.append(burrito_new_stars,burrito1_stars)
# Compute correlation
print('Statistics of random data points')
print('R squared (fraction of variance explained) =',np.round(sp.stats.pearsonr(c,s)[0]**2,2))
print('p =',np.round(sp.stats.pearsonr(c,s)[1],3))
print('\nStatistics after adding in 10 non-independent data points')
print('R squared (fraction of variance explained) =',np.round(sp.stats.pearsonr(burrito1_cost, burrito1_stars)[0],2))
print('p =',np.round(sp.stats.pearsonr(burrito1_cost, burrito1_stars)[1],3))
# Fit line to data
A = np.vstack([c, np.ones(len(c))]).T
m, b = np.linalg.lstsq(A, s)[0]
# Plot fitted line
plt.figure(figsize=(4,4))
plt.plot(c, s, 'k.', ms=8)
plt.plot([0,10], [m*x+b for x in [0,10]], 'k--')
plt.xlabel('Burrito cost')
plt.ylabel('Stars')
plt.xlim((0,10))
# Load burrito data into pandas dataframe
url = 'https://docs.google.com/spreadsheet/ccc?key=18HkrklYz1bKpDLeL-kaMrGjAhUM6LeJMIACwEljCgaw&output=csv'
df = pd.read_csv(url)
# Delete unreliable ratings
import pandasql
df.Unreliable = df.Unreliable.map({'x':1,'X':1,1:1})
df.Unreliable = df.Unreliable.fillna(0)
q = """SELECT * FROM df WHERE unreliable == 0"""
df = pandasql.sqldf(q.lower(), locals())
# Rename meat:filling column because statsmodels sucks
df.rename(columns={'Meat:filling': 'Meatratio'}, inplace=True)
# Limit data to main features
df = df[['Location','Burrito','Yelp','Cost','Hunger', 'Volume', 'Tortilla', 'Temp', 'Meat',
'Fillings', 'Meatratio', 'Uniformity', 'Salsa', 'Synergy', 'Wrap', 'overall']]
df.tail()
# Define dimensions of interest
dims = ['Cost', 'Hunger', 'Tortilla', 'Temp', 'Meat',
'Fillings', 'Meatratio', 'Uniformity', 'Salsa', 'Wrap']
# Correlate each dimension to the overall satisfaction rating
results = {}
for d in dims:
model_str = 'overall ~ ' + d
results[d] = smf.ols(model_str, data=df, missing='drop').fit()
print(d,', R2 =',results[d].rsquared, ', p =',np.round(results[d].pvalues[d],4))
plt.plot(df['Fillings'],df['overall'],'k.')
plt.xlabel('Nonmeat filling flavor')
plt.ylabel('overall satisfaction')
model_str = 'overall ~ ' + ' + '.join(dims)
print(model_str)
results_all = smf.ols(model_str, data=df, missing='drop').fit()
print(results_all.summary())
dims = ['Cost','Hunger', 'Tortilla', 'Temp', 'Meat',
'Fillings', 'Meatratio', 'Uniformity', 'Salsa', 'Synergy', 'Wrap']
model_str = 'overall ~ ' + ' + '.join(dims)
results_all = smf.ols(model_str, data=df, missing='drop').fit()
print(results_all.summary())
dfcorr = df[dims].corr()
M = len(dims)
from matplotlib import cm
clim1 = (-1,1)
plt.figure(figsize=(12,10))
cax = plt.pcolor(range(M+1), range(M+1), dfcorr, cmap=cm.bwr)
cbar = plt.colorbar(cax, ticks=(-1,-.5,0,.5,1))
cbar.ax.set_ylabel('Pearson correlation (r)', size=30)
plt.clim(clim1)
cbar.ax.set_yticklabels((-1,-.5,0,.5,1),size=20)
ax = plt.gca()
ax.set_yticks(np.arange(M)+.5)
ax.set_yticklabels(dims,size=25)
ax.set_xticks(np.arange(M)+.5)
ax.set_xticklabels(dims,size=25)
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.xlim((0,M))
plt.ylim((0,M))
y = np.arange(1,2,.01)
x1 = y + np.random.randn(len(y))*.1
x2 = x1 + np.random.randn(len(y))*.3
plt.figure(figsize=(8,4))
plt.subplot(1,2,1)
plt.plot(x1,y,'k.')
plt.ylabel('y')
plt.xlabel('x1')
plt.subplot(1,2,2)
plt.plot(x2,y,'k.')
plt.xlabel('x2')
plt.figure(figsize=(4,4))
plt.plot(x1,x2,'k.')
plt.xlabel('x1')
plt.ylabel('x2')
print('Correlation coefficient between x1 and y: ', np.round(sp.stats.pearsonr(x1,y)[0],3))
print('Correlation coefficient between x2 and y: ', np.round(sp.stats.pearsonr(x2,y)[0],3))
print('Correlation coefficient between x1 and x2: ', np.round(sp.stats.pearsonr(x1,x2)[0],3))
# Regress out features
def regress_out(x, y):
    """Regress x out of y to get a new y value."""
A = np.vstack([x, np.ones(len(x))]).T
m, b = np.linalg.lstsq(A, y)[0]
return y - b - x*m
x2b = regress_out(x1, x2)
# Visualize relationships with x2 after regressing out x1
plt.figure(figsize=(4,7))
plt.subplot(2,1,1)
plt.plot(x2b,x1,'k.')
plt.ylabel('x1')
plt.subplot(2,1,2)
plt.plot(x2b,y,'k.')
plt.ylabel('y')
plt.xlabel('x2 after regressing out x1')
print('After regressing out x1 from x2:')
print('Correlation coefficient between x2 and y: ', np.round(sp.stats.pearsonr(x2b,y)[0],3))
print('Correlation coefficient between x1 and x2: ', np.round(sp.stats.pearsonr(x1,x2b)[0],3))
df = pd.DataFrame.from_dict({'x1':x1, 'x2':x2, 'y':y})
results_all = smf.ols('y ~ x1 + x2', data=df).fit()
print(results_all.summary())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Regular linear regression
Step2: 1b. Outliers and normality of errors
Step3: 1c. Importance of independence of samples
Step5: 2. Multiple linear regression
Step6: 2a. Individual linear regressions between burrito dimensions and overall satisfaction rating (BAD)
Step7: 2b. Multiple linear regression
Step8: Add in 'flavor synergy' to model
Step9: 2c. Correlation matrix
Step10: 3. Regressing out
Step12: 3b. Regress out x1 from x2
Step13: 3c. Multiple linear regression of x1 and x2 to predict y
|
14,927 | <ASSISTANT_TASK:>
Python Code:
y = np.array([203, 58, 210, 202, 198, 158,
165, 201, 157, 131, 166, 160,
186, 125, 218, 146])
x = np.array([495, 173, 479, 504, 510, 416,
393, 442, 317, 311, 400, 337,
423, 334, 533, 344])
plt.scatter( # complete
# complete
# complete
p = np.polyfit( # complete
plt.scatter( # complete
# complete
p_yx = np.polyfit( # complete
plt.scatter( # complete
# complete
print("For y vs. x, then x=50 would predict y={:.2f}".format( # complete
print("For x vs. y, then x=50 would predict y={:.2f}".format( # complete
x = np.array([203, 58, 210, 202, 198, 158,
165, 201, 157, 131, 166, 160,
186, 125, 218, 146])
y = np.array([495, 173, 479, 504, 510, 416,
393, 442, 317, 311, 400, 337,
423, 334, 533, 344])
sigma_y = np.array([21, 15, 27, 14, 30, 16,
14, 25, 52, 16, 34, 31,
42, 26, 16, 22])
plt.errorbar( # complete
Y = # complete
A = # complete
C = # complete
X = # complete
best_fit = # complete
plt.errorbar( # complete
plt.plot( # complete
print("The best-fit value for the slope and intercept are: {:.4f} and {:.4f}".format( # complete
p = np.polyfit( # complete
print("The best-fit value for the slope and intercept are: {:.4f} and {:.4f}".format( # complete
x = np.array([201, 201, 287, 166, 58, 157, 146, 218, 203, 186, 160, 47, 210,
131, 202, 125, 158, 198, 165, 244])
y = np.array([592, 442, 402, 400, 173, 317, 344, 533, 495, 423, 337, 583, 479,
311, 504, 334, 416, 510, 393, 401])
sigma_y = np.array([61, 25, 15, 34, 15, 52, 22, 16, 21, 42, 31, 38, 27, 16, 14, 26, 16,
30, 14, 25])
Y = # complete
# complete
# complete
plt.errorbar( # complete
plt.plot( # complete
print("The best-fit value for the slope and intercept are: {:.4f} and {:.4f}".format( # complete
Y = # complete
# complete
# complete
plt.errorbar( # complete
plt.plot( # complete
# complete
# complete
# complete
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1c
Step2: Problem 2) Fitting a Line to Data
Step3: There is a very good chance, though, again, I am not specifically assuming anything, that for the previous plots that you plotted x along the abscissa and y along the ordinate.
Step4: So we have now uncovered one of the peculiarities of least-squares. Fitting y vs. x is not the same as fitting x vs. y.
Step5: We are now assuming that x has negligible uncertainties and that y has uncertainties that can be perfectly described by Gaussians of known variance. (The standard matrix solution for this weighted fit is sketched after this step list.)
Step6: Problem 2f
Step7: Problem 3) Are the Uncertainties Actually Gaussian?
Step8: Problem 3a
Step9: Unlike the data in Problems 1 and 2, there appear to be some significant outliers (of course - this appearance of outliers is entirely dependent upon the assumption of linearity, there may actually be no outliers and a complex relation between x and y). As such, it does not appear (to me) as though the best-fit line provides a good model for the data.
Step10: By eye (a metric that is hardly quantitative, but nevertheless worth developing because talks never provide all of the details), the quadratic fit appears "better" than the linear fit.
Step11: By eye, the results above are not that satisfying. Several of those points do look like outliers, but there are also 2 points being rejected that are well within the other cluster of data.
Step12: By eye, this appears superior to the previous fit. At the same time, we have not actually optimized anything to definitively show that this is the case.
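Editor's aside, not the problem set's official solution: the Y, A, C, X blanks in Problem 2 point at the standard weighted least-squares matrix solution $X = (A^T C^{-1} A)^{-1} A^T C^{-1} Y$, where $A$ holds a column of x values and a column of ones and $C$ is the diagonal covariance built from sigma_y. A minimal sketch under those conventional assumptions:

import numpy as np

def weighted_line_fit(x, y, sigma_y):
    # Weighted least squares for y = m*x + b with known Gaussian uncertainties on y.
    Y = y.reshape(-1, 1)                      # column vector of observations
    A = np.vstack([x, np.ones_like(x)]).T     # design matrix with rows [x_i, 1]
    C = np.diag(sigma_y.astype(float) ** 2)   # covariance matrix of the y measurements
    Cinv = np.linalg.inv(C)
    X = np.linalg.inv(A.T @ Cinv @ A) @ (A.T @ Cinv @ Y)
    return X[0, 0], X[1, 0]                   # best-fit slope and intercept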
|
14,928 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
YouTubeVideo(id="3sJnTpeFXZ4", width="100%")
from pyprojroot import here
import zipfile
import os
from nams.load_data import datasets
# This block of code checks to make sure that a particular directory is present.
if "divvy_2013" not in os.listdir(datasets):
print('Unzipping the divvy_2013.zip file in the datasets folder.')
with zipfile.ZipFile(datasets / "divvy_2013.zip","r") as zip_ref:
zip_ref.extractall(datasets)
import pandas as pd
stations = pd.read_csv(datasets / 'divvy_2013/Divvy_Stations_2013.csv', parse_dates=['online date'], encoding='utf-8')
stations.head()
stations.describe()
trips = pd.read_csv(datasets / 'divvy_2013/Divvy_Trips_2013.csv',
parse_dates=['starttime', 'stoptime'])
trips.head()
import janitor
trips_summary = (
trips
.groupby(["from_station_id", "to_station_id"])
.count()
.reset_index()
.select_columns(
[
"from_station_id",
"to_station_id",
"trip_id"
]
)
.rename_column("trip_id", "num_trips")
)
trips_summary.head()
import networkx as nx
G = nx.from_pandas_edgelist(
df=trips_summary,
source="from_station_id",
target="to_station_id",
edge_attr=["num_trips"],
create_using=nx.DiGraph
)
print(nx.info(G))
list(G.edges(data=True))[0:5]
list(G.nodes(data=True))[0:5]
stations.head()
for node, metadata in stations.set_index("id").iterrows():
for key, val in metadata.items():
G.nodes[node][key] = val
list(G.nodes(data=True))[0:5]
def filter_graph(G, minimum_num_trips):
    """Filter the graph such that
    only edges that have minimum_num_trips or more
    are present.
    """
G_filtered = G.____()
for _, _, _ in G._____(data=____):
if d[___________] < ___:
G_________.___________(_, _)
return G_filtered
from nams.solutions.io import filter_graph
G_filtered = filter_graph(G, 50)
import nxviz as nv
c = nv.geo(G_filtered, node_color_by="dpcapacity")
nx.write_gpickle(G, "/tmp/divvy.pkl")
G_loaded = nx.read_gpickle("/tmp/divvy.pkl")
def test_graph_integrity(G):
    """Test integrity of raw Divvy graph."""
# Your solution here
pass
from nams.solutions.io import test_graph_integrity
test_graph_integrity(G)
from nams.solutions import io
import inspect
print(inspect.getsource(io))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In order to get you familiar with graph ideas,
Step2: Firstly, we need to unzip the dataset
Step3: Now, let's load in both tables.
Step4: Now, let's load in the trips table.
Step5: Graph Model
Step6: Inspect the graph
Step7: You'll notice that the edge metadata have been added correctly
Step8: However, the node metadata is not present
Step9: Annotate node metadata
Step10: The id column gives us the node ID in the graph,
Step11: Now, our node metadata should be populated.
Step13: In nxviz, a GeoPlot object is available
Step14: Visualize using GeoPlot
Step15: Does that look familiar to you? Looks quite a bit like Chicago, I'd say
Step16: And just to show that it can be loaded back into memory
Step18: Exercise
Step19: Other text formats
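As a hedged illustration of the "Other text formats" note above (the original chapter may use a different format entirely), one common plain-text option is node-link JSON via networkx's built-in converter; default=str is used because the node metadata includes non-JSON types such as timestamps:

import json
from networkx.readwrite import json_graph

# Serialize the filtered graph to a node-link dictionary and write it out as JSON.
graph_data = json_graph.node_link_data(G_filtered)
with open("/tmp/divvy_graph.json", "w") as f:
    json.dump(graph_data, f, default=str)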
|
14,929 | <ASSISTANT_TASK:>
Python Code:
import gcp.bigquery as bq
miRNA_BQtable = bq.Table('isb-cgc:tcga_201607_beta.miRNA_Expression')
%bigquery schema --table $miRNA_BQtable
%%sql --module count_unique
DEFINE QUERY q1
SELECT COUNT (DISTINCT $f, 25000) AS n
FROM $t
fieldList = ['ParticipantBarcode', 'SampleBarcode', 'AliquotBarcode']
for aField in fieldList:
field = miRNA_BQtable.schema[aField]
rdf = bq.Query(count_unique.q1,t=miRNA_BQtable,f=field).results().to_dataframe()
print " There are %6d unique values in the field %s. " % ( rdf.iloc[0]['n'], aField)
fieldList = ['mirna_id', 'mirna_accession']
for aField in fieldList:
field = miRNA_BQtable.schema[aField]
rdf = bq.Query(count_unique.q1,t=miRNA_BQtable,f=field).results().to_dataframe()
print " There are %6d unique values in the field %s. " % ( rdf.iloc[0]['n'], aField)
%%sql
SELECT
Platform,
COUNT(*) AS n
FROM
$miRNA_BQtable
GROUP BY
Platform
ORDER BY
n DESC
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From now on, we will refer to this table using this variable ($miRNA_BQtable), but we could just as well explicitly give the table name each time.
Step2: Now let's count up the number of unique patients, samples and aliquots mentioned in this table. We will do this by defining a very simple parameterized query. (Note that when using a variable for the table name in the FROM clause, you should not also use the square brackets that you usually would if you were specifying the table name as a string.)
Step3: These counts show that the mirna_id field is not a unique identifier and should be used in combination with the MIMAT accession number.
|
14,930 | <ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] += 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
Image(filename='sentiment_network_sparse.png')
layer_0 = np.zeros(10)
layer_0
layer_0[4] = 1
layer_0[9] = 1
layer_0
weights_0_1 = np.random.randn(10,5)
layer_0.dot(weights_0_1)
indices = [4,9]
layer_1 = np.zeros(5)
for index in indices:
layer_1 += (weights_0_1[index])
layer_1
Image(filename='sentiment_network_sparse_2.png')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson
Step2: Project 1
Step3: Transforming Text into Numbers
Step4: Project 2
Step5: Project 3
Step6: Understanding Neural Noise
Step7: Project 4
Step8: Analyzing Inefficiencies in our Network
|
14,931 | <ASSISTANT_TASK:>
Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install --upgrade google-cloud-storage $USER_FLAG
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
! gcloud config set project {PROJECT_ID}
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
DATASET_LOCATION = "gs://cloud-samples-data/vertex-ai/community-content/datasets/MNLI"
TRAIN_FILE = f"{DATASET_LOCATION}/mnli_train.tf_record"
EVAL_FILE = f"{DATASET_LOCATION}/mnli_valid.tf_record"
METADATA_FILE = f"{DATASET_LOCATION}/metadata.json"
# List the files
! gsutil ls {DATASET_LOCATION}
# Examine the metadata
! gsutil cat {METADATA_FILE}
# Create training image directory
! mkdir training_image
%%writefile training_image/Dockerfile
FROM gcr.io/deeplearning-platform-release/tf2-gpu.2-5:m73
WORKDIR /
# Installs Reduction Server NCCL plugin
RUN apt remove -y google-fast-socket \
&& echo "deb https://packages.cloud.google.com/apt google-fast-socket main" | tee /etc/apt/sources.list.d/google-fast-socket.list \
&& curl -s -L https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& apt update && apt install -y google-reduction-server
# Installs Official Models and Text libraries
RUN pip install --use-deprecated=legacy-resolver tf-models-official==2.5.0 tensorflow-text==2.5.0
# Copies the trainer code to the docker image.
COPY trainer /trainer
ENV NCCL_DEBUG=INFO
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "trainer/train.py"]
CMD ["-c", "print('TF Model Garden')"]
# Create trainer directory
! mkdir training_image/trainer
%%writefile training_image/trainer/train.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM common training driver."""
import json
import os
from absl import app
from absl import flags
from absl import logging
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.common import registry_imports
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
FLAGS = flags.FLAGS
def _get_model_dir(model_dir):
    """Defines utility functions for model saving.
    In a multi-worker scenario, the chief worker will save to the
    desired model directory, while the other workers will save the model to
    temporary directories. It’s important that these temporary directories
    are unique in order to prevent multiple workers from writing to the same
    location. Saving can contain collective ops, so all workers must save and
    not just the chief.
    """
def _is_chief(task_type, task_id):
return ((task_type == 'chief' and task_id == 0) or task_type is None)
tf_config = os.getenv('TF_CONFIG')
if tf_config:
tf_config = json.loads(tf_config)
if not _is_chief(tf_config['task']['type'], tf_config['task']['index']):
model_dir = os.path.join(model_dir,
'worker-{}').format(tf_config['task']['index'])
logging.info('Setting model_dir to: %s', model_dir)
return model_dir
def main(_):
model_dir = _get_model_dir(FLAGS.model_dir)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
%%writefile training_image/trainer/glue_mnli_matched.yaml
task:
hub_module_url: 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/4'
model:
num_classes: 3
init_checkpoint: ''
metric_type: 'accuracy'
train_data:
drop_remainder: true
global_batch_size: 32
input_path: ''
is_training: true
seq_length: 128
label_type: 'int'
validation_data:
drop_remainder: false
global_batch_size: 32
input_path: ''
is_training: false
seq_length: 128
label_type: 'int'
trainer:
checkpoint_interval: 3000
optimizer_config:
learning_rate:
polynomial:
# 100% of train_steps.
decay_steps: 36813
end_learning_rate: 0.0
initial_learning_rate: 3.0e-05
power: 1.0
type: polynomial
optimizer:
type: adamw
warmup:
polynomial:
power: 1
# ~10% of train_steps.
warmup_steps: 3681
type: polynomial
steps_per_loop: 1000
summary_interval: 1000
# Training data size 392,702 examples, 3 epochs.
train_steps: 36813
validation_interval: 6135
# Eval data size = 9815 examples.
validation_steps: 307
best_checkpoint_export_subdir: 'best_ckpt'
best_checkpoint_eval_metric: 'cls_accuracy'
best_checkpoint_metric_comp: 'higher'
runtime:
distribution_strategy: 'multi_worker_mirrored'
all_reduce_alg: 'nccl'
TRAIN_IMAGE = f"gcr.io/{PROJECT_ID}/mnli_finetuning"
! docker build -t {TRAIN_IMAGE} training_image
! docker push {TRAIN_IMAGE}
REPLICA_COUNT = 2
WORKER_COUNT = REPLICA_COUNT - 1
WORKER_MACHINE_TYPE = "a2-highgpu-4g"
ACCELERATOR_TYPE = "NVIDIA_TESLA_A100"
PER_MACHINE_ACCELERATOR_COUNT = 4
PER_REPLICA_BATCH_SIZE = 32
REDUCTION_SERVER_COUNT = 4
REDUCTION_SERVER_MACHINE_TYPE = "n1-highcpu-16"
import time
PARAMS_OVERRIDE = ",".join(
[
"trainer.train_steps=2000",
"trainer.steps_per_loop=100",
"trainer.summary_interval=100",
"trainer.validation_interval=2000",
"trainer.checkpoint_interval=2000",
"task.train_data.global_batch_size="
+ str(REPLICA_COUNT * PER_REPLICA_BATCH_SIZE * PER_MACHINE_ACCELERATOR_COUNT),
"task.validation_data.global_batch_size="
+ str(REPLICA_COUNT * PER_REPLICA_BATCH_SIZE * PER_MACHINE_ACCELERATOR_COUNT),
"task.train_data.input_path=" + TRAIN_FILE,
"task.validation_data.input_path=" + EVAL_FILE,
"runtime.num_gpus=" + str(PER_MACHINE_ACCELERATOR_COUNT),
]
)
JOB_NAME = "MNLI_{}".format(time.strftime("%Y%m%d_%H%M%S"))
MODEL_DIR = f"{BUCKET_NAME}/{JOB_NAME}/model"
WORKER_ARGS = [
"--experiment=bert/sentence_prediction",
"--mode=train",
"--model_dir=" + MODEL_DIR,
"--config_file=trainer/glue_mnli_matched.yaml",
"--params_override=" + PARAMS_OVERRIDE,
]
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def writetemplate(line, cell):
with open(line, "w") as f:
f.write(cell.format(**globals()))
%%writetemplate config.yaml
trainingInput:
scaleTier: CUSTOM
masterType: {WORKER_MACHINE_TYPE}
masterConfig:
acceleratorConfig:
count: {PER_MACHINE_ACCELERATOR_COUNT}
type: {ACCELERATOR_TYPE}
imageUri: {TRAIN_IMAGE}
workerType: {WORKER_MACHINE_TYPE}
workerConfig:
acceleratorConfig:
count: {PER_MACHINE_ACCELERATOR_COUNT}
type: {ACCELERATOR_TYPE}
imageUri: {TRAIN_IMAGE}
workerCount: {WORKER_COUNT}
parameterServerType: {REDUCTION_SERVER_MACHINE_TYPE}
parameterServerConfig:
imageUri: gcr.io/cloud-ml-service-private/reductionserver:latest
parameterServerCount: {REDUCTION_SERVER_COUNT}
args: {WORKER_ARGS}
useChiefInTfConfig: true
! gcloud beta ai-platform jobs submit training {JOB_NAME} --region={REGION} --config=config.yaml
# Delete Cloud Storage objects that were created
! gsutil -m rm -r {BUCKET_NAME}
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest version of the Google Cloud Storage library.
Step2: Restart the kernel
Step3: Before you begin
Step4: Otherwise, set your project ID here.
Step5: Create a Cloud Storage bucket
Step6: Only if your bucket doesn't already exist
Step7: Finally, validate access to your Cloud Storage bucket by examining its contents
Step8: Set dataset location
Step9: Create a training container
Step12: Create training application code
Step13: Create base settings for the MNLI fine tuning experiment
Step14: Build the container
Step15: Create a custom training job
Step16: Fine tune the MNLI experiment settings
Step17: Create custom job configuration
Step18: Submit and monitor the job
Step19: Cleaning up
|
14,932 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
nx, ny = 6, 3
np.random.seed(0)
orography = np.random.normal(1000, 600, size=(ny, nx)) - 400
sea_level_temp = np.random.normal(290, 5, size=(ny, nx))
# Now visualise:
import matplotlib.pyplot as plt
plt.set_cmap('viridis')
fig = plt.figure(figsize=(8, 6))
plt.subplot(1, 2, 1)
plt.pcolormesh(orography)
cbar = plt.colorbar(orientation='horizontal',
label='Orography (m)')
# Reduce the maximum number of ticks to 5.
cbar.ax.xaxis.get_major_locator().nbins = 5
plt.subplot(1, 2, 2)
plt.pcolormesh(sea_level_temp)
cbar = plt.colorbar(orientation='horizontal',
label='Sea level temperature (K)')
# Reduce the maximum number of ticks to 5.
cbar.ax.xaxis.get_major_locator().nbins = 5
plt.show()
nz = 9
model_levels = np.arange(nz)
model_top = 5000 # m
# The proportion of orographic influence on the model altitude. In this case,
# we define this as a log progression from full influence to no influence.
sigma = 1.1 - np.logspace(-1, np.log10(1.1), nz)
# Broadcast sigma so that when we multiply the orography we get a 3D array of z, y, x.
sigma = sigma[:, np.newaxis, np.newaxis]
# Combine sigma with the orography and model top value to
# produce 3d (z, y, x) altitude data for our "model levels".
altitude = (orography * sigma) + (model_top * (1 - sigma))
plt.figure(figsize=(8, 6))
plt.fill_between(np.arange(6), np.zeros(6), orography[1, :],
color='green', linewidth=2, label='Orography')
plt.plot(np.zeros(nx),
color='blue', linewidth=1.2,
label='Sea level')
for i in range(9):
plt.plot(altitude[i, 1, :], color='gray', linestyle='--',
label='Model levels' if i == 0 else None)
plt.ylabel('altitude / m')
plt.margins(0.1)
plt.legend()
plt.show()
lapse = -6.5 / 1000 # degC / m
temperature = sea_level_temp + lapse * altitude
from matplotlib.colors import LogNorm
fig = plt.figure(figsize=(8, 6))
norm = plt.Normalize(vmin=temperature.min(), vmax=temperature.max())
for i in range(nz):
plt.subplot(3, 3, i + 1)
qm = plt.pcolormesh(temperature[i], cmap='viridis', norm=norm)
plt.subplots_adjust(right=0.84, wspace=0.3, hspace=0.3)
cax = plt.axes([0.85, 0.1, 0.03, 0.8])
plt.colorbar(cax=cax)
plt.suptitle('Temperature (K) at each "model level"')
plt.show()
target_altitudes = np.linspace(700, 5500, 5) # m
plt.figure(figsize=(8, 6))
plt.fill_between(np.arange(6), np.zeros(6), orography[1, :],
color='green', linewidth=2, label='Orography')
for i in range(9):
plt.plot(altitude[i, 1, :],
color='gray', lw=1.2,
label=None if i > 0 else 'Source levels \n(model levels)')
for i, target in enumerate(target_altitudes):
plt.plot(np.repeat(target, 6),
color='gray', linestyle='--', lw=1.4, alpha=0.6,
label=None if i > 0 else 'Target levels \n(altitude)')
plt.ylabel('height / m')
plt.margins(top=0.1)
plt.legend()
plt.savefig('summary.png')
plt.show()
import stratify
target_nz = 20
target_altitudes = np.linspace(400, 5200, target_nz) # m
new_temperature = stratify.interpolate(target_altitudes, altitude, temperature,
axis=0)
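# The vertical axis (axis=0) is replaced by the target levels, so new_temperature
# has shape (target_nz, ny, nx); points outside the source column become NaN.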
plt.figure(figsize=(8, 6))
ax1 = plt.subplot(1, 2, 1)
plt.fill_between(np.arange(6), np.zeros(6), orography[1, :],
color='green', linewidth=2, label='Orography')
cs = plt.contourf(np.tile(np.arange(6), nz).reshape(nz, 6),
altitude[:, 1],
temperature[:, 1])
plt.scatter(np.tile(np.arange(6), nz).reshape(nz, 6),
altitude[:, 1],
c=temperature[:, 1])
plt.subplot(1, 2, 2, sharey=ax1)
plt.fill_between(np.arange(6), np.zeros(6), orography[1, :],
color='green', linewidth=2, label='Orography')
plt.contourf(np.arange(6), target_altitudes,
np.ma.masked_invalid(new_temperature[:, 1]),
cmap=cs.cmap, norm=cs.norm)
plt.scatter(np.tile(np.arange(nx), target_nz).reshape(target_nz, nx),
np.repeat(target_altitudes, nx).reshape(target_nz, nx),
c=new_temperature[:, 1])
plt.scatter(np.tile(np.arange(nx), target_nz).reshape(target_nz, nx),
np.repeat(target_altitudes, nx).reshape(target_nz, nx),
s=np.isnan(new_temperature[:, 1]) * 15, marker='x')
plt.suptitle('Temperature cross-section before and after restratification')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's define a vertical coordinate system that minimises missing data values, and gives good resolution at the (orographic) surface.
Step2: Our new 3d array now represents altitude (height above sea surface) at each of our "model levels".
Step3: To recap, we now have a model vertical coordinate system that maximises the number of grid-point locations close to the orography. In addition, we have a 3d array of "altitudes" so that we can relate any phenomenon measured on this grid to useful vertical coordinate information.
Step4: Restratification / vertical interpolation
Step5: If we visualise this, we can see that we need to consider the behaviour for a number of situations, including what should happen when we are sampling below the orography, and when we are above the model top.
Step6: The default behaviour depends on the scheme, but for linear interpolation we receive NaNs both below the orography and above the model top
Step7: With some work, we can visualise this result to compare a cross-section before and after. In particular this will allow us to see precisely what the interpolator has done at the extremes of our target levels
|
14,933 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.utils import shuffle
data = pd.read_csv('fer2013/fer2013.csv')
data = shuffle(data)
X = data['pixels']
y = data['emotion']
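# The 'pixels' column stores each 48x48 grayscale image as a single string of
# space-separated integers; split it into a flat array of 48*48 = 2304 values.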
X = pd.Series([np.array(x.split()).astype(int) for x in X])
# convert one column as list of ints into dataframe where each item in array is a column
X = pd.DataFrame(np.matrix(X.tolist()))
df = pd.DataFrame(y)
df.loc[:,'f'] = pd.Series(-1, index=df.index)
df.groupby('emotion').count()
# This function plots the given sample set of images as a grid with labels
# if labels are available.
def plot_sample(S,w=48,h=48,labels=None):
m = len(S);
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)));
display_cols = int(np.ceil(m / display_rows));
fig = plt.figure()
S = S.as_matrix()
for i in range(0,m):
arr = S[i,:]
arr = arr.reshape((w,h))
ax = fig.add_subplot(display_rows,display_cols , i+1)
ax.imshow(arr, aspect='auto', cmap=plt.get_cmap('gray'))
if labels is not None:
ax.text(0,0, '{}'.format(labels[i]), bbox={'facecolor':'white', 'alpha':0.8,'pad':2})
ax.axis('off')
plt.show()
print ('0=Angry', '1=Disgust', '2=Fear', '3=Happy', '4=Sad', '5=Surprise', '6=Neutral')
samples = X.sample(16)
plot_sample(samples,48,48,y[samples.index].as_matrix())
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
# CALC AUC_ROC, binarizing each lable
y_b = pd.DataFrame(label_binarize(y, classes=[0,1,2,3,4,5,6]))
n_classes = y_b.shape[1]
# since the data we have is one big array, we want to split it into training
# and testing sets, the split is 70% goes to training and 30% of data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y_b, test_size=0.3)
neural_network =(100,)
clfs ={}
for a in [1,0.1,1e-2,1e-3,1e-4,1e-5]:
# for this excersize we are using MLPClassifier with lbfgs optimizer (the family of quasi-Newton methods). In my simple
# experiments it produces good quality outcome
clf = MLPClassifier( alpha=a, hidden_layer_sizes=neural_network, random_state=1)
clf.fit(X_train, y_train)
# So after the classifier is trained, lets see what it predicts on the test data
prediction = clf.predict(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test.as_matrix()[:,i], prediction[:,i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.as_matrix().ravel(), prediction.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
print ("ROC_AUC (micro) score is {:.04f} with alpha {}".format(roc_auc["micro"], a))
clfs[a] = clf
samples = X_test.sample(16)
p = clfs.get(0.001).predict(samples)
plot_sample(samples,48,48,[x.argmax(axis=0) for x in p])
p=y_test.loc[samples.index].as_matrix()
plot_sample(samples,48,48,[x.argmax(axis=0) for x in p])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, let us use the Neural Network with 1 hidden layer. The number of neurons in each layer is X_train.shape[1] which is 400 in our example (excluding the extra bias unit).
|
14,934 | <ASSISTANT_TASK:>
Python Code:
%gui asyncio
from flexx import flx
flx.init_notebook()
b = flx.Button(text='foo')
b
b.set_text('click me!')
None # suppress output
with flx.HBox() as hbox:
slider = flx.Slider(flex=1)
progress = flx.ProgressBar(flex=3, value=0.7)
hbox
@slider.reaction('value')
def show_slider_value(*events):
progress.set_value(slider.value) # or events[-1].new_value
class MyWidget2(flx.Widget):
def init(self):
with flx.HBox():
self._slider = flx.Slider(flex=1)
self._progress = flx.ProgressBar(flex=3)
@flx.reaction
def show_slider_value(self):
self._progress.set_value(self._slider.value / 2)
MyWidget2(style='min-height:20px')
w3 = flx.launch(MyWidget2)
from flexxamples.demos.drawing import Drawing
Drawing(style='height:100px') # Draw using the mouse below!
from flexxamples.demos.twente import Twente
Twente(style='height:300px')
from flexxamples.demos.drawing import Drawing
from flexxamples.demos.twente import Twente
from flexxamples.howtos.splitters import Split
with flx.TabLayout(style='height:300px') as w4: # min-height does not seem to work very well for panel-based layouts
Twente(title='Twente', flex=1)
Drawing(title='Drawing', flex=1)
Split(title='Split', flex=1)
w4
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Displaying widgets
Step2: Widgets have many properties to modify their appearance and behavior
Step3: Layout
Step4: Events
Step5: Compound widgets
Step6: Widgets are apps
Step7: Example apps
Step8: Since apps are really just Widgets, they can be embedded in larger apps with ease
|
14,935 | <ASSISTANT_TASK:>
Python Code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
rect_image = cv2.imread('data/I/27.png', cv2.IMREAD_GRAYSCALE)
circle_image = cv2.imread('data/O/11527.png', cv2.IMREAD_GRAYSCALE)
queen_image = cv2.imread('data/Q/18027.png', cv2.IMREAD_GRAYSCALE)
plt.figure(figsize = (10, 7))
plt.title('Rectangle Tag')
plt.axis('off')
plt.imshow(rect_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Circle Tag')
plt.axis('off')
plt.imshow(circle_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Queen Tag')
plt.axis('off')
plt.imshow(queen_image, cmap = cm.Greys_r)
print (rect_image.shape)
print (rect_image.dtype)
print (circle_image.shape)
print (circle_image.dtype)
cropped_rect_image = rect_image[4:20,4:20]
cropped_circle_image = circle_image[4:20,4:20]
cropped_queen_image = queen_image[4:20,4:20]
plt.figure(figsize = (10, 7))
plt.title('Rectangle Tag ' + str(cropped_rect_image.shape))
plt.axis('off')
plt.imshow(cropped_rect_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Circle Tag ' + str(cropped_circle_image.shape))
plt.axis('off')
plt.imshow(cropped_circle_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Queen Tag ' + str(cropped_queen_image.shape))
plt.axis('off')
plt.imshow(cropped_queen_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Rectangle Tag')
plt.axis('off')
plt.imshow(rect_image, cmap = cm.Greys_r)
print(rect_image)
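# Three common smoothing filters: a 5x5 box (mean) filter, a 5x5 median filter and
# a 5x5 Gaussian filter. Passing 0 as the last GaussianBlur argument lets OpenCV
# derive the standard deviation from the kernel size.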
mean_smoothed = cv2.blur(rect_image, (5, 5))
median_smoothed = cv2.medianBlur(rect_image, 5)
gaussian_smoothed = cv2.GaussianBlur(rect_image, (5, 5), 0)
mean_compare = np.hstack((rect_image, mean_smoothed))
median_compare = np.hstack((rect_image, median_smoothed))
gaussian_compare = np.hstack((rect_image, gaussian_smoothed))
plt.figure(figsize = (15, 12))
plt.title('Mean')
plt.axis('off')
plt.imshow(mean_compare, cmap = cm.Greys_r)
plt.figure(figsize = (15, 12))
plt.title('Median')
plt.axis('off')
plt.imshow(median_compare, cmap = cm.Greys_r)
plt.figure(figsize = (15, 12))
plt.title('Gaussian')
plt.axis('off')
plt.imshow(gaussian_compare, cmap = cm.Greys_r)
increase_brightness = rect_image + 30
decrease_brightness = rect_image - 30
increase_contrast = rect_image * 1.5
decrease_contrast = rect_image * 0.5
brightness_compare = np.hstack((increase_brightness, decrease_brightness))
constrast_compare = np.hstack((increase_contrast, decrease_contrast))
plt.figure(figsize = (15, 12))
plt.title('Brightness')
plt.axis('off')
plt.imshow(brightness_compare, cmap = cm.Greys_r)
plt.figure(figsize = (15, 12))
plt.title('Contrast')
plt.axis('off')
plt.imshow(constrast_compare, cmap = cm.Greys_r)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extract Images
Step2: Image Properties
Step3: This tells us that this image is 24x24 pixels in size, and that the datatype of the values it stores are unsigned 8 bit integers. While the explanation of this datatype isn't especially relevant to the lesson, the main point is that it is extremely important to double check the size and structure of your data. Let's do the same thing for the circular tag image too
Step4: This holds the same values, which is good. When you're working with your own datasets in the future, it would be highly beneficial to write your own little program to check the values and structure of your data to ensure that subtle bugs don't creep in to your analysis.
Step5: Feature Engineering
Step6: In fact this is not actually the case. In the case of this dataset, the features are actually the pixel values that make up the images - those are the values we'll be training the machine learning algorithm with
Step7: So what can we do to manipulate the features in our dataset? We'll explore three methods to achieve this
Step8: Feel free to have a play with the different parameters for these smoothing operations. We'll now write some code to place the original images next to their smoothed counterparts in order to compare them
Step9: Brightness and Contrast
|
14,936 | <ASSISTANT_TASK:>
Python Code:
from symbulate import *
%matplotlib inline
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
def count_strikes_in_3_hours(interarrival_times):
for time in interarrival_times:
# Be sure to return something inside this for loop;
# otherwise this for loop will run forever, since it
# is iterating over an infinite list!
raise NotImplementedError()
# Type all of your code for this problem in this cell.
# Feel free to add additional cells for scratch work, but they will not be graded.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: b)
Step3: Model 2
Step4: b)
Step5: Model 3
Step6: b)
|
14,937 | <ASSISTANT_TASK:>
Python Code:
import yaml
# Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!python -c "import tensorflow; print('TF version: {}'.format(tensorflow.__version__))"
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
!python -c "import kfp; print('KFP version: {}'.format(kfp.__version__))"
%pip install --upgrade --user tensorflow==2.3.2
%pip install --upgrade --user tfx==0.25.0
%pip install --upgrade --user kfp==1.4.0
%cd pipeline
!ls -la
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
GCP_REGION = 'us-central1'
ARTIFACT_STORE_URI = f'gs://{PROJECT_ID}-kubeflowpipelines-default'
CUSTOM_SERVICE_ACCOUNT = f'tfx-tuner-caip-service-account@{PROJECT_ID}.iam.gserviceaccount.com'
#TODO: Set your environment resource settings here for ENDPOINT.
ENDPOINT = ''
# Set your resource settings as Python environment variables. These override the default values in pipeline/config.py.
%env GCP_REGION={GCP_REGION}
%env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI}
%env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT}
%env PROJECT_ID={PROJECT_ID}
PIPELINE_NAME = 'tfx_covertype_continuous_training'
MODEL_NAME = 'tfx_covertype_classifier'
DATA_ROOT_URI = 'gs://workshop-datasets/covertype/small'
CUSTOM_TFX_IMAGE = 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)
RUNTIME_VERSION = '2.3'
PYTHON_VERSION = '3.7'
USE_KFP_SA=False
ENABLE_TUNING=False
%env PIPELINE_NAME={PIPELINE_NAME}
%env MODEL_NAME={MODEL_NAME}
%env DATA_ROOT_URI={DATA_ROOT_URI}
%env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE}
%env RUNTIME_VERSION={RUNTIME_VERSION}
%env PYTHON_VERIONS={PYTHON_VERSION}
%env USE_KFP_SA={USE_KFP_SA}
%env ENABLE_TUNING={ENABLE_TUNING}
!tfx pipeline compile --engine kubeflow --pipeline_path runner.py
!tfx pipeline create \
--pipeline_path=runner.py \
--endpoint={ENDPOINT} \
--build_target_image={CUSTOM_TFX_IMAGE}
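# If the pipeline already exists (for example when re-running this notebook), it can
# be updated in place instead of re-created; a sketch of the equivalent command:
# !tfx pipeline update --pipeline_path=runner.py --endpoint={ENDPOINT}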
!tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT}
!tfx run list --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}
RUN_ID='[YOUR RUN ID]'
!tfx run status --pipeline_name {PIPELINE_NAME} --run_id {RUN_ID} --endpoint {ENDPOINT}
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Validate lab package version installation
Step2: Note
Step3: Note
Step4: The config.py module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters.
Step5: Set the pipeline compile time settings
Step6: Compile your pipeline code
Step7: Note
Step8: If you make a mistake above and need to redeploy the pipeline you can first delete the previous version using tfx pipeline delete or you can update the pipeline in-place using tfx pipeline update.
Step9: Exercise
Step10: To retrieve the status of a given run retrieved from the command above
|
14,938 | <ASSISTANT_TASK:>
Python Code:
alphas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
max_iters = 50000
epsilon = 0.001
init_v = np.zeros(env.num_states())
init_r_bar_scalar = 0
convergence_flags = np.zeros(alphas.__len__())
for i, alpha in enumerate(alphas):
alg = RVI_Evaluation(env, init_v, alpha, ref_idx=0)
print(f'RVI Evaluation starting alpha:{alpha}', end=' ')
convergence = run_alg(alg, 'exec_sync', max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[i] = convergence
plt.figure(figsize=(15, 15))
plt.yticks(np.arange(alphas.__len__()), alphas)
plt.ylabel(r'$\alpha$', rotation=0, labelpad=20)
results = np.array([convergence_flags]).reshape(-1, 1)
plt.imshow(results, cmap='viridis', interpolation='nearest')
plt.colorbar()
plt.clim(0, 1)
plt.title('RVI Evaluation')
plt.show()
plt.close()
betas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
convergence_flags = np.zeros((alphas.__len__(), betas.__len__()))
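# Sweep every (alpha, beta) combination; each cell of convergence_flags records
# whether DVI evaluation converged within max_iters for that pair of step sizes.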
for i, alpha in enumerate(alphas):
for j, beta in enumerate(betas):
alg = DVI_Evaluation(env, init_v, init_r_bar_scalar, alpha, beta)
print(f'DVI Evaluation starting alpha:{alpha} beta:{beta}', end=' ')
convergence = run_alg(alg, 'exec_sync', max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[i][j] = convergence
plt.figure(figsize=(15, 15))
plt.yticks(np.arange(alphas.__len__()), alphas)
plt.ylabel(r'$\alpha$', rotation=0, labelpad=20)
plt.xlabel(r'$\beta$')
plt.xticks(np.arange(betas.__len__()), betas)
plt.imshow(convergence_flags, cmap='viridis', interpolation='nearest')
plt.colorbar()
plt.clim(0, 1)
plt.title('DVI Evaluation')
plt.show()
plt.close()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For $\alpha=1$, sRVI does not converge on the (periodic) 3-loop problem.
|
14,939 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import math
n = 100
x = np.linspace(1, n, n)
y = x**5
#Your code goes here
#Your code goes here
# Your code goes here
n = 100
a = np.random.normal(0, 1, n)
#Your code goes here
n = 100
c = np.random.normal(0, 2, n)
#Your code goes here
#Pipeline Setup
from quantopian.research import run_pipeline
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import CustomFactor, Returns, RollingLinearRegressionOfReturns
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.filters import QTradableStocksUS
from time import time
#MyFactor is our custom factor, based off of asset price momentum
class MyFactor(CustomFactor):
Momentum factor
inputs = [USEquityPricing.close]
window_length = 60
def compute(self, today, assets, out, close):
out[:] = close[-1]/close[0]
universe = QTradableStocksUS()
pipe = Pipeline(
columns = {
'MyFactor' : MyFactor(mask=universe),
},
screen=universe
)
start_timer = time()
results = run_pipeline(pipe, '2015-01-01', '2015-06-01')
end_timer = time()
results.fillna(value=0);
print "Time to run pipeline %.2f secs" % (end_timer - start_timer)
my_factor = results['MyFactor']
n = len(my_factor)
asset_list = results.index.levels[1].unique()
prices_df = get_pricing(asset_list, start_date='2015-01-01', end_date='2016-01-01', fields='price')
# Compute 10-day forward returns, then shift the dataframe back by 10
forward_returns_df = prices_df.pct_change(10).shift(-10)
# The first trading day is actually 2015-1-2
single_day_factor_values = my_factor['2015-1-2']
# Because prices are indexed over the total time period, while the factor values dataframe
# has a dynamic universe that excludes hard to trade stocks, each day there may be assets in
# the returns dataframe that are not present in the factor values dataframe. We have to filter down
# as a result.
single_day_forward_returns = forward_returns_df.loc['2015-1-2'][single_day_factor_values.index]
#Your code goes here
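# One possible sketch for this exercise (assumption: we want the Spearman rank
# correlation between the day's factor values and its 10-day forward returns):
scores = stats.spearmanr(single_day_factor_values, single_day_forward_returns)
print "Spearman Rank Coefficient: %.4f, p-value: %.4f" % (scores[0], scores[1])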
rolling_corr = pd.Series(index=None, data=None)
#Your code goes here
# Your code goes here
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: b. Spearman Rank Correlation
Step3: Check your results against scipy's Spearman rank function. stats.spearmanr
Step4: Exercise 2
Step5: b. Non-Monotonic Relationships
Step7: Exercise 3
Step8: b. Rolling Spearman Rank Correlation
Step9: b. Rolling Spearman Rank Correlation
|
14,940 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(0, '..')
import time
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'png')
plt.rcParams['savefig.dpi'] = 75
plt.rcParams['figure.autolayout'] = False
plt.rcParams['figure.figsize'] = 10, 6
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = "serif"
plt.rcParams['font.serif'] = "cm"
plt.rcParams['text.latex.preamble'] = "\usepackage{subdepth}, \usepackage{type1cm}"
import numpy as np
from Configuration import Configuration
from MotorUnitPoolNoChannel import MotorUnitPoolNoChannel
from InterneuronPoolNoChannel import InterneuronPoolNoChannel
from NeuralTract import NeuralTract
from SynapsesFactory import SynapsesFactory
conf = Configuration('confMNPoolWithRenshawCells.rmto')
conf.simDuration_ms = 5000 # Here I change simulation duration without changing the Configuration file.
# Time vector for the simulation
t = np.arange(0.0, conf.simDuration_ms, conf.timeStep_ms)
membPotential = np.zeros_like(t, dtype = 'd')
pools = dict()
pools[0] = MotorUnitPoolNoChannel(conf, 'SOL')
pools[1] = NeuralTract(conf, 'CMExt')
pools[2] = InterneuronPoolNoChannel(conf, 'RC', 'ext')
Syn = SynapsesFactory(conf, pools)
GammaOrder = 10
FR = 1000/12.0
tic = time.time()
for i in xrange(0, len(t)-1):
pools[1].atualizePool(t[i], FR, GammaOrder) # NeuralTract
pools[0].atualizeMotorUnitPool(t[i]) # MN pool
pools[3].atualizePool(t[i]) # RC synaptic Noise
pools[2].atualizeInterneuronPool(t[i]) # RC pool
toc = time.time()
print str(toc - tic) + ' seconds'
pools[0].listSpikes()
pools[1].listSpikes()
pools[2].listSpikes()
plt.figure()
plt.plot(pools[1].poolTerminalSpikes[:, 0],
pools[1].poolTerminalSpikes[:, 1]+1, '.')
plt.xlabel('t (ms)')
plt.ylabel('Descending Command index')
plt.figure()
plt.plot(pools[0].poolTerminalSpikes[:, 0],
pools[0].poolTerminalSpikes[:, 1]+1, '.')
plt.xlabel('t (ms)')
plt.ylabel('Motor Unit index')
plt.figure()
plt.plot(pools[2].poolSomaSpikes[:, 0],
pools[2].poolSomaSpikes[:, 1]+1, '.')
plt.xlabel('t (ms)')
plt.ylabel('Renshaw cell index')
plt.figure()
plt.plot(t, pools[0].Muscle.force, '-')
plt.xlabel('t (ms)')
plt.ylabel('Muscle force (N)')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The spike times of all descending commands along the 5000 ms of simulation are shown in Fig. \ref{fig
Step2: The spike times of the MNs along the 5000 ms of simulation are shown in Fig. \ref{fig
Step3: The spike times of the Renshaw cells along the 5000 ms of simulation are shown in Fig. \ref{fig
Step4: The muscle force during the simulation \ref{fig
|
14,941 | <ASSISTANT_TASK:>
Python Code:
import tqdm
import numpy as np
import espressomd.observables
import espressomd.accumulators
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
%matplotlib inline
espressomd.assert_features(
["ENGINE", "ROTATION", "MASS", "ROTATIONAL_INERTIA", "CUDA"])
ED_PARAMS = {'time_step': 0.01,
'box_l': 3*[10.],
'skin': 0.4,
'active_velocity': 5,
'kT': 1,
'gamma': 1,
'gamma_rotation': 1,
'mass': 0.1,
'rinertia': 3*[1.],
'corr_tmax': 100}
ED_N_SAMPLING_STEPS = 5000000
system = espressomd.System(box_l=ED_PARAMS['box_l'])
system.cell_system.skin = ED_PARAMS['skin']
system.time_step = ED_PARAMS['time_step']
pos_obs = espressomd.observables.ParticlePositions(
ids=[part_act.id, part_pass.id])
msd = espressomd.accumulators.Correlator(obs1=pos_obs,
corr_operation="square_distance_componentwise",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(msd)
vel_obs = espressomd.observables.ParticleVelocities(
ids=[part_act.id, part_pass.id])
vacf = espressomd.accumulators.Correlator(obs1=vel_obs,
corr_operation="componentwise_product",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(vacf)
ang_obs = espressomd.observables.ParticleAngularVelocities(
ids=[part_act.id, part_pass.id])
avacf = espressomd.accumulators.Correlator(obs1=ang_obs,
corr_operation="componentwise_product",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(avacf)
for i in tqdm.tqdm(range(100)):
system.integrator.run(int(ED_N_SAMPLING_STEPS/100))
system.auto_update_accumulators.remove(msd)
msd.finalize()
system.auto_update_accumulators.remove(vacf)
vacf.finalize()
system.auto_update_accumulators.remove(avacf)
avacf.finalize()
taus_msd = msd.lag_times()
msd_result = msd.result()
msd_result = np.sum(msd_result, axis=2)
taus_vacf = vacf.lag_times()
vacf_result = np.sum(vacf.result(), axis=2)
taus_avacf = avacf.lag_times()
avacf_result = np.sum(avacf.result(), axis=2)
fig_msd = plt.figure(figsize=(10, 6))
plt.plot(taus_msd, msd_result[:, 0], label='active')
plt.plot(taus_msd, msd_result[:, 1], label='passive')
plt.xlim((taus_msd[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('MSD(t)')
plt.legend()
plt.show()
def acf_stable_regime(x, y):
    """Remove the noisy tail in autocorrelation functions of finite time series."""
cut = np.argmax(y <= 0.) - 2
assert cut >= 1
return (x[1:cut], y[1:cut])
fig_vacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 1]), label='passive')
plt.xlim((taus_vacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('VACF(t)')
plt.legend()
plt.show()
fig_avacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 1]), label='passive')
plt.xlim((taus_avacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('AVACF(t)')
plt.legend()
plt.show()
def clear_system(system):
system.part.clear()
system.thermostat.turn_off()
system.constraints.clear()
system.auto_update_accumulators.clear()
system.time = 0.
clear_system(system)
import espressomd.shapes
import espressomd.math
RECT_PARAMS = {'length': 100,
'radius': 20,
'funnel_inner_radius': 3,
'funnel_angle': np.pi / 4.0,
'funnel_thickness': 0.1,
'n_particles': 500,
'active_velocity': 7,
'time_step': 0.01,
'wca_sigma': 0.5,
'wca_epsilon': 0.1,
'skin': 0.4,
'kT': 0.1,
'gamma': 1.,
'gamma_rotation': 1}
RECT_STEPS_PER_SAMPLE = 100
RECT_N_SAMPLES = 500
TYPES = {'particles': 0,
'boundaries': 1}
box_l = np.array(
[RECT_PARAMS['length'], 2*RECT_PARAMS['radius'], 2*RECT_PARAMS['radius']])
system.box_l = box_l
system.cell_system.skin = RECT_PARAMS['skin']
system.time_step = RECT_PARAMS['time_step']
system.thermostat.set_langevin(
kT=RECT_PARAMS['kT'], gamma=RECT_PARAMS['gamma'], gamma_rotation=RECT_PARAMS['gamma_rotation'], seed=42)
cylinder = espressomd.shapes.Cylinder(
center=0.5 * box_l,
axis=[1, 0, 0], radius=RECT_PARAMS['radius'], length=RECT_PARAMS['length'], direction=-1)
system.constraints.add(shape=cylinder, particle_type=TYPES['boundaries'])
# Setup walls
wall = espressomd.shapes.Wall(dist=0, normal=[1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
wall = espressomd.shapes.Wall(dist=-RECT_PARAMS['length'], normal=[-1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
funnel_length = (RECT_PARAMS['radius']-RECT_PARAMS['funnel_inner_radius']
)/np.tan(RECT_PARAMS['funnel_angle'])
com_deviations = list()
times = list()
def moving_average(data, window_size):
return np.convolve(data, np.ones(window_size), 'same') / window_size
smoothing_window = 10
com_smoothed = moving_average(com_deviations, smoothing_window)
fig_rect = plt.figure(figsize=(10, 6))
plt.plot(times[smoothing_window:-smoothing_window],
com_smoothed[smoothing_window:-smoothing_window])
plt.xlabel('t')
plt.ylabel('center of mass deviation')
plt.show()
clear_system(system)
import espressomd.lb
HYDRO_PARAMS = {'box_l': 3*[25],
'time_step': 0.01,
'skin': 1,
'agrid': 1,
'dens': 1,
'visc': 1,
'gamma': 1,
'mass': 5,
'dipole_length': 2,
'active_force': 0.1,
'mode': 'pusher'}
HYDRO_N_STEPS = 2000
system.box_l = HYDRO_PARAMS['box_l']
system.cell_system.skin = HYDRO_PARAMS['skin']
system.time_step = HYDRO_PARAMS['time_step']
system.min_global_cut = HYDRO_PARAMS['dipole_length']
box_l = np.array(HYDRO_PARAMS['box_l'])
pos = box_l/2.
pos[2] = -10.
system.integrator.run(HYDRO_N_STEPS)
vels = np.squeeze(lbf[:, int(system.box_l[1]/2), :].velocity)
vel_abs = np.linalg.norm(vels, axis=2)
lb_shape = lbf.shape
xs, zs = np.meshgrid(np.linspace(0.5, box_l[0] - 0.5, num=lb_shape[0]),
np.linspace(0.5, box_l[2] - 0.5, num=lb_shape[2]))
fig_vels, ax_vels = plt.subplots(figsize=(10, 6))
im = plt.pcolormesh(vel_abs.T, cmap='YlOrRd')
plt.quiver(xs, zs, vels[:, :, 0].T, vels[:, :, 2].T, angles='xy', scale=0.005)
circ = plt.Circle(particle.pos_folded[[0, 2]], 0.5, color='blue')
ax_vels.add_patch(circ)
ax_vels.set_aspect('equal')
plt.xlabel('x')
plt.ylabel('z')
cb = plt.colorbar(im, label=r'$|v_{\mathrm{fluid}}|$')
plt.show()
lbf.write_vtk_velocity('./fluid.vtk')
system.part.writevtk('./particle.vtk')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: No more setup needed! We can run the simulation and plot our observables.
Step4: The Mean Square Displacement of an active particle is characterized by a longer ballistic regime and an increased diffusion coefficient for longer lag times. In the overdamped limit it is given by
Step5: Before we go to the second part, it is important to clear the state of the system.
Step6: Rectification
Step7: Exercise
Step8: Exercise
Step9: Even though the potential energy inside the geometry is 0 in every part of the accessible region, the active particles are clearly not Boltzmann distributed (homogeneous density). Instead, they get funneled into the right half, showing the inapplicability of equilibrium statistical mechanics.
Step10: Hydrodynamics of self-propelled particles
Step11: Exercise
Step12: Exercise
Step13: We can also export the particle and fluid data to .vtk format to display the results with a visualization software like ParaView.
|
14,942 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Example 1: a + b
a = tf.placeholder(dtype=tf.float32, shape=[2]) # define placeholders; any data matching this shape/dtype can be fed in
b = tf.placeholder(dtype=tf.float32, shape=[2])
c = a + b
with tf.Session() as sess: # create a session
print sess.run(c, feed_dict={a:[1.,2.], b:[3.,3.]})
# Example 2: find the minimum of f(x) = x(1-x)sin(x)
import matplotlib.pylab as plt
%matplotlib inline
x = tf.Variable([1.80], dtype=tf.float32) # define a variable
#x = tf.Variable([1.7], dtype=tf.float32)
y = x * (1-x) * tf.sin(6.28*x)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(y) # use gradient descent to find the minimum
init = tf.global_variables_initializer() # initialize the variables, very important!
with tf.Session() as sess:
sess.run(init)
x_init, y_init = sess.run([x,y])
for i in range(100):
sess.run(train_op)
x_min,y_min = sess.run([x,y])
# plot
x = np.linspace(-1,3,100)
y = x * (1-x) * np.sin(6.28*x)
plt.plot(x,y,'b-')
plt.plot(x_init,y_init,'bo')
plt.plot(x_min,y_min,'ro')
plt.title("$min_x f(x)=x(x-1)\sin(x)$")
# Note: we again use the Titanic data, see sklearn_titanic.ipynb
import cPickle
with open("../kaggle_titanic/data/train_data","rb") as f:
X_train, y_train = cPickle.load(f)
X_train = X_train.astype(np.float32)
y_train = y_train.reshape((-1,1)).astype(np.float32)
X_tra, X_val, y_tra, y_val = train_test_split(X_train,y_train, test_size=0.25)
N_INPUT = 14
MAX_STEP = 1000
def inference(x): # by convention the forward pass is collected into an "inference" function; changing the model usually only requires editing this part
w = tf.Variable(np.random.randn(N_INPUT,1),dtype=tf.float32)
b = tf.Variable([0.], dtype=tf.float32)
h = tf.matmul(x,w) + b # h = x * w + b
return h
x = tf.placeholder(tf.float32, shape=[None, N_INPUT])
y = tf.placeholder(tf.float32,shape=[None, 1])
y_ = inference(x)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_)
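# sigmoid_cross_entropy_with_logits applies the sigmoid internally, so y_ must be the
# raw logits; it returns per-example losses, which the optimizer sums when taking gradients.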
y_pred = tf.cast(tf.greater(y_, 0.5), tf.float32)
correct = tf.equal(y_pred, y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) # loss is not 1-accuracy
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
acc1 = []
with tf.Session() as sess:
    init = tf.global_variables_initializer() # initialize the variables, very important!
sess.run(init)
for i in range(MAX_STEP):
_, acc_tra = sess.run([train_op,accuracy],feed_dict={x:X_tra, y:y_tra})
if i % 10 == 0 or i+1 == MAX_STEP:
acc_val = sess.run(accuracy, feed_dict={x:X_val, y:y_val})
acc1.append([i, acc_tra, acc_val])
if i % 100 == 0 or i+1 == MAX_STEP:
print "%d, train accuracy :%.4f, test accuracy: %.4f" % (i, acc_tra, acc_val)
N_INPUT = 14
MAX_STEP = 1000
N_HID = 7
def inference(x):
w1 = tf.Variable(np.random.randn(N_INPUT,N_HID),dtype=tf.float32)
b1 = tf.Variable([0.], dtype=tf.float32)
h1 = tf.nn.tanh(tf.matmul(x,w1) + b1)
w2 = tf.Variable(np.random.randn(N_HID,1),dtype=tf.float32)
b2 = tf.Variable([0.], dtype=tf.float32)
h2 = tf.matmul(h1,w2) + b2
return h2
x = tf.placeholder(tf.float32, shape=[None, N_INPUT])
y = tf.placeholder(tf.float32,shape=[None, 1])
y_ = inference(x)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_)
y_pred = tf.cast(tf.greater(y_, 0.5), tf.float32)
correct = tf.equal(y_pred, y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
acc2 = []
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for i in range(MAX_STEP):
_, acc_tra = sess.run([train_op,accuracy],feed_dict={x:X_tra, y:y_tra})
if i % 10 == 0 or i+1 == MAX_STEP:
acc_val = sess.run(accuracy, feed_dict={x:X_val, y:y_val})
acc2.append([i, acc_tra, acc_val])
if i % 100 == 0 or i+1 == MAX_STEP:
print "%d, train accuracy :%.4f, test accuracy: %.4f" % (i, acc_tra, acc_val)
import numpy as np
import matplotlib.pylab as plt
%matplotlib inline
acc1 = np.array(acc1)
acc2 = np.array(acc2)
plt.figure(figsize=(12,6))
plt.plot(acc1[:,0],acc1[:,1],'b--')
plt.plot(acc1[:,0],acc1[:,2],'b-')
plt.plot(acc2[:,0],acc2[:,1],'g--')
plt.plot(acc2[:,0],acc2[:,2],'g-')
plt.title("step vs. accuracy")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to TF
Step2: The LR algorithm
Step3: Overview of the LR algorithm:
Step4: Add a hidden layer
Step5: Comparison
|
14,943 | <ASSISTANT_TASK:>
Python Code:
from pydna.readers import read
cyc1 = read("cyc1.gb")
cyc1
cyc1.isorf()
pUG35 = read("pUG35.gb")
pUG35
p426GPD = read("p426GPD.gb")
p426GPD
pUG35.list_features()
gfp=pUG35.extract_feature(5)
gfp.seq
gfp.isorf()
from Bio.Restriction import SmaI
linear_vector= p426GPD.linearize(SmaI)
linear_vector
from pydna.design import primer_design
cyc1_amplicon = primer_design(cyc1)
cyc1_amplicon.figure()
gfp_amplicon = primer_design(gfp)
fragments = ( linear_vector, cyc1_amplicon, gfp_amplicon, linear_vector )
from Bio.Restriction import BamHI
if not any( x.cut(BamHI) for x in fragments ):
print("no cut!")
else:
print("cuts!")
from Bio.Restriction import NotI
if not any( x.cut(NotI) for x in fragments ):
print("no cut!")
else:
print("cuts!")
from pydna.dseqrecord import Dseqrecord
site = Dseqrecord(NotI.site)
site.seq
from pydna.design import assembly_fragments
linear_vector.locus = "p426GPD"
cyc1_amplicon.locus = "CYC1"
gfp_amplicon.locus = "GFP"
fragment_list = assembly_fragments((linear_vector, site, cyc1_amplicon,gfp_amplicon,linear_vector))
fragment_list
fragment_list[1].figure()
from pydna.assembly import Assembly
fragment_list = fragment_list[:-1]
fragment_list
asm = Assembly(fragment_list)
asm
candidate = asm.assemble_circular()[0]
candidate
p426GPD_CYC1_GFP = candidate
p426GPD_CYC1_GFP.write("p426GPD_CYC1_GFP.gb")
from pydna.amplicon import Amplicon
amplicons1 = [x for x in fragment_list if isinstance(x, Amplicon)]
amplicons1
# Get forward and reverse primer for each Amplicon
primers1 = [(y.forward_primer, y.reverse_primer) for y in amplicons1]
# print primer pairs:
for pair in primers1:
print(pair[0].format("fasta"))
print(pair[1].format("fasta"))
print()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The cyc1.gb sequence file only contains the ORF, so we can use it directly. The sequence file can be inspected using the ling above.
Step2: The pUG35 is a plasmid containing the GFP gene. We have to find the exact DNA fragment we want. The pUG35 genbank file contains features, one of which is the GFP ORF. Inspection in ApE concluded that the feature number 5 in the list below is the GFP ORF.
Step3: We extract the GFP sequence from Feature #5. The GFP gene is on the antisense strand, but it is returned in the correct orientation
Step4: We need to linearize p426GPD vector before the assembly. The SmaI restriction enzyme cuts between the promoter and the terminator.
Step5: We will amplify mosrt of the fragments using PCR, so we have to design primers first.
Step6: The primer_design function returns an Amplicon object which describes a PCR amplification
Step7: Then it is practical to collect all fragments to be assembled in a list or tuple. Note that below, the linear_vector appears both in the beginning and at the end. We do this since we would like to have a circular assembly.
Step8: We would like to have a unique cutting enzyme befor the cyc1 gene, so we should try to find some that dont cut
Step9: BamHI apparently cuts, lets try with NotI
Step10: NotI does not cut, lets use this!
Step11: We note that the amplicons are now a little bit larger than before. The assembly_fragments function basically adds tails to the primers of amplicon objects to facilitate the assembly. The NotI site is small ,so it was incorporated in the formward PCR primer of the CYC1 Amplicon. We can see that the CYC1 primers are quite a bit longer
Step12: Finally, we assemble the fragments using the Assembly class
Step13: We remove the final fragment, since we want a circular fragment.
|
14,944 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pastas as ps
import matplotlib.pyplot as plt
ps.show_versions()
ps.set_log_level("INFO")
oseries = pd.read_csv("../data/nb5_head.csv", parse_dates=True,
squeeze=True, index_col=0)
rain = pd.read_csv("../data/nb5_prec.csv", parse_dates=True, squeeze=True,
index_col=0)
evap = pd.read_csv("../data/nb5_evap.csv", parse_dates=True, squeeze=True,
index_col=0)
waterlevel = pd.read_csv("../data/nb5_riv.csv", parse_dates=True,
squeeze=True, index_col=0)
ps.plots.series(oseries, [rain, evap, waterlevel], figsize=(10, 5), hist=False);
ml = ps.Model(oseries.resample("D").mean().dropna(), name="River")
sm = ps.RechargeModel(rain, evap, rfunc=ps.Exponential, name="recharge")
ml.add_stressmodel(sm)
ml.solve(tmin="2000", tmax="2019-10-29")
ml.plots.results(figsize=(12, 8));
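# Add the river water level as a second stress. rfunc=ps.One gives an instantaneous,
# scaled response (no lag), which is the usual choice for surface-water levels.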
w = ps.StressModel(waterlevel, rfunc=ps.One, name="waterlevel",
settings="waterlevel")
ml.add_stressmodel(w)
ml.solve(tmin="2000", tmax="2019-10-29")
axes = ml.plots.results(figsize=(12, 8));
axes[-1].set_xlim(0,10); # By default, the axes between responses are shared.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. import and plot data
Step2: 2. Create a timeseries model
Step3: 3. Adding river water levels
|
14,945 | <ASSISTANT_TASK:>
Python Code:
NAME = "Michelle Appel"
NAME2 = "Verna Dankers"
NAME3 = "Yves van Montfort"
EMAIL = "michelle.appel@student.uva.nl"
EMAIL2 = "verna.dankers@student.uva.nl"
EMAIL3 = "yves.vanmontfort@student.uva.nl"
%pylab inline
plt.rcParams["figure.figsize"] = [20,10]
import numpy as np
import matplotlib.pyplot as plt
lims = (0, 2*np.pi)
def gen_cosine(n):
x = np.linspace(lims[0], lims[1], num=n, endpoint=True)
s = 0.2
mu = np.cos(x)
t = np.random.normal(loc=mu, scale=s)
return x, t
### Test your function
np.random.seed(5)
N = 10
x, t = gen_cosine(N)
assert x.shape == (N,), "the shape of x is incorrect"
assert t.shape == (N,), "the shape of t is incorrect"
def designmatrix(x, M): # it is highly recommended to write a helper function that computes Phi
Phi = []
for i in range(M+1):
Phi.append(np.power(x, i))
Phi = np.matrix(Phi).transpose()
return Phi
def fit_polynomial(x, t, M):
Phi = designmatrix(x, M)
w_ml = (np.linalg.inv(Phi.T*Phi)*Phi.T)*np.matrix(t).T
return np.squeeze(np.asarray(w_ml)), Phi
### Test your function
N = 10
x = np.square((np.linspace(-1, 1, N)))
t = 0.5*x + 1.5
m = 2
w, Phi = fit_polynomial(x,t,m)
assert w.shape == (m+1,), "The shape of w is incorrect"
assert Phi.shape == (N, m+1), "The shape of Phi is incorrect"
np.random.seed(5)
M = (0, 2, 4, 8)
N = 10
Nc = 1000
x, t = gen_cosine(N)
xc = np.linspace(lims[0], lims[1], Nc)
tc = np.cos(xc)
fig = plt.figure()
for i, m in enumerate(M):
ax = plt.subplot(2,2,i+1)
w, Phi = fit_polynomial(x,t,m)
tf = w*designmatrix(xc, m).T
# Plot the prediction function
plt.plot(xc, tf.T, color='r')
# Plot the cosine function
plt.plot(xc, tc, color='g')
# Plot the data
plt.scatter(x,t, marker='x')
ax.annotate("M={}".format(m), xy=(4,1), xytext=(4, 1), size=15)
plt.xlabel("x")
plt.ylabel("t")
plt.show()
def fit_polynomial_reg(x, t, m, lamb):
Phi = designmatrix(x, m)
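    # Regularized (ridge) least squares: w = (lambda*I + Phi^T Phi)^{-1} Phi^T t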
w_ml = (np.linalg.inv(lamb*np.identity(m+1) + Phi.T*Phi)*Phi.T)*np.matrix(t).T
return np.squeeze(np.asarray(w_ml)), Phi
### Test your function
N = 10
x = np.square((np.linspace(-1, 1, N)))
t = 0.5*x + 1.5
m = 2
lamb = 0.1
w, Phi = fit_polynomial_reg(x,t,m, lamb)
assert w.shape == (m+1,), "The shape of w is incorrect"
assert Phi.shape == (N, m+1), "The shape of w is incorrect"
def pred_error(x_train, x_valid, t_train, t_valid, M, reg):
w_train, Phi_train = fit_polynomial_reg(x_train, t_train, M, reg)
Phi_valid = designmatrix(x_valid, M)
w_train = np.matrix(w_train).T
err_t = Phi_valid * w_train - np.matrix(t_valid).T
pred_err = err_t.T * err_t
return pred_err
### Test your function
N = 10
x = np.linspace(-1, 1, N)
t = 0.5*np.square(x) + 1.5
M = 2
reg = 0.1
pred_err = pred_error(x[:-2], x[-2:], t[:-2], t[-2:], M, reg)
assert pred_err < 0.01, "pred_err is too big"
def kfold_indices(N, k):
all_indices = np.arange(N,dtype=int)
np.random.shuffle(all_indices)
idx = [int(i) for i in np.floor(np.linspace(0,N,k+1))]
train_folds = []
valid_folds = []
for fold in range(k):
valid_indices = all_indices[idx[fold]:idx[fold+1]]
valid_folds.append(valid_indices)
train_folds.append(np.setdiff1d(all_indices, valid_indices))
return train_folds, valid_folds
def find_best_m_and_lamb(x, t, Ms, lambs, K):
n = np.size(x)
folds = kfold_indices(n, K)
Mv, lambv = np.meshgrid(Ms, lambs)
    errs = np.zeros(Mv.shape)  # must start at zero: validation errors are accumulated over the K folds
for i in np.ndindex(Mv.shape):
for k in range(K):
ftr = folds[0][k]
fva = folds[1][k]
errs[i] += pred_error(x[ftr], x[fva], t[ftr], t[fva], Mv[i], lambv[i])
best_idx = np.unravel_index(np.argmin(errs), errs.shape)
return Mv[best_idx], lambv[best_idx]
### If you want you can write your own test here
np.random.seed(5)
N = 10
Nc = 1000
M = 10
k = 5
lamb_p = 10
Ms = np.arange(M+1)
lambs = np.exp(-np.arange(lamb_p + 1)[::-1])
x, t = gen_cosine(N)
xc = np.linspace(lims[0], lims[1], Nc)
tc = np.cos(xc)
M_best, lamb_best = find_best_m_and_lamb(x, t, Ms, lambs, k)
print("The best values are M = {} and lambda = {:.6f}".format(M_best, lamb_best))
w, Phi = fit_polynomial_reg(x, t, M_best, lamb_best)
tf = w*designmatrix(xc, M_best).T
fig = plt.figure()
# Make clear which M and lambda were found
ax = fig.add_subplot(111)
ax.annotate("M={}, $\lambda$={:.6f}".format(M_best, lamb_best), xy=(4,1), xytext=(4, 1), size=15)
# Plot the dataset
plt.scatter(x, t, marker='x')
# Plot the function we try to approximate
plt.plot(xc, tc, color='g')
# Plot the model found through cross-validation
plt.plot(xc, tf.T, color='r')
plt.xlabel("x")
plt.ylabel("t")
plt.xlim(0, 2*np.pi)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
start = 0
stop = 2*np.pi
N = 1000
def gen_cosine2(n):
    """Generate x-data from a uniform distribution between 0 and 2pi."""
x = np.random.uniform(0,2*np.pi, (n))
sigma = 0.2
mu = np.cos(x)
t = np.random.normal(loc=mu, scale=sigma)
return x, t
x2, t2 = gen_cosine2(10)
# plt.scatter(x2, t2)
# plt.show()
### Test your function
np.random.seed(5)
N = 10
x, t = gen_cosine2(N)
assert x.shape == (N,), "the shape of x is incorrect"
assert t.shape == (N,), "the shape of t is incorrect"
import matplotlib.pyplot as plt
def fit_polynomial_bayes(x, t, M, alpha, beta):
    """Fit a polynomial to data x with corresponding targets t.
    M indicates the order of the polynomial, alpha is the precision of the
    predictive distribution and beta is the noise precision.
    """
# Calculate S and m
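    # Posterior precision: S^{-1} = alpha*I + beta*Phi^T Phi ; posterior mean: m = beta*S*Phi^T*t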
Phi = designmatrix(x, M)
S = np.linalg.inv(alpha * np.identity(M+1) + beta * Phi.T * Phi)
m = np.array((beta * S * Phi.T * np.matrix(t).T).T)[0]
return m, S, Phi
### Test your function
N = 10
x = np.linspace(-1, 1, N)
t = 0.5*np.square(x) + 1.5
M = 2
alpha = 0.5
beta = 25
m, S, Phi = fit_polynomial_bayes(x, t, M, alpha, beta)
assert m.shape == (M+1,), "the shape of m is incorrect"
assert S.shape == (M+1, M+1), "the shape of S is incorrect"
assert Phi.shape == (N, M+1), "the shape of Phi is incorrect"
def predict_polynomial_bayes(x, m, S, beta):
    """Predict the target values for input x
    and return the predictions, the posterior variance and Phi.
    """
Phi = designmatrix(x, len(m)-1)
sigma = [(1 / beta + np.asscalar(Phi[i] * S * Phi[i].T))
for i in range(len(x))]
mean = [np.asscalar(m*Phi[i].T) for i in range(len(x))]
return np.array(mean), np.array(sigma), Phi
### Test your function
np.random.seed(5)
N = 10
x = np.linspace(-1, 1, N)
m = np.empty(3)
S = np.empty((3, 3))
beta = 25
mean, sigma, Phi = predict_polynomial_bayes(x, m, S, beta)
assert mean.shape == (N,), "the shape of mean is incorrect"
assert sigma.shape == (N,), "the shape of sigma is incorrect"
assert Phi.shape == (N, m.shape[0]), "the shape of Phi is incorrect"
import matplotlib.pyplot as plt
# Generate 10 datapoints
x3, t3 = gen_cosine2(10)
# Compute posterior mean and covariance
alpha = 1/2
beta = 1/(0.2*0.2)
M = 4
posterior_mean, covariance, Phi = fit_polynomial_bayes(x3, t3, M, alpha, beta)
# Get Bayesian predictive distribution
mean, sigma, Phi = predict_polynomial_bayes(x3, posterior_mean, covariance, beta)
# Plot the predictive mean
x = np.arange(0.0, 2*np.pi, 0.01)
p1 = plt.plot(x, posterior_mean[0] + posterior_mean[1]*x + posterior_mean[2]*(x*x) + \
posterior_mean[3]*np.power(x,3) + posterior_mean[4]*np.power(x,4), label="Predictive mean")
# Plot the predictive variance
mean, sigma, Phi = predict_polynomial_bayes(x, posterior_mean, covariance, beta)
p2 = plt.fill_between(x, mean-(np.sqrt(sigma)), mean+(np.sqrt(sigma)), alpha=0.1, label="Predictive variance")
# Include the datapoints in your plot
p3 = plt.scatter(x3, t3, label="Datapoints")
# Control layout
axes = plt.gca()
axes.set_xlim([0, 2*np.pi])
axes.set_ylim([-1.5, 1.5])
plt.xlabel("x")
plt.ylabel("t")
legend()
plt.show()
# Draw 100 samples from the parameters' posterior distribution
samples = np.random.multivariate_normal(posterior_mean, covariance, size=100)
# Plot every sample
for i, s in enumerate(samples):
plt.plot(x, s[0] + s[1]*x + s[2]*(x*x) + s[3]*np.power(x,3) + s[4]*np.power(x,4))
# Control layout
x = np.arange(0.0, 2*np.pi, 0.01)
plt.xlabel("x")
plt.ylabel("target")
axes = plt.gca()
axes.set_xlim([0,2*np.pi])
axes.set_ylim([-1.5,1.5])
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab 1
Step2: $\newcommand{\bPhi}{\mathbf{\Phi}}$
Step3: 1.2 Polynomial regression (10 points)
Step4: 1.3 Plot (5 points)
Step5: 1.4 Regularized linear regression (10 points)
Step6: 1.5 Model selection by cross-validation (15 points)
Step7: b) (10 points) Now write a method find_best_m_and_lamb(x, t) that finds the best values for $M$ and $\lambda$. The method should return the best $M$ and $\lambda$. To get you started, here is a method you can use to generate indices of cross-validation folds.
Step8: 1.7 Plot best cross-validated fit (5 points)
Step10: Part 2
Step12: 2.2 Compute Posterior (15 points)
Step14: 2.3 Prediction (10 points)
Step15: 2.4 Plot predictive distribution (10 points)
Step16: b) (5 points) For a second plot, draw 100 samples from the parameters' posterior distribution. Each of these samples is a certain choice of parameters for 4-th order polynomial regression.
|
14,946 | <ASSISTANT_TASK:>
Python Code:
Table((4, 1, 8),
(9, 7, 3),
(5, 2, 6))
Table(TableHeaderRow('a','b','c'),
(1, 2, 3),
(2, 4, 6),
)
Table({'a': (1, 2),
'b': (2, 4),
'c': (3, 6)})
Table({'a': (1, 2),
'b': (2,),
'c': (3, 6)})
# Computing values
t = Table(TableHeaderRow('number', 'square', 'cube'))
for x in range(1, 11):
t.append_row((x, x**2, x**3))
t
# Styling determined by code
t = Table(TableHeaderRow('divisions', 'result'))
num = 55
for x in range(7):
if num < 1:
resultcell = TableCell(num, bg_colour='DarkBlue', text_colour='white')
else:
resultcell = TableCell(num)
t.append_row((x, resultcell))
num /= 3
t
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can add a header row like this
Step2: Table also accepts dicts (or any mapping) with keys as column headers and values as column contents. The order of columns is undefined unless the mapping is an OrderedDict.
Step3: The number of column values (rows) need not be equal
Step4: You can build a table incrementally using Table.append_row(). If you need it, rows also have an append_cell() method.
Step5: You can style cells with the bg_colour and text_colour parameters. This only works in HTML for the moment; if you convert the notebook to LaTeX, the colours will be ignored.
|
14,947 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
def load_data(filename):
import csv
with open(filename, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
df = pd.DataFrame([[-1 if el == '?' else int(el) for el in r] for r in csvreader])
df.columns=["patient_id", "radius", "texture", "perimeter", "smoothness", "compactness", "concavity", "concave_points", "symmetry", "fractal_dimension", "malignant"]
df['malignant'] = df['malignant'].map({2: 0, 4: 1})
return df
training_set = load_data("data/breast-cancer.train")
test_set = load_data("data/breast-cancer.test")
print "Training set has %d patients" % (training_set.shape[0])
print "Test set has %d patients\n" % (test_set.shape[0])
print training_set.iloc[:, 0:6].head(3)
print
print training_set.iloc[:, 6:11].head(3)
training_set_malignant = training_set['malignant']
training_set_features = training_set.iloc[:, 1:10]
test_set_malignant = test_set['malignant']
test_set_features = test_set.iloc[:, 1:10]
from sklearn.preprocessing import MinMaxScaler
from sklearn import svm
# (1) Scale the 'training set'
scaler = MinMaxScaler()
scaled_training_set_features = scaler.fit_transform(training_set_features)
# (2) Create the model
model = svm.LinearSVC(C=0.1)
# (3) Fit the model using the 'training set'
model.fit(scaled_training_set_features, training_set_malignant)
# (4) Scale the 'test set' using the same scaler as the 'training set'
scaled_test_set_features = scaler.transform(test_set_features)
# (5) Use the model to predict malignancy the 'test set'
test_set_malignant_predictions = model.predict(scaled_test_set_features)
print test_set_malignant_predictions
from sklearn import metrics
accuracy = metrics.accuracy_score(test_set_malignant, \
test_set_malignant_predictions) * 100
((tn, fp), (fn, tp)) = metrics.confusion_matrix(test_set_malignant, \
test_set_malignant_predictions)
print "Accuracy: %.2f%%" % (accuracy)
print "True Positives: %d, True Negatives: %d" % (tp, tn)
print "False Positives: %d, False Negatives: %d" % (fp, fn)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training and Test Data Sets
Step2: Linear Support Vector Machine Classification
Step3: Evaluating performance of the model
|
14,948 | <ASSISTANT_TASK:>
Python Code:
# Some styling
from IPython.display import display, HTML
from IPython.display import IFrame, Image
s = """
<style>
.rendered_html h1{
font-family: "Roboto", helvetica;
color: #8896B4; !important
}
.rendered_html h2{
font-family: "Roboto", helvetica;
color: #5C6E95; !important
}
.rendered_html h3{
font-family: "Roboto", helvetica;
color: #3A4E79; !important
}
.rendered_html li{
font-family: "Roboto", helvetica;
color: #0D1F45; !important
}
.reveal a{
font-family: "Roboto", helvetica;
color: #8896B4; !important
}
#notebook {
background-color: #365776;
}
.reveal blockquote{
font-family: "Roboto", helvetica;
color: #C18451; !important
}
</style>
"""
display(HTML(s))
height=300
IFrame("http://d3js.org/", width=1000, height=height)
IFrame("http://blockbuilder.org/", width=1000, height=height)
IFrame('http://bost.ocks.org/mike/', width=1000, height=height)
IFrame("http://alignedleft.com/", width=1000, height=height)
IFrame("http://www.jeromecukier.net/", width=1000, height=300)
Image("../screenshots/xgboost-sneakpeak.png", width=1000, height=300)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: D3.js
Step2: What's D3.js?
Step3: A website inside a web presentation
Step4: What is D3.js
Step5: Scott Murray (@alignedleft)
Step6: Jérôme Cukier
Step7: Thanks for coming!
|
14,949 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
domain = np.linspace(-5.0,5.0,100)
y = np.power(domain, 2)
%matplotlib inline
# "magic" command telling Jupyter NB to embed plots
# always label and title your plot, at minimum
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Parabolic Curve")
p = plt.plot(domain, y)
x3 = np.power(domain, 3)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("X to the 3rd Power")
p = plt.plot(domain, x3)
def poly(x):
return (x - 3) * (x + 5) * (x - 1) * x
Poly = np.vectorize(poly)
y = Poly(domain)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("4th Degree Polynomial")
plt.grid()
p = plt.plot(domain, y)
y0 = np.sin(domain)
y1 = np.cos(domain)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Sine & Cosine")
plt.grid()
plt.plot(domain, y0, color = "orange", label="Sine")
plt.plot(domain, y1, color = "green", label="Cosine")
p = plt.legend()
domain.shape
col0 = pd.Series(domain)
col1 = pd.Series(np.power(domain,2))
col2 = pd.Series(x3)
col3 = Poly(domain)
datadict = {"Input":col0, "Parabola":col1, "3rd Power":col2, "Polynomial":col3}
df = pd.DataFrame(datadict, columns = ["Input", "Parabola", "3rd Power", "Polynomial"])
df.head()
df.loc[:,"3rd Power"].head()
df.loc[:10,["Input", "3rd Power"]] # rows 0-10 inclusive, two columns
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's define a domain from -5 to 5, of 100 points, and plot some XY curves that show some functions.
Step2: Now that we've plotted some data, let's organize the data into a data table, or "data frame" to be more precise. Pandas is all about the DataFrame object.
Step3: Without the columns argument, there's no guarantee that datadict will give us the left-to-right column order we desire.
Step4: Here we're starting to introduce how data may be selected by numeric indexes, yes, but also by labels.
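A short, illustrative contrast of the two selection styles (assuming the df built above; the particular slices and column names are just examples):
df.iloc[:3, [0, 2]]                   # position-based: first three rows, columns 0 and 2
df.loc[:2, ["Input", "3rd Power"]]    # label-based: row labels 0-2 inclusive, named columns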
|
14,950 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
rng = np.random.RandomState(1999)
n_samples = 1000
X = rng.rand(n_samples)
y = np.sin(20 * X) + .05 * rng.randn(X.shape[0])
X_t = np.linspace(0, 1, 100)
y_t = np.sin(20 * X_t)
plt.scatter(X, y, color='steelblue', label='measured y')
plt.plot(X_t, y_t, linestyle='-', color='darkred', label='true y')
plt.title('Noisy Example Function')
plt.legend(loc='lower left')
rng = np.random.RandomState(1999)
n_samples = 1000
X = rng.rand(n_samples)
y = np.sin(20 * X) + .05 * rng.randn(X.shape[0])
plt.scatter(X, y, color='steelblue')
plt.title('Noisy Data')
rng = np.random.RandomState(1999)
n_samples = 1000
X = rng.rand(n_samples)
y = np.sin(20 * X) + .95 * rng.randn(n_samples)
plt.scatter(X, y, color='steelblue')
plt.title('Really Noisy Data')
# from mrmartin.ner/?p=223
def exponential_kernel(x1, x2):
# Broadcasting tricks to get every pairwise distance.
return np.exp(-(x1[np.newaxis, :, :] - x2[:, np.newaxis, :])[:, :, 0] ** 2).T
# Covariance calculation for a given kernel
def covariance(kernel, x1, x2):
return kernel(x1, x2)
rng = np.random.RandomState(1999)
# Initial guess
kernel = exponential_kernel
init = np.zeros((1, 1))
sigma = covariance(kernel, init, init)
xpts = np.arange(-3, 3, step=0.01).reshape((-1, 1))
plt.errorbar(xpts.squeeze(), np.zeros(len(xpts)), yerr=sigma.squeeze(),
capsize=0, color='steelblue')
plt.ylim(-3, 3)
plt.title("Initial guess")
def conditional(x_new, x, y, kernel):
cov_xxn = covariance(kernel, x_new, x)
cov_x = covariance(kernel, x, x)
cov_xn = covariance(kernel, x_new, x_new)
mean = cov_xxn.dot(np.linalg.pinv(cov_x)).dot(y)
variance = cov_xn - cov_xxn.dot(np.linalg.pinv(cov_x)).dot(cov_xxn.T)
return mean, variance
# First point estimate
x_new = np.atleast_2d(1.)
# No conditional, this is the first value!
y_new = np.atleast_2d(0 + rng.randn())
x = x_new
y = y_new
# Plotting
y_pred, sigma_pred = conditional(xpts, x, y, kernel=kernel)
plt.errorbar(xpts.squeeze(), y_pred.squeeze(), yerr=np.diag(sigma_pred),
capsize=0, color='steelblue')
plt.plot(x, y, color='darkred', marker='o', linestyle='')
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.figure()
# Second point estimate
x_new = np.atleast_2d(-0.7)
mu, s = conditional(x_new, x, y, kernel=kernel)
y_new = np.atleast_2d(mu + np.diag(s)[:, np.newaxis] * rng.randn(*x_new.shape))
x = np.vstack((x, x_new))
y = np.vstack((y, y_new))
# Plotting
y_pred, sigma_pred = conditional(xpts, x, y, kernel=kernel)
plt.errorbar(xpts.squeeze(), y_pred.squeeze(), yerr=np.diag(sigma_pred),
capsize=0, color='steelblue')
plt.plot(x, y, color='darkred', marker='o', linestyle='')
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.figure()
# Multipoint estimate
x_new = rng.rand(3, 1)
mu, s = conditional(x_new, x, y, kernel=kernel)
y_new = mu + np.diag(s)[:, np.newaxis] * rng.randn(*x_new.shape)
x = np.vstack((x, x_new))
y = np.vstack((y, y_new))
# Plotting
y_pred, sigma_pred = conditional(xpts, x, y, kernel=kernel)
plt.errorbar(xpts.squeeze(), y_pred.squeeze(), yerr=np.diag(sigma_pred),
capsize=0, color='steelblue')
plt.plot(x, y, color='darkred', marker='o', linestyle='')
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()
mean, var = conditional(np.array([[1]]), x, y, kernel=kernel)
print("Expected value for x = %i, %.4f" % (1, mean))
print("Uncertainty %.4f" % var)
print()
mean, var = conditional(np.array([[3]]), x, y, kernel=kernel)
print("Expected value for x = %i, %.4f" % (3, mean))
print("Uncertainty %.4f" % var)
print()
mean, var = conditional(np.array([[1E6]]), x, y, kernel=kernel)
print("Expected value for x = %i, %.4f" % (1E6, mean))
print("Uncertainty %.4f" % var)
print()
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from scipy import linalg
from sklearn.utils import check_array
import matplotlib.pyplot as plt
def plot_gp_confidence(gp, show_gp_points=True, X_low=-1, X_high=1,
X_step=.01, xlim=None, ylim=None):
xpts = np.arange(X_low, X_high, step=X_step).reshape((-1, 1))
try:
y_pred = gp.predict(xpts)
mean = gp.predicted_mean_
var = gp.predicted_var_
if gp.predicted_mean_.shape[1] > 1:
raise ValueError("plot_gp_confidence only works for 1 dimensional Gaussian processes!")
rng = np.random.RandomState(1999)
y_new = mean + np.diag(var)[:, np.newaxis] * rng.randn(*xpts.shape)
except TypeError:
y_pred = xpts * 0
var = gp.predicted_var_ * np.ones((xpts.shape[0], xpts.shape[0]))
plt.errorbar(xpts.squeeze(), y_pred.squeeze(), yerr=np.diag(var),
capsize=0, color='steelblue')
if show_gp_points:
plt.plot(gp._X, gp._y, color='darkred', marker='o', linestyle='')
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.show()
# from mrmartin.ner/?p=223
def exponential_kernel(x1, x2):
# Broadcasting tricks to get every pairwise distance.
return np.exp(-(x1[np.newaxis, :, :] - x2[:, np.newaxis, :])[:, :, 0] ** 2).T
class SimpleGaussianProcessRegressor(BaseEstimator, RegressorMixin):
def __init__(self, kernel_function, copy=True):
self.kernel_function = kernel_function
self.copy = copy
self.predicted_mean_ = 0
self.predicted_var_ = self._covariance(np.zeros((1, 1)), np.zeros((1, 1)))
self._X = None
self._y = None
def _covariance(self, x1, x2):
return self.kernel_function(x1, x2)
def fit(self, X, y):
self._X = None
self._y = None
return self.partial_fit(X, y)
def partial_fit(self, X, y):
X = check_array(X, copy=self.copy)
y = check_array(y, copy=self.copy)
if self._X is None:
self._X = X
self._y = y
else:
self._X = np.vstack((self._X, X))
self._y = np.vstack((self._y, y))
def predict(self, X, y=None):
X = check_array(X, copy=self.copy)
cov_xxn = self._covariance(X, self._X)
cov_x = self._covariance(self._X, self._X)
cov_xn = self._covariance(X, X)
cov_x_inv = linalg.pinv(cov_x)
mean = cov_xxn.dot(cov_x_inv).dot(self._y)
var = cov_xn - cov_xxn.dot(cov_x_inv).dot(cov_xxn.T)
self.predicted_mean_ = mean
self.predicted_var_ = var
return mean
gp = SimpleGaussianProcessRegressor(exponential_kernel)
plt.title('Initial GP Confidence')
plot_gp_confidence(gp, X_low=-3, X_high=3, X_step=.01,
xlim=(-3, 3), ylim=(-3, 3))
rng = np.random.RandomState(1999)
n_samples = 200
X = rng.rand(n_samples, 1)
y = np.sin(20 * X) + .05 * rng.randn(X.shape[0], 1)
plt.title('Noisy Data')
plt.scatter(X, y, color='steelblue')
plt.show()
gp.fit(X, y)
X_new = rng.rand(5, 1)
gp.predict(X_new)
plt.title('Final GP Confidence')
plot_gp_confidence(gp, show_gp_points=False, X_low=0, X_high=1, X_step=.01)
gp = SimpleGaussianProcessRegressor(exponential_kernel)
plt.title('Initial GP Confidence')
plot_gp_confidence(gp, X_low=-3, X_high=3, X_step=.01,
xlim=(-3, 3), ylim=(-3, 3))
rng = np.random.RandomState(1999)
n_samples = 200
X = rng.rand(n_samples, 1)
y = np.sin(20 * X) + .95 * rng.randn(X.shape[0], 1)
plt.title('Noisy Data')
plt.scatter(X, y, color='steelblue')
plt.show()
gp.fit(X, y)
X_new = rng.rand(5, 1)
gp.predict(X_new)
plt.title('Final GP Confidence')
plot_gp_confidence(gp, show_gp_points=False, X_low=0, X_high=1, X_step=.01)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we have a set of values, $X$, and another set of values $y$. The values of $X$ are related to $y$ by a function $f(x)$, which is described by the equation $y = sin(C * X) + \varepsilon$, where $\varepsilon$ is some noise (in this case Gaussian noise with a variance of .05) and $C$ is some constant (in this case, 20, increasing the frequency of the oscillations so things look nice).
Step2: Now imagine a case like the above, where the red line values are unknown. We have points $X$, and measurements from those points $y$. We can also look at the graph and approximate the red line from the previous graph running through the center of the blue points. If we do this procedure in a mathematical way, we are learning $f(x)$ from the data!
Step3: Looking at the above plot, it is easy to see that generating the "red line" like above would be much more difficult, even though the generating function $sin()$ is the same. In a sense, you could say that the distribution of possible functions to generate those $y$ values from $X$ is very wide, and it is hard to find the "best guess" for $f(x)$.
Step4: Now that we have initialized the GP, we want to estimate a new $y$ given a new input $x$. Without any prior knowledge our guess will not be very good, which is represented by the wide blue line across the plot (our confidence bounds). Luckily, we have a set of $x$ values that are paired with $y$ values, called our training set, which we can use to learn a possible model. To make these updates, we will need a new tool: the conditional Gaussian distribution (written out after this list).
Step5: We can see from the above plots that we have a pretty good idea of the values we would get out of the function given $x = 1$. It is less clear what values we would get for $x = 3$, and only gets worse as we travel off the plot.
Step6: The numerical results above agree with our intuition looking at the final plot.
Step7: Classy
Step8: To Boldly Go...
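For reference, the "new tool" from Step4 is the standard Gaussian conditional that the conditional() function above implements (notation assumed here: $K(\cdot,\cdot)$ is the kernel covariance, $X$ the observed inputs, $X_*$ the query points):
$$\mu_* = K(X_*, X)\,K(X, X)^{-1}\,\mathbf{y}, \qquad \Sigma_* = K(X_*, X_*) - K(X_*, X)\,K(X, X)^{-1}\,K(X, X_*)$$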
|
14,951 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import timeit
import pstats, cProfile
from GWTSA import *
print 'packages successfully imported!'
%matplotlib inline
# Provide the forcings: precipitation and potential evapotranspiration
n = 50
P = E = np.zeros(n)
# Provide the model parameters for the soil module
S_rmax = 0.01 # i.e. 10 millimeters of storage
K_p = 0.001 # i.e. 0.001 m/d
Gamma = 1.0
Imax = 0.0015
# Provide some details for the timesteps to calculate the soil state
t = np.arange(0,n,1)
dt= 1
S0 = 0.5 * S_rmax # Calculate the initial soil state (assumed to be half full)
S = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=0)[1]; #1 index return the soil state S
S1 = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)[1]; #1 index return the soil state S
S2 = S0 * np.exp((-K_p)*t/(S_rmax)) #Plot the exact solution when P and E are zero and Beta=1
# Make a plot of the two solutions for visual comparison
plt.plot(t,S)
plt.plot(t,S1)
plt.plot(t,S2)
plt.legend(['explicit', 'implicit', 'analytical'], loc = 'best')
plt.ylabel('S [meter]')
plt.xlabel('Time [days]')
plt.title('exact solution vs. numerical solution')
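# For reference (a sketch, assuming the percolation law K_p*(S/S_rmax)**Gamma used by perc):
# with P = E = 0 and Gamma = 1 the storage obeys dS/dt = -(K_p/S_rmax)*S, whose solution
# is exactly the analytical curve S2 = S0*np.exp(-K_p*t/S_rmax) plotted above.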
n = 365
t = np.arange(0,n,1)
P = np.zeros(n)
P[:int(n/2.0)] = 2.0 / 1000 # All forcings must be provided in meters
E = 0.001 * np.ones(n)
# Provide the model parameters for the soil module
S_rmax = 0.01 # i.e. 10 millimeters of storage
K_p = 0.001 # i.e. 0.001 m/d
Gamma = 1.0
Imax = 0.0015
R, S = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=0)
R1, S1 = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)
plt.figure(figsize=(15,4))
plt.subplot(121)
plt.plot(t,S)
plt.plot(t,S1)
plt.ylabel('S [meter]')
plt.xlabel('Time [days]')
plt.axhline(S_rmax, color='r')
plt.legend(['explicit', 'implicit', r'$S_rmax$'], loc='best')
plt.title('Soil State over Time')
plt.ylim(0, S_rmax+0.001)
plt.subplot(122)
plt.plot(R)
plt.plot(R1)
plt.title('recharge over time')
plt.ylabel('Recharge [m/d]')
plt.xlabel('Time [days]')
plt.legend(['explicit', 'implicit', r'$S_rmax$'], loc='best')
plt.ylim(0, max(R)+0.0001)
# Provide the forcings: precipitation and potential evapotranspiration
C = np.genfromtxt('./KNMI_Bilt.txt' , delimiter=',', skip_header=10000, usecols=[2, 3]);
P = C[:,0]/10000.
E = C[:,1]/10000.
n = len(P)
t = np.arange(0,n,1)
print E
# Provide the model parameters for the soil module
S_rmax = 0.28 # i.e. 280 millimeters of storage
K_p = 0.009 # i.e. 0.009 m/d
Gamma = 2.0
Imax = 0.0015
R, S = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=0)
R1, S1 = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)
# Bar plot of the precipitation excess (takes a while to plot, so commented out here)
plt.figure(figsize=(17,8))
#plt.subplot(311)
#plt.bar(t, (P-E), lw=0)
#plt.xlim(0, n); plt.ylim(0)
#plt.title('Precipitation excess and the resulting soil state')
#plt.ylabel('precipitation excess [m]')
# Plot of the resulting Soil State
plt.subplot(312)
plt.plot(t, S)
plt.plot(t,S1)
plt.ylabel('S [m]')
plt.xlabel('Time [d]')
plt.xlim(0, n);
plt.legend(['explicit', 'implicit'])
plt.subplot(313)
plt.plot(t, R)
plt.plot(t,R1)
plt.ylabel('R [m]')
plt.xlabel('Time [d]')
plt.xlim(0, n);
plt.ylim(0,0.005)
plt.legend(['explicit', 'implicit'])
# Store the percolation functions in variables so they can be passed to the timing wrapper below
X1 = Unsat_Zone.perc
X2 = Unsat_Zone_Python.perc
# Write a wrapper for the Cython file and time it
def wrapper(X1, t, P, E, S_cap, K_sat, Beta, D, dt, solver=1):
def wrapped():
return X1(t, P, E, S_cap, K_sat, Beta, D, dt, solver=1)
return wrapped
wrapped = wrapper(X1, t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)
Cython = timeit.timeit(wrapped, number=1000)
print 'Time taken is', Cython
# Write a wrapper for the Python file and time it
def wrapper(X2, t, P, E, S_cap, K_sat, Beta, D, dt):
def wrapped():
return X2(t, P, E, S_cap, K_sat, Beta, D, dt)
return wrapped
wrapped = wrapper(X2, t, P, E, S_rmax, K_p, Gamma, Imax, dt)
Python = timeit.timeit(wrapped, number=1000)
print 'using Cython File is', (Python/Cython), 'times faster than python to solve the soil module'
cProfile.runctx("Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)", globals(), locals(), "Profile.prof", sort=-1)
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
n = 30
t = np.arange(0,n,1)
P = np.zeros(n)
P[0] = 10.0 / 1000 # All forcings must be provided in meters
E = 0.00 * np.ones(n)
dt=1
# Provide the model parameters for the soil module
S_rmax = 0.01 # i.e. 10 millimeters of storage
K_p = 0.001 # i.e. 0.001 m/d
Gamma = 1.0
Imax = 0.0015
R, S = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=0)
R1, S1 = Unsat_Zone.perc(t, P, E, S_rmax, K_p, Gamma, Imax, dt, solver=1)
plt.figure(figsize=(15,4))
plt.subplot(121)
plt.plot(t,S)
plt.plot(t,S1)
plt.ylabel('S [meter]')
plt.xlabel('Time [days]')
plt.axhline(S_rmax, color='r')
plt.legend(['explicit', 'implicit', r'$S_rmax$'], loc='best')
plt.title('Soil State over Time')
plt.ylim(0, S_rmax+0.001)
plt.subplot(122)
plt.plot(R)
plt.plot(R1)
plt.title('recharge over time')
plt.ylabel('Recharge [m/d]')
plt.xlabel('Time [days]')
plt.legend(['explicit', 'implicit', r'$S_rmax$'], loc='best')
plt.ylim(0, max(R)+0.0001)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. ANALYTICAL SOLUTION VS. NUMERICAL SOLUTION
Step2: 2. EMPTYING AND FILLING THE UNSATURATED ZONE
Step3: 3. RANDOM FORCINGS
Step4: Compare performance of Cythonized and Python Module
Step5: Speeding it up further
Step6: Delay in the unsaturated zone?
|
14,952 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pylab as pl
pl.rcParams["figure.figsize"] = 9,6
###################################################################
##This script calculates the values of Atomic Function up(x) (1971)
###################################################################
################### One Pulse of atomic function
def up1(x: float) -> float:
#Atomic function table
up_y = [0.5, 0.48, 0.460000017,0.440000421,0.420003478,0.400016184, 0.380053256, 0.360139056,
0.340308139, 0.320605107,0.301083436, 0.281802850, 0.262826445, 0.244218000, 0.226041554,
0.208361009, 0.191239338, 0.174736305, 0.158905389, 0.143991189, 0.129427260, 0.115840866,
0.103044024, 0.9110444278e-01, 0.798444445e-01, 0.694444445e-01, 0.598444445e-01,
0.510444877e-01, 0.430440239e-01, 0.358409663e-01, 0.294282603e-01, 0.237911889e-01,
0.189053889e-01, 0.147363055e-01, 0.112393379e-01, 0.836100883e-02, 0.604155412e-02,
0.421800000e-02, 0.282644445e-02, 0.180999032e-02, 0.108343562e-02, 0.605106267e-03,
0.308138660e-03, 0.139055523e-03, 0.532555251e-04, 0.161841328e-04, 0.347816874e-05,
0.420576116e-05, 0.167693347e-07, 0.354008603e-10, 0]
up_x = np.arange(0.5, 1.01, 0.01)
res = 0.
if ((x>=0.5) and (x<=1)):
for i in range(len(up_x) - 1):
if (up_x[i] >= x) and (x < up_x[i+1]):
N1 = 1 - (x - up_x[i])/0.01
res = N1 * up_y[i] + (1 - N1) * up_y[i+1]
return res
return res
############### Atomic Function Pulse with width, shift and scale #############
def upulse(t: float, a = 1., b = 0., c = 1., d = 0.) -> float:
x = (t - b)/a
res = 0.
if (x >= 0.5) and (x <= 1):
res = up1(x)
elif (x >= 0.0) and (x < 0.5):
res = 1 - up1(1 - x)
elif (x >= -1 and x <= -0.5):
res = up1(-x)
elif (x > -0.5) and (x < 0):
res = 1 - up1(1 + x)
res = d + res * c
return res
############### Atomic Function Applied to list with width, shift and scale #############
def up(x: list, a = 1., b = 0., c = 1., d = 0.) -> list:
res = []
for i in range(len(x)):
res.append(upulse(x[i], a, b, c, d))
return res
x = np.arange(-2.0, 2.0, 0.01)
pl.title('Atomic Function up(x)')
pl.plot(x, up(x), label='Atomic Function')
pl.grid(True)
pl.show()
############### Atomic String #############
def AString1(x: float) -> float:
res = 1 * (upulse(x/2.0 - 0.5) - 0.5)
return res
############### Atomic String Pulse with width, shift and scale #############
def AStringPulse(t: float, a = 1., b = 0., c = 1., d = 0.) -> float:
x = (t - b)/a
if (x < -1):
res = -0.5
elif (x > 1):
res = 0.5
else:
res = AString1(x)
res = d + res * c
return res
###### Atomic String Applied to list with width, shift and scale #############
def AString(x: list, a = 1., b = 0., c = 1., d = 0.) -> list:
res = []
for i in range(len(x)):
res.append(AStringPulse(x[i], a, b, c, d))
#res[i] = AStringPulse(x[i], a, b, c)
return res
###### Summation of two lists #############
def Sum(x1: list, x2: list) -> list:
res = []
for i in range(len(x1)):
res.append(x1[i] + x2[i])
return res
x = np.arange(-2.0, 2.0, 0.01)
pl.title('Atomic String Function')
pl.plot(x, AString(x, 1.0, 0, 1, 0), label='Atomic String')
pl.grid(True)
pl.show()
x = np.arange(-2.0, 2.0, 0.01)
#This Calculates Derivative
dx = x[1] - x[0]
dydx = np.gradient(up(x), dx)
pl.plot(x, up(x), label='Atomic Function')
pl.plot(x, AString(x, 1.0, 0, 1, 0), linewidth=2, label='Atomic String Function')
pl.plot(x, dydx, '--', label='A-Function Derivative')
pl.title('Atomic and AString Functions')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-2.0, 2.0, 0.01)
pl.plot(x, up(x), label='Atomic Function', linewidth=2)
pl.plot(x, dydx, '--', label='Atomic Function Derivative', linewidth=1, color="Green")
pl.title('Atomic Function and Its Derivative')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-2.0, 2.0, 0.01)
pl.plot(x, up(x, 1, -1), '--', linewidth=1, label='Atomic Function at x=-1')
pl.plot(x, up(x, 1, +0), '--', linewidth=1, label='Atomic Function at x=0')
pl.plot(x, up(x, 1, +1), '--', linewidth=1, label='Atomic Function at x=+1')
pl.plot(x, Sum(up(x, 1, -1), Sum(up(x), up(x, 1, 1))), linewidth=2, label='Atomic Function Compounding')
pl.title('Atomic Function Compounding represents 1')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
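# Quick numeric check of the partition-of-unity property illustrated above
# (illustrative sketch; the 0.01 grid and the shifts at -1, 0, +1 mirror the plot).
xc = np.arange(-0.5, 0.5, 0.01)
unity = np.array(Sum(up(xc, 1, -1), Sum(up(xc), up(xc, 1, 1))))
print('Max deviation from 1 on [-0.5, 0.5]:', np.max(np.abs(unity - 1.0)))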
x = np.arange(-5.0, 5.0, 0.01)
pl.plot(x, up(x), label='Atomic Function', linewidth=2)
#pl.plot(x, dydx, '--', label='Atomic Function Derivative', linewidth=1, color="Green")
pl.title('Atomic Function is compactly supported')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
######### Presentation of Atomic Function via Atomic Strings ##########
x = np.arange(-2.0, 2.0, 0.01)
pl.plot(x, AString(x, 1, 0, 1, 0), '--', linewidth=1, label='AString(x)')
pl.plot(x, AString(x, 0.5, -0.5, +1, 0), '--', linewidth=2, label='+AString(2x+1)')
pl.plot(x, AString(x, 0.5, +0.5, -1, 0), '--', linewidth=2, label='-AString(2x-1)')
#pl.plot(x, up(x, 1.0, 0, 1, 0), '--', linewidth=1, label='Atomic Function')
AS2 = Sum(AString(x, 0.5, -0.5, +1, 0), AString(x, 0.5, +0.5, -1, 0))
pl.plot(x, AS2, linewidth=3, label='Up(x) via Strings')
pl.title('Atomic Function as a Combination of AStrings')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-2, 2.0, 0.01)
pl.title('AString and Fabius Functions')
pl.plot(x, AString(x, 0.5, 0.5, 1, 0.5), label='Fabius Function')
pl.plot(x, AString(x, 1, 0, 1, 0), label='AString Function')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-3, 3, 0.01)
pl.plot(x, AString(x, 1, -1.0, 1, 0), '--', linewidth=1, label='AString 1')
pl.plot(x, AString(x, 1, +0.0, 1, 0), '--', linewidth=1, label='AString 2')
pl.plot(x, AString(x, 1, +1.0, 1, 0), '--', linewidth=1, label='AString 3')
AS2 = Sum(AString(x, 1, -1.0, 1, 0), AString(x, 1, +0.0, 1, 0))
AS3 = Sum(AS2, AString(x, 1, +1.0, 1, 0))
pl.plot(x, AS3, label='AStrings Sum', linewidth=2)
pl.title('Atomic Strings compose Line')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-40.0, 40.0, 0.01)
width = 10.0
height = 10.0
#pl.plot(x, ABline (x, 1, 0), label='ABLine 1*x')
pl.plot(x, AString(x, width, -3*width/2, height, -3*width/2), '--', linewidth=1, label='AString 1')
pl.plot(x, AString(x, width, -1*width/2, height, -1*width/2), '--', linewidth=1, label='AString 2')
pl.plot(x, AString(x, width, +1*width/2, height, +1*width/2), '--', linewidth=1, label='AString 3')
pl.plot(x, AString(x, width, +3*width/2, height, +3*width/2), '--', linewidth=1, label='AString 4')
AS2 = Sum(AString(x, width, -3*width/2, height, -3*width/2), AString(x, width, -1*width/2, height, -1*width/2))
AS3 = Sum(AS2, AString(x, width,+1*width/2, height, +1*width/2))
AS4 = Sum(AS3, AString(x, width,+3*width/2, height, +3*width/2))
pl.plot(x, AS4, label='AStrings Joins', linewidth=2)
pl.title('Atomic Strings Combinations')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-30.0, 30.0, 0.01)
#pl.plot(x, ABline (x, 1, 0), label='ABLine 1*x')
pl.plot(x, AString(x, 10.0,-15, 10, -15), '--', linewidth=1, label='AString Quantum 1')
pl.plot(x, AString(x, 10.0, -5, 10, -5), '--', linewidth=1, label='AString Quantum 2')
pl.plot(x, AString(x, 10.0, +5, 10, +5), '--', linewidth=1, label='AString Quantum 3')
pl.plot(x, AString(x, 10.0,+15, 10, +15), '--', linewidth=1, label='AString Quantum 4')
AS2 = Sum(AString(x, 10.0, -15, 10, -15), AString(x, 10., -5, 10, -5))
AS3 = Sum(AS2, AString(x, 10, +5, 10, +5))
AS4 = Sum(AS3, AString(x, 10,+15, 10, +15))
pl.plot(x, AS4, label='Spacetime Dimension', linewidth=2)
pl.title('Representing Spacetime by joining of Atomic Strings')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
x = np.arange(-50.0, 50.0, 0.1)
dx = x[1] - x[0]
CS6 = Sum(up(x, 5, -30, 5, 5), up(x, 15, 0, 15, 5))
CS6 = Sum(CS6, up(x, 10, +30, 10, 5))
pl.plot(x, CS6, label='Spacetime Density')
IntC6 = np.cumsum(CS6)*dx/50
pl.plot(x, IntC6, label='Spacetime Shape (Geodesics)')
DerC6 = np.gradient(CS6, dx)
pl.plot(x, DerC6, label='Spacetime Curvature')
LightTrajectory = -10 -IntC6/5
pl.plot(x, LightTrajectory, label='Light Trajectory')
pl.title('Shape of Curved Spacetime model')
pl.legend(loc='best', numpoints=1)
pl.grid(True)
pl.show()
#pl.rcParams["figure.figsize"] = 16,12
book = pl.imread('BookSpread_small.png')
pl.imshow(book)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color=teal>2. Atomic String Function (AString) is an Integral and Composing Branch of Atomic Function up(x) (introduced in 2017 by S. Yu. Eremenko)</font>
Step2: Atomic String, Atomic Function (AF) and AF Derivative plotted together
Step3: <font color=teal>3. Properties of Atomic Function Up(x)</font>
Step4: 3.2. Partition of Unity
Step5: 3.3. Atomic Function (AF) is a 'finite', 'compactly supported', or 'solitary' function
Step6: 3.4 Atomic Function is a non-analytical function (can not be represented by Taylor's series), but with known Fourier Transformation allowing to exactly calculate AF in certain points, with tabular representation provided in script above.
Step7: 4.3. AStrings and Atomic Solitons
Step8: 4.6. Partition of Line from Atomic String functions
Step9: Partition based on AString with certain width and height depending on a size of 'quanta'
Step10: <font color=teal>5. Model of Spacetime composed from AStrings Quanta (AString Metriants)
Step11: 5.3. Model of Spacetime curvature and gravity based on AStrings
Step12: <font color=teal>6. 'Soliton Nature' book</font>
|
14,953 | <ASSISTANT_TASK:>
Python Code:
import numpy
import numpy as np
# find out where we are in the file directory
import os, sys
print(os.getcwd())
datafilefolder = "./data/"
m=5
n=4
A = 11.111111*np.array(range(m*n),dtype=np.float32).reshape((m,n))
print(A)
Afilename = "A_mat_5_4.npy"
try:
A.tofile(datafilefolder+ Afilename )
except IOError:
if not os.path.exists(datafilefolder):
os.makedirs(datafilefolder)
print(os.listdir(datafilefolder))
print(os.listdir(os.getcwd()))
A_in = np.fromfile(datafilefolder+ Afilename, dtype=np.float32)
print(A_in)
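# Note: np.fromfile returns a flat 1-D array -- the raw binary stores no shape
# information, so the original (m, n) shape has to be restored by hand:
print(A_in.reshape((m, n)))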
import pandas
import pandas as pd
copoly_v_DF = pd.read_csv(datafilefolder + "copolymer_viscosity.csv")
copoly_v_DF.describe()
copoly_v_DF.head()
Manu_learn = pd.read_csv(datafilefolder+"manuf_learn.dat",header=None,delim_whitespace=True)
Manu_learn
Manu_learn.values.astype(np.float32).shape
try:
Manu_learn.values.astype(np.float32).tofile(datafilefolder+ "manuf_learn.npy" )
except IOError:
if not os.path.exists(datafilefolder):
os.makedirs(datafilefolder)
manuf_learn_in = np.fromfile(datafilefolder+ "manuf_learn.npy", dtype=np.float32)
manuf_learn_in.shape
manuf_learn_in = manuf_learn_in.reshape((20,8))
manuf_learn_in[:3,:]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: numpy.ndarray.tofile
Step2: numpy.fromfile
Step3: Then go to CUDA C++14 file binIO_playground.cu or the C++14 version (serial version), binIO_playground.cpp. Load it with std
Step4: Comma separated, ,
Step5: Whitespace separated, or
Step6: So one possibility is to read the raw file with Python pandas, take the pandas DataFrame's values as a NumPy array, and then write it out to a binary file with NumPy's .tofile.
|
14,954 | <ASSISTANT_TASK:>
Python Code:
def colors_subselect(colors, num_classes=21):
dt = len(colors) // num_classes
sub_colors = []
for i in range(num_classes):
color = colors[i*dt]
if isinstance(color[0], float):
sub_colors.append([int(c * 255) for c in color])
else:
sub_colors.append([c for c in color])
return sub_colors
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw a collection of lines on an image."""
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_rectangle(img, p1, p2, color=[255, 0, 0], thickness=2):
cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
def draw_bbox(img, bbox, shape, label, color=[255, 0, 0], thickness=2):
p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
p1 = (p1[0]+15, p1[1])
cv2.putText(img, str(label), p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
shape = img.shape
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
color = colors[classes[i]]
# Draw bounding box...
p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
# Draw text...
s = '%s/%.3f' % (classes[i], scores[i])
p1 = (p1[0]-5, p1[1])
cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)
colors = colors_subselect(mpcm.plasma.colors, num_classes=21)
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
from datasets import pascalvoc_2007
from datasets import pascalvoc_2012
DATASET_DIR = '/media/paul/DataExt4/PascalVOC/dataset/'
SPLIT_NAME = 'train'
BATCH_SIZE = 16
# Dataset provider loading data from the dataset.
dataset = pascalvoc_2007.get_split(SPLIT_NAME, DATASET_DIR)
provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
shuffle=False,
# num_epochs=1,
common_queue_capacity=2 * BATCH_SIZE,
common_queue_min=BATCH_SIZE)
[image, shape, bboxes, labels] = provider.get(['image', 'shape', 'object/bbox', 'object/label'])
print('Dataset:', dataset.data_sources, '|', dataset.num_samples)
# images = tf.train.batch(
# [image_crop],
# batch_size=BATCH_SIZE,
# num_threads=1,
# capacity=5 * BATCH_SIZE)
# Problem: image shape is not fully defined => random crop with deterministic size.
xy = tf.random_uniform((2, ), minval=0, maxval=shape[0] // 3, dtype=tf.int64)
image_crop = tf.slice(image, [0, 0, 0], [250, 250, 3])
print('Original vs crop:', image.get_shape(), image_crop.get_shape())
# with queues.QueueRunners(sess):
# Start populating queues.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Draw groundtruth bounding boxes using TF routine.
image_bboxes = tf.squeeze(tf.image.draw_bounding_boxes(tf.expand_dims(tf.to_float(image) / 255., 0),
tf.expand_dims(bboxes, 0)))
# Eval and display the image + bboxes.
rimg, rshape, rbboxes, rlabels = isess.run([image_bboxes, shape, bboxes, labels])
print('Image shape:', rimg.shape, rshape)
print('Bounding boxes:', rbboxes)
print('Labels:', rlabels)
fig = plt.figure(figsize = (10,10))
plt.imshow(rimg)
from nets import ssd_vgg_300
from nets import ssd_common
from preprocessing import ssd_vgg_preprocessing as ssd_preprocessing
ckpt_filename = '/media/paul/DataExt4/PascalVOC/training/ckpts/SSD_300x300_ft/ssd_300_vgg.ckpt'
ckpt_filename = '/home/paul/Development/Research/SSD-Tensorflow/logs/ssd_300_vgg/model.ckpt-1084'
ckpt_filename = './ssd_300_vgg.ckpt'
# Image pre-processing
out_shape = (300, 300)
image_pre, labels_pre, bboxes_pre, bbox_img = \
    ssd_preprocessing.preprocess_for_eval(image, labels, bboxes, out_shape,
resize=ssd_preprocessing.Resize.PAD_AND_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)
# SSD construction.
reuse = True if 'ssd' in locals() else None
params = ssd_vgg_300.SSDNet.default_params
ssd = ssd_vgg_300.SSDNet(params)
with slim.arg_scope(ssd.arg_scope(weight_decay=0.0005)):
predictions, localisations, logits, end_points = ssd.net(image_4d, is_training=False, reuse=reuse)
# SSD default anchor boxes.
img_shape = out_shape
layers_anchors = ssd.anchors(img_shape, dtype=np.float32)
# Targets encoding.
target_labels, target_localizations, target_scores = ssd_common.tf_ssd_bboxes_encode(labels, bboxes_pre, layers_anchors)
# Initialize variables.
init_op = tf.global_variables_initializer()
isess.run(init_op)
# Restore SSD model.
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
# Run model.
[rimg, rpredictions, rlocalisations, glabels, gbboxes, rbbox_img, rt_labels, rt_localizations, rt_scores] = \
isess.run([image_4d, predictions, localisations, labels, bboxes_pre, bbox_img,
target_labels, target_localizations, target_scores])
# Compute classes and bboxes from the net outputs.
rclasses, rscores, rbboxes,_,_ = ssd_common.ssd_bboxes_select(rpredictions, rlocalisations, layers_anchors,
threshold=0.5, img_shape=img_shape,
num_classes=21, decode=True)
rbboxes = ssd_common.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes, top_k=400, priority_inside=False)
rclasses, rscores, rbboxes = ssd_common.bboxes_nms(rclasses, rscores, rbboxes, threshold=0.35)
# Draw bboxes
img_bboxes = np.copy(ssd_preprocessing.np_image_unwhitened(rimg[0]))
bboxes_draw_on_img(img_bboxes, rclasses, rscores, rbboxes, colors_tableau, thickness=1)
# bboxes_draw_on_img(img_bboxes, test_labels, test_scores, test_bboxes, colors_tableau, thickness=1)
print('Labels / scores:', list(zip(rclasses, rscores)))
print('Grountruth labels:', list(glabels))
print(gbboxes)
fig = plt.figure(figsize = (10,10))
plt.imshow(img_bboxes)
test_bboxes = []
test_labels = []
test_scores = []
for i in range(0, 3):
yref, xref, href, wref = layers_anchors[i]
ymin = yref - href / 2.
xmin = xref - wref / 2.
ymax = yref + href / 2.
xmax = xref + wref / 2.
bb = np.stack([ymin, xmin, ymax, xmax], axis=-1)
idx = yref.shape[0] // 2
idx = np.random.randint(yref.shape[0])
# print(bb[idx, idx].shape)
test_bboxes.append(bb[idx, idx])
test_labels.append(np.ones(href.shape, dtype=np.int64) * i)
test_scores.append(np.ones(href.shape))
test_bboxes = np.concatenate(test_bboxes)
test_labels = np.concatenate(test_labels)
test_scores = np.concatenate(test_scores)
print(test_bboxes.shape)
print(test_labels.shape)
print(test_scores.shape)
rt_labels, rt_localizations, rt_scores
for i in range(len(rt_labels)):
print(rt_labels[i].shape)
idxes = np.where(rt_labels[i] > 0)
# idxes = np.where(rt_scores[i] > 0.)
print(idxes)
print(rt_localizations[i][idxes])
print(list(zip(rt_labels[i][idxes], rt_scores[i][idxes])))
print()
# fig = plt.figure(figsize = (8,8))
# plt.imshow(ssd_preprocessing.np_image_unwhitened(rimg[0]))
# print('Ground truth labels: ', rlabels)
# Request threads to stop. Just to avoid error messages
# coord.request_stop()
# coord.join(threads)
PleaseStopHere;  # intentional NameError: halts "Run All" here, before the demo-image section below
# Input placeholder.
net_shape = (300, 300)
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_preprocessing.preprocess_for_eval(
img_input, labels, None, net_shape, resize=ssd_preprocessing.Resize.PAD_AND_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)
# Re-define the model
reuse = True if 'ssd' in locals() else None
with slim.arg_scope(ssd.arg_scope(weight_decay=0.0005)):
predictions, localisations, logits, end_points = ssd.net(image_4d, is_training=False, reuse=reuse)
# Main processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=0.35, net_shape=(300, 300)):
# Run SSD network.
rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
feed_dict={img_input: img})
# Compute classes and bboxes from the net outputs.
rclasses, rscores, rbboxes, rlayers, ridxes = ssd_common.ssd_bboxes_select(
rpredictions, rlocalisations, layers_anchors,
threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)
# print(list(zip(classes, scores)))
# print(rlayers)
# print(ridxes)
rbboxes = ssd_common.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes,
top_k=400, priority_inside=True, margin=0.0)
rclasses, rscores, rbboxes = ssd_common.bboxes_nms(rclasses, rscores, rbboxes, threshold=nms_threshold)
# Resize bboxes to original image shape.
rbboxes = ssd_common.bboxes_resize(rbbox_img, rbboxes)
return rclasses, rscores, rbboxes
# Test on demo images.
path = '../demo/'
image_names = sorted(os.listdir(path))
img = mpimg.imread(path + image_names[3])
rclasses, rscores, rbboxes = process_image(img)
# Draw results.
img_bboxes = np.copy(img)
bboxes_draw_on_img(img_bboxes, rclasses, rscores, rbboxes, colors_tableau, thickness=2)
fig = plt.figure(figsize = (12, 12))
plt.imshow(img_bboxes)
idxes = np.where(inside)
rscores[idxes]
a = tf.constant([[5.0, 2], [5.0, 2]])
b = tf.constant([5.0, 2])
c = a * b
d = tf.nn.l2_normalize(a, dim=1)
# We can just use 'c.eval()' without passing 'sess'
print(d.eval())
import caffe
import numpy as np
from caffe.proto import caffe_pb2
caffe_filename = '/media/paul/DataExt4/PascalVOC/training/ckpts/SSD_300x300_ft/ssd_300_vgg.caffemodel'
caffemodel_params = caffe_pb2.NetParameter()
caffemodel_str = open(caffe_filename, 'rb').read()
caffemodel_params.ParseFromString(caffemodel_str)
layers = caffemodel_params.layer
names = [(i, l.name) for i, l in enumerate(layers)]
types = set([l.type for i, l in enumerate(layers)])
print(types)
names
layer = layers[59]
layer = layers[1]
print(layer.type)
a = np.array(layer.blobs[0].data)
s = layer.blobs[0].shape
print(s, 38*38)
# print(a)
from nets import caffe_scope
csc = caffe_scope.CaffeScope()
d = {}
d[csc.conv_biases_init] = 0
d[csc.conv_biases_init] += 1
min_dim = 300
mbox_source_layers = ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2']
min_ratio = 15
max_ratio = 90
step = int(math.floor((max_ratio - min_ratio) / (len(mbox_source_layers) - 2)))
min_sizes = []
max_sizes = []
for ratio in range(min_ratio, max_ratio + 1, step):
min_sizes.append(min_dim * ratio / 100.)
max_sizes.append(min_dim * (ratio + step) / 100.)
min_sizes = [min_dim * 7 / 100.] + min_sizes
max_sizes = [min_dim * 15 / 100.] + max_sizes
print(min_sizes)
print(max_sizes)
feat_shapes=[(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
steps = [8, 16, 32, 64, 100, 300]
offset = 0.5
for i in range(len(steps)):
print((feat_shapes[i][0] - offset) * steps[i] / 300, (feat_shapes[i][0] - offset) / feat_shapes[i][0])
37.5 * 8. / 300
.5 / 38
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some drawing routines
Step2: Pascal VOC dataset
Step3: Test SSD-300 model using TFRecords pipeline
Step4: Test SSD-300 model using sample images
Step5: Some TensorFlow tests...
Step6: A few tests on Caffe model files...
|
14,955 | <ASSISTANT_TASK:>
Python Code:
# load the data into a pandas DataFrame
import pandas as pd
col_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']
pima = pd.read_csv('./pima-indians-diabetes.data.txt', header=None, names=col_names)
# display the first 5 rows
pima.head()
# define X and y; we will assume we only have 4 features: 'pregnant', 'insulin', 'bmi', 'age'
feature_cols = ['pregnant', 'insulin', 'bmi', 'age']
X = pima[feature_cols]
y = pima.label
# split X and y into training and testing sets
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
# train a logistic regression model
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# predict on the test data
y_pred_class = logreg.predict(X_test)
# accuracy
from sklearn import metrics
print (metrics.accuracy_score(y_test, y_pred_class))
# examine the class distribution of the test data; the dominant class is class 0
print ('class 0:', sum(y_test==0))
print ('class 1:', sum(y_test==1))
import numpy as np
zero_preds = np.zeros_like(y_test) # an array of 0s (class 0) with the same size as y_test
print ('Null accuracy:', metrics.accuracy_score(y_test, zero_preds))
# display the first 25 values
print 'True:', y_test.values[0:25]
print 'Pred:', y_pred_class[0:25]
# .................. What do you notice?
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(8,4))
plt.subplot(1,2,1)
_=plt.hist(y_test.values, color='red', normed=True)
plt.title('True values')
_=plt.ylim(0,10)
plt.subplot(1,2,2)
_=plt.hist(y_pred_class, color ='blue', normed=True)
plt.title('Predicted values')
_=plt.ylim(0,10)
print (metrics.confusion_matrix(y_test, y_pred_class))
print 'True:', y_test.values[0:25]
print 'Pred:', y_pred_class[0:25]
# store the confusion matrix and extract TP, TN, FP, FN
confusion = metrics.confusion_matrix(y_test, y_pred_class)
TP = confusion[1, 1]
TN = confusion[0, 0]
FP = confusion[0, 1]
FN = confusion[1, 0]
print (TP + TN) / float(TP + TN + FP + FN)
print metrics.accuracy_score(y_test, y_pred_class)
print (FP + FN) / float(TP + TN + FP + FN)
print 1 - metrics.accuracy_score(y_test, y_pred_class)
print TP / float(TP + FN)
print metrics.recall_score(y_test, y_pred_class)
print TP / float(TP + FP)
print metrics.precision_score(y_test, y_pred_class)
print TN / float(TN + FP)
print FP / float(TN + FP)
# display the first 10 predictions
logreg.predict(X_test)[0:10]
# display the predicted class-membership probabilities (first 10)
logreg.predict_proba(X_test)[0:10, :]
# display the predicted probabilities for class 1 (first 10)
logreg.predict_proba(X_test)[0:10, 1]
# store the predicted probabilities for class 1
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
# histogram of the predicted probabilities
plt.hist(y_pred_prob, bins=8)
plt.xlim(0, 1)
plt.title('Histogram of predicted probabilities')
plt.xlabel('Predicted probability of being diabetic')
plt.ylabel('Frequency')
# predict diabetes if the predicted probability is greater than 0.3
y_pred_class = np.array(y_pred_prob>0.3, dtype=int)
# display the first 10 predicted probabilities
y_pred_prob[0:10]
# display the first 10 predicted classes
y_pred_class[0:10]
# previous confusion matrix (default threshold = 0.5)
print confusion
# new confusion matrix (threshold = 0.3)
print metrics.confusion_matrix(y_test, y_pred_class)
# sensitivity has increased (previously sensitivity = 0.24)
print 46 / float(46 + 16)
# specificity has decreased (previously specificity = 0.91)
print 80 / float(80 + 50)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob)
plt.plot(fpr, tpr)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title('ROC curve of the diabetes classifier')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.grid(True)
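# Illustrative follow-up (not in the original analysis; the 0.9 target is an
# arbitrary example): pick the first threshold reaching that sensitivity and
# inspect the trade-off. tpr is non-decreasing, so argmax finds the first hit.
idx = np.argmax(tpr >= 0.9)
print 'threshold %.3f -> sensitivity %.2f, specificity %.2f' % (thresholds[idx], tpr[idx], 1 - fpr[idx])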
print metrics.roc_auc_score(y_test, y_pred_prob)
x = np.array( [0.6, 0.4, 0.2] )
np.array (x>0.3 , dtype=int )
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Label
Step2: Classification accuracy
Step3: The prediction accuracy is 0.6927, which at first glance may seem satisfactory. But is that really the case?
Step4: Model performance
Step5: Conclusion
Step6: Each instance of the test set belongs to exactly one and only one cell of the confusion matrix
Step7: Performance indicators (metrics) derived from the confusion matrix (collected as formulas after this list)
Step8: Classification error (also called misclassification rate)
Step9: Sensitivity, also called recall or "true positive rate"
Step10: Precision
Step11: Specificity
Step12: False Positive Rate
Step13: Conclusion
Step14: Lower the prediction threshold for diabetes to increase the classifier's sensitivity
Step15: Conclusion
Step16: The ROC curve helps you choose a threshold that gives the balance between sensitivity and specificity that fits your needs
Step17: Interpretation of AUC
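For reference, the metrics listed above written out from the confusion-matrix counts (TP, TN, FP, FN), exactly as they are computed in the code:
$$\text{accuracy} = \frac{TP+TN}{TP+TN+FP+FN}, \qquad \text{error rate} = \frac{FP+FN}{TP+TN+FP+FN}$$
$$\text{sensitivity (recall)} = \frac{TP}{TP+FN}, \qquad \text{precision} = \frac{TP}{TP+FP}, \qquad \text{specificity} = \frac{TN}{TN+FP}, \qquad \text{FPR} = \frac{FP}{TN+FP} = 1 - \text{specificity}$$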
|
14,956 | <ASSISTANT_TASK:>
Python Code:
import os
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import clone
from sklearn import preprocessing
from sklearn import svm
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
import datetime as dt
fp_df = os.path.expanduser('~/cltk_data/user_data/tlg_bow_df.pickle')
dataframe_bow = joblib.load(fp_df)
Y = dataframe_bow['epithet']
X = dataframe_bow.drop(['epithet', 'id', 'author'], 1)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
def scale_data(X_train, X_test, Y_train, Y_test):
    '''Take vectors, scale them, and save the fitted scaler.
    -PREPROCESSING
    -Here, scaled data has zero mean and unit variance
-We save the scaler to later use with testing/prediction data
'''
print('Scaling data ...')
t0 = dt.datetime.utcnow()
scaler = preprocessing.StandardScaler().fit(X_train)
fp_scaler = os.path.expanduser('~/cltk_data/user_data/tlg_bow_scaler.pickle')
joblib.dump(scaler, fp_scaler)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
return X_train_scaled, X_test_scaled, Y_train, Y_test
X_train_scaled, X_test_scaled, Y_train, Y_test = scale_data(X_train, X_test, Y_train, Y_test)
def run_tree(X_train_scaled, X_test_scaled, Y_train, Y_test):
    '''Run decision tree with scikit.
    Experiment with: 'max_depth'
-This is where we define the models with pre-defined parameters
-We can learn these parameters given our data
'''
print('Defining and fitting models ...')
t0 = dt.datetime.utcnow()
dec_tree = DecisionTreeClassifier()
dec_tree.fit(X_train_scaled, Y_train)
fp_model_pickle = os.path.expanduser('~/cltk_data/user_data/tlg_bow_dt.pickle')
joblib.dump(dec_tree, fp_model_pickle)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
Y_prediction_tree = dec_tree.predict(X_test_scaled)
print('tree_predictions ', Y_prediction_tree)
expected = Y_test
print('actual_values ', expected)
print()
print('----Tree_report--------------------------------')
print(classification_report(expected, Y_prediction_tree))
run_tree(X_train_scaled, X_test_scaled, Y_train, Y_test)
def run_random_forest(X_train_scaled, X_test_scaled, Y_train, Y_test):
    '''Scikit random forest.
    Experiment with 'n_estimators'
    '''
t0 = dt.datetime.utcnow()
n_estimators = 30
rf_model = RandomForestClassifier(n_estimators=n_estimators)
# Train
clf = clone(rf_model)
clf = rf_model.fit(X_train_scaled, Y_train)
#joblib.dump(clf, 'models/random_forest.pickle')
fp_model_pickle = os.path.expanduser('~/cltk_data/user_data/tlg_bow_fandom_forest.pickle')
joblib.dump(clf, fp_model_pickle)
scores = clf.score(X_train_scaled, Y_train)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
Y_prediction = clf.predict(X_test_scaled)
print('tree_predictions ', Y_prediction)
expected = Y_test
print('actual_values ', expected)
print()
print('----Random forest report--------------------------------')
print(classification_report(expected, Y_prediction))
run_random_forest(X_train_scaled, X_test_scaled, Y_train, Y_test)
def run_svc(X_train_scaled, X_test_scaled, Y_train, Y_test):
    '''Run SVC with scikit.'''
# This is where we define the models with pre-defined parameters
# We can learn these parameters given our data
print('Defining and fitting SVC model ...')
t0 = dt.datetime.utcnow()
scv = svm.LinearSVC(C=100.)
scv.fit(X_train_scaled, Y_train)
fp_model_pickle = os.path.expanduser('~/cltk_data/user_data/tlg_bow_svc.pickle')
joblib.dump(scv, fp_model_pickle)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
Y_prediction_svc = scv.predict(X_test_scaled)
print('svc_predictions ', Y_prediction_svc)
expected = Y_test
print('actual_values ', expected)
print()
print('----SVC_report--------------------------------')
print(classification_report(expected, Y_prediction_svc))
run_svc(X_train_scaled, X_test_scaled, Y_train, Y_test)
def run_ada_boost(X_train_scaled, X_test_scaled, Y_train, Y_test):
    '''Scikit AdaBoost classifier (on shallow decision trees).
    For plotting see:
    http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_iris.html
    Experiment with 'n_estimators'
    '''
n_estimators = 30
ada_classifier = AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)
# Train
clf = clone(ada_classifier)
clf = ada_classifier.fit(X_train_scaled, Y_train)
fp_model_pickle = os.path.expanduser('~/cltk_data/user_data/tlg_bow_ada_boost.pickle')
joblib.dump(clf, fp_model_pickle)
scores = clf.score(X_train_scaled, Y_train)
Y_prediction = clf.predict(X_test_scaled)
print('tree_predictions ', Y_prediction)
expected = Y_test
print('actual_values ', expected)
print()
print(classification_report(expected, Y_prediction))
run_ada_boost(X_train_scaled, X_test_scaled, Y_train, Y_test)
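# Illustrative follow-up (a sketch, not part of the original run): reload the scaler and
# the random-forest model from the exact paths used in the joblib.dump calls above and
# predict without re-training. (The 'fandom_forest' filename mirrors the path used above.)
saved_scaler = joblib.load(os.path.expanduser('~/cltk_data/user_data/tlg_bow_scaler.pickle'))
saved_rf = joblib.load(os.path.expanduser('~/cltk_data/user_data/tlg_bow_fandom_forest.pickle'))
print(saved_rf.predict(saved_scaler.transform(X_test))[:10])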
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we run this a second time, on the second (b) feature table that has removed all epithets with fewer than 27 representative documents. The results are better (overall F1 score for decision tree is 0.44, random forest is 0.47; in a these were 0.33 and 0.40, respectively).
Step3: Decision tree
Step5: Random forest
Step7: SVC
Step9: ADA boost
|
14,957 | <ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import cntk as C
from cntk import Trainer
from cntk.layers import default_options
from cntk.device import set_default_device, gpu, cpu
from cntk.initializer import normal
from cntk.io import (MinibatchSource, CTFDeserializer, StreamDef, StreamDefs,
INFINITELY_REPEAT)
from cntk.layers import Dense, Convolution2D, ConvolutionTranspose2D, BatchNormalization
from cntk.learners import (adam, UnitType, learning_rate_schedule,
momentum_as_time_constant_schedule, momentum_schedule)
from cntk.logging import ProgressPrinter
%matplotlib inline
# Select the right target device when this notebook is being tested:
if 'TEST_DEVICE' in os.environ:
import cntk
if os.environ['TEST_DEVICE'] == 'cpu':
C.device.set_default_device(C.device.cpu())
else:
C.device.set_default_device(C.device.gpu(0))
C.device.set_default_device(C.device.gpu(0))
isFast = True
# Ensure the training data is generated and available for this tutorial
# We search in two locations in the toolkit for the cached MNIST data set.
data_found = False
for data_dir in [os.path.join("..", "Examples", "Image", "DataSets", "MNIST"),
os.path.join("data", "MNIST")]:
train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
if os.path.isfile(train_file):
data_found = True
break
if not data_found:
raise ValueError("Please generate the data by completing CNTK 103 Part A")
print("Data directory is {0}".format(data_dir))
def create_reader(path, is_training, input_dim, label_dim):
deserializer = CTFDeserializer(
filename = path,
streams = StreamDefs(
labels_unused = StreamDef(field = 'labels', shape = label_dim, is_sparse = False),
features = StreamDef(field = 'features', shape = input_dim, is_sparse = False
)
)
)
return MinibatchSource(
deserializers = deserializer,
randomize = is_training,
max_sweeps = INFINITELY_REPEAT if is_training else 1
)
np.random.seed(123)
def noise_sample(num_samples):
return np.random.uniform(
low = -1.0,
high = 1.0,
size = [num_samples, g_input_dim]
).astype(np.float32)
# architectural parameters
img_h, img_w = 28, 28
kernel_h, kernel_w = 5, 5
stride_h, stride_w = 2, 2
# Input / Output parameter of Generator and Discriminator
g_input_dim = 100
g_output_dim = d_input_dim = img_h * img_w
# We expect the kernel shapes to be square in this tutorial and
# the strides to be of the same length along each data dimension
if kernel_h == kernel_w:
gkernel = dkernel = kernel_h
else:
raise ValueError('This tutorial needs square shaped kernel')
if stride_h == stride_w:
gstride = dstride = stride_h
else:
raise ValueError('This tutorial needs same stride in all dims')
# Helper functions
def bn_with_relu(x, activation=C.relu):
h = BatchNormalization(map_rank=1)(x)
return C.relu(h)
# We use param-relu function to use a leak=0.2 since CNTK implementation
# of Leaky ReLU is fixed to 0.01
def bn_with_leaky_relu(x, leak=0.2):
h = BatchNormalization(map_rank=1)(x)
r = C.param_relu(C.constant((np.ones(h.shape)*leak).astype(np.float32)), h)
return r
def convolutional_generator(z):
with default_options(init=C.normal(scale=0.02)):
print('Generator input shape: ', z.shape)
s_h2, s_w2 = img_h//2, img_w//2 #Input shape (14,14)
s_h4, s_w4 = img_h//4, img_w//4 # Input shape (7,7)
gfc_dim = 1024
gf_dim = 64
h0 = Dense(gfc_dim, activation=None)(z)
h0 = bn_with_relu(h0)
print('h0 shape', h0.shape)
h1 = Dense([gf_dim * 2, s_h4, s_w4], activation=None)(h0)
h1 = bn_with_relu(h1)
print('h1 shape', h1.shape)
h2 = ConvolutionTranspose2D(gkernel,
num_filters=gf_dim*2,
strides=gstride,
pad=True,
output_shape=(s_h2, s_w2),
activation=None)(h1)
h2 = bn_with_relu(h2)
print('h2 shape', h2.shape)
h3 = ConvolutionTranspose2D(gkernel,
num_filters=1,
strides=gstride,
pad=True,
output_shape=(img_h, img_w),
activation=C.sigmoid)(h2)
print('h3 shape :', h3.shape)
return C.reshape(h3, img_h * img_w)
def convolutional_discriminator(x):
with default_options(init=C.normal(scale=0.02)):
dfc_dim = 1024
df_dim = 64
print('Discriminator convolution input shape', x.shape)
x = C.reshape(x, (1, img_h, img_w))
h0 = Convolution2D(dkernel, 1, strides=dstride)(x)
h0 = bn_with_leaky_relu(h0, leak=0.2)
print('h0 shape :', h0.shape)
h1 = Convolution2D(dkernel, df_dim, strides=dstride)(h0)
h1 = bn_with_leaky_relu(h1, leak=0.2)
print('h1 shape :', h1.shape)
h2 = Dense(dfc_dim, activation=None)(h1)
h2 = bn_with_leaky_relu(h2, leak=0.2)
print('h2 shape :', h2.shape)
h3 = Dense(1, activation=C.sigmoid)(h2)
print('h3 shape :', h3.shape)
return h3
# training config
minibatch_size = 128
num_minibatches = 5000 if isFast else 10000
lr = 0.0002
momentum = 0.5 #equivalent to beta1
def build_graph(noise_shape, image_shape, generator, discriminator):
input_dynamic_axes = [C.Axis.default_batch_axis()]
Z = C.input(noise_shape, dynamic_axes=input_dynamic_axes)
X_real = C.input(image_shape, dynamic_axes=input_dynamic_axes)
X_real_scaled = X_real / 255.0
# Create the model function for the generator and discriminator models
X_fake = generator(Z)
D_real = discriminator(X_real_scaled)
D_fake = D_real.clone(
method = 'share',
substitutions = {X_real_scaled.output: X_fake.output}
)
    # Create loss functions and configure optimization algorithms
G_loss = 1.0 - C.log(D_fake)
D_loss = -(C.log(D_real) + C.log(1.0 - D_fake))
G_learner = adam(
parameters = X_fake.parameters,
lr = learning_rate_schedule(lr, UnitType.sample),
momentum = momentum_schedule(0.5)
)
D_learner = adam(
parameters = D_real.parameters,
lr = learning_rate_schedule(lr, UnitType.sample),
momentum = momentum_schedule(0.5)
)
# Instantiate the trainers
G_trainer = Trainer(
X_fake,
(G_loss, None),
G_learner
)
D_trainer = Trainer(
D_real,
(D_loss, None),
D_learner
)
return X_real, X_fake, Z, G_trainer, D_trainer
def train(reader_train, generator, discriminator):
X_real, X_fake, Z, G_trainer, D_trainer = \
build_graph(g_input_dim, d_input_dim, generator, discriminator)
    # print out loss for each model up to 25 times
print_frequency_mbsize = num_minibatches // 25
print("First row is Generator loss, second row is Discriminator loss")
pp_G = ProgressPrinter(print_frequency_mbsize)
pp_D = ProgressPrinter(print_frequency_mbsize)
k = 2
input_map = {X_real: reader_train.streams.features}
for train_step in range(num_minibatches):
# train the discriminator model for k steps
for gen_train_step in range(k):
Z_data = noise_sample(minibatch_size)
X_data = reader_train.next_minibatch(minibatch_size, input_map)
if X_data[X_real].num_samples == Z_data.shape[0]:
batch_inputs = {X_real: X_data[X_real].data, Z: Z_data}
D_trainer.train_minibatch(batch_inputs)
# train the generator model for a single step
Z_data = noise_sample(minibatch_size)
batch_inputs = {Z: Z_data}
G_trainer.train_minibatch(batch_inputs)
G_trainer.train_minibatch(batch_inputs)
pp_G.update_with_trainer(G_trainer)
pp_D.update_with_trainer(D_trainer)
G_trainer_loss = G_trainer.previous_minibatch_loss_average
return Z, X_fake, G_trainer_loss
reader_train = create_reader(train_file, True, d_input_dim, label_dim=10)
# G_input, G_output, G_trainer_loss = train(reader_train, dense_generator, dense_discriminator)
G_input, G_output, G_trainer_loss = train(reader_train,
convolutional_generator,
convolutional_discriminator)
# Print the generator loss
print("Training loss of the generator is: {0:.2f}".format(G_trainer_loss))
def plot_images(images, subplot_shape):
plt.style.use('ggplot')
fig, axes = plt.subplots(*subplot_shape)
for image, ax in zip(images, axes.flatten()):
ax.imshow(image.reshape(28, 28), vmin=0, vmax=1.0, cmap='gray')
ax.axis('off')
plt.show()
noise = noise_sample(36)
images = G_output.eval({G_input: noise})
plot_images(images, subplot_shape=[6, 6])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Select the notebook runtime environment devices / settings
Step2: There are two run modes
Step3: Data Reading
Step4: The random noise we will use to train the GAN is provided by the noise_sample function to generate random noise samples from a uniform distribution within the interval [-1, 1].
Step5: Model Creation
Step6: Generator
Step7: Discriminator
Step8: We use a minibatch size of 128 and a fixed learning rate of 0.0002 for training. In the fast mode (isFast = True) we verify only functional correctness with 5000 iterations.
Step9: Build the graph
Step10: With the value functions defined, we proceed to iteratively train the GAN model. Training the model can take a significant amount of time depending on the hardware, especially if the isFast flag is turned off.
Step11: Generating Fake (Synthetic) Images
|
14,958 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
import os.path as osp
input_size = (50, 3, 224, 224)  # batch, channels, height, width as specified in the caffemodel
fake_data = np.random.rand(2, 224, 224, 3)
from mynet import CaffeNet
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = CaffeNet({'data':images})
sesh = tf.Session()
sesh.run(tf.global_variables_initializer())
# Load the data
net.load('mynet.npy', sesh)
# Forward pass
output = sesh.run(net.get_output(), feed_dict={images: fake_data})
sesh.close()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Images are 224x224 pixels, with 3 channels. Batch size is 50. This is specified in the caffemodel but not in the tf class (mynet.py)
Step2: Now to actually load the model.
|
14,959 | <ASSISTANT_TASK:>
Python Code:
import magma as m
import mantle
def fulladder(A, B, C):
return A^B^C, A&B|B&C|C&A # sum, carry
assert fulladder(1, 0, 0) == (1, 0), "Failed"
assert fulladder(0, 1, 0) == (1, 0), "Failed"
assert fulladder(1, 1, 0) == (0, 1), "Failed"
assert fulladder(1, 0, 1) == (0, 1), "Failed"
assert fulladder(1, 1, 1) == (1, 1), "Failed"
print("Success!")
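# As an illustrative extra check (not in the original notebook), we can verify the
# full adder against its arithmetic definition for all 8 input combinations:
import itertools
for a, b, cin in itertools.product([0, 1], repeat=3):
    s, cout = fulladder(a, b, cin)
    assert a + b + cin == s + 2 * cout, (a, b, cin)
print("Exhaustive truth-table check passed")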
class FullAdder(m.Circuit):
name = "FullAdderExample"
IO = ["I0", m.In(m.Bit), "I1", m.In(m.Bit), "CIN", m.In(m.Bit), "O", m.Out(m.Bit), "COUT", m.Out(m.Bit)]
@classmethod
def definition(io):
O, COUT = fulladder(io.I0, io.I1, io.CIN)
io.O <= O
io.COUT <= COUT
from magma.simulator import PythonSimulator
fulladder_magma = PythonSimulator(FullAdder)
assert fulladder_magma(1, 0, 0) == fulladder(1, 0, 0), "Failed"
assert fulladder_magma(0, 1, 0) == fulladder(0, 1, 0), "Failed"
assert fulladder_magma(1, 1, 0) == fulladder(1, 1, 0), "Failed"
assert fulladder_magma(1, 0, 1) == fulladder(1, 0, 1), "Failed"
assert fulladder_magma(1, 1, 1) == fulladder(1, 1, 1), "Failed"
print("Success!")
from magma.waveform import waveform
test_vectors_raw = [
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 1, 1, 1]
]
waveform(test_vectors_raw, ["a", "b", "cin", "sum", "cout"])
from fault.test_vectors import generate_simulator_test_vectors
from bit_vector import BitVector
test_vectors = [
[BitVector(x) for x in test_vector]
for test_vector in test_vectors_raw
]
tests = generate_simulator_test_vectors(FullAdder, flatten=False)
print( "Success" if tests == test_vectors else "Failure" )
m.compile("build/FullAdder", FullAdder, output="coreir")
%cat build/FullAdder.json
m.compile("build/FullAdder", FullAdder, output="coreir-verilog")
%cat build/FullAdder.v
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A full adder has three single bit inputs, and returns the sum and the carry. The sum is the exclusive or of the 3 bits, the carry is 1 if any two of the inputs bits are 1. Here is a schematic of a full adder circuit (from logisim).
Step2: We can test our Python function to verify that our implementation behaves as expected. We'll use the standard Python assert pattern.
Step3: Now that we have an implementation of fulladder as a Python function,
Step4: First, notice that the FullAdder is a subclass of Circuit. All Magma circuits are classes in python.
Step5: Here is another way to test the circuit.
Step6: We can use the simulator to also generate a set of test vectors.
Step7: Finally, compare the simulated test vectors to the expected values.
Step8: The last step we will do is generate coreir and verilog for the full adder circuit.
|
14,960 | <ASSISTANT_TASK:>
Python Code:
import itertools
屋子 = 第一間, _, 中間, _, _ = [1, 2, 3, 4, 5]
所有順序 = list(itertools.permutations(屋子))
所有順序
def 在右邊(h1, h2):
"h1 緊鄰 h2 的右邊."
return h1-h2 == 1
def 隔壁(h1, h2):
"h1 h2 在隔壁"
return abs(h1-h2) == 1
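# Quick illustrative checks of the two helpers (not part of the original puzzle code):
assert 在右邊(2, 1) and not 在右邊(1, 2)
assert 隔壁(3, 4) and 隔壁(4, 3) and not 隔壁(1, 3)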
def zebra_puzzle():
return [locals()
for (紅, 綠, 白, 黃, 藍) in 所有順序
for (英國人, 西班牙人, 烏克蘭人, 日本人, 挪威人) in 所有順序
for (咖啡, 茶, 牛奶, 橘子汁, 水) in 所有順序
for (OldGold, Kools, Chesterfields, LuckyStrike, Parliaments) in 所有順序
for (狗, 蝸牛, 狐狸, 馬, 斑馬) in 所有順序
if 英國人 is 紅 #2
if 西班牙人 is 狗 #3
if 咖啡 is 綠 #4
if 烏克蘭人 is 茶 #5
if 在右邊(綠, 白) #6
if OldGold is 蝸牛 #7
if Kools is 黃 #8
if 牛奶 is 中間 #9
if 挪威人 is 第一間 #10
if 隔壁(Chesterfields, 狐狸) #11
if 隔壁(Kools, 馬) #12
if LuckyStrike is 橘子汁 #13
if 日本人 is Parliaments #14
if 隔壁(挪威人, 藍) #15
]
zebra_puzzle()
def zebra_puzzle():
return [locals()
for (紅, 綠, 白, 黃, 藍) in 所有順序
if 在右邊(綠, 白) #6
for (英國人, 西班牙人, 烏克蘭人, 日本人, 挪威人) in 所有順序
if 英國人 is 紅 #2
if 挪威人 is 第一間 #10
if 隔壁(挪威人, 藍) #15
for (咖啡, 茶, 牛奶, 橘子汁, 水) in 所有順序
if 咖啡 is 綠 #4
if 烏克蘭人 is 茶 #5
if 牛奶 is 中間 #9
for (OldGold, Kools, Chesterfields, LuckyStrike, Parliaments) in 所有順序
if Kools is 黃 #8
if LuckyStrike is 橘子汁 #13
if 日本人 is Parliaments #14
for (狗, 蝸牛, 狐狸, 馬, 斑馬) in 所有順序
if 西班牙人 is 狗 #3
if OldGold is 蝸牛 #7
if 隔壁(Chesterfields, 狐狸) #11
if 隔壁(Kools, 馬) #12
]
zebra_puzzle()
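# Why the reordered version is fast: each constraint is checked right after the loop
# that binds its variables, pruning candidates early instead of enumerating all
# 120**5 (~24.9 billion) assignments. A rough timing sketch (assumes IPython):
# %time zebra_puzzle()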
def result(d): return {i:[k for k,v in d.items() if v == i] for i in 屋子}
def zebra_puzzle():
return [result(locals())
for (紅, 綠, 白, 黃, 藍) in 所有順序
if 在右邊(綠, 白)
for (英國人, 西班牙人, 烏克蘭人, 日本人, 挪威人) in 所有順序
if 英國人 is 紅
if 挪威人 is 第一間
if 隔壁(挪威人, 藍)
for (咖啡, 茶, 牛奶, 橘子汁, 水) in 所有順序
if 咖啡 is 綠
if 烏克蘭人 is 茶
if 牛奶 is 中間
for (OldGold, Kools, Chesterfields, LuckyStrike, Parliaments) in 所有順序
if Kools is 黃
if LuckyStrike is 橘子汁
if 日本人 is Parliaments
for (狗, 蝸牛, 狐狸, 馬, 斑馬) in 所有順序
if 西班牙人 is 狗
if OldGold is 蝸牛
if 隔壁(Chesterfields, 狐狸)
if 隔壁(Kools, 馬) ]
zebra_puzzle()[0]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Takes too long!
|
14,961 | <ASSISTANT_TASK:>
Python Code:
import google.datalab.bigquery as bq
import pandas as pd
%%bq query -n requests
SELECT timestamp, latency, endpoint
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = 'Popular' OR endpoint = 'Recent'
%%bq sample --count 5 --query requests
df = requests.execute(output_options=bq.QueryOutput.dataframe()).result()
len(df)
df.head(5)
df.dtypes
groups = df.groupby('endpoint')
groups.dtypes
for name, df_group in groups:
print('%s - %d items' % (name, len(df_group)))
print(df_group.head(3))
print()
groups['latency'].describe()
def unique(x):
return sorted(set(list(x)))
groups['latency'].agg({ 'list': lambda x: list(x), 'unique': lambda x: unique(x) })
df_series = df.pivot(index='timestamp', columns='endpoint', values='latency').fillna(method = 'backfill')
df_series[10:20]
len(df_series)
df_series.plot(logy = True)
df_series.resample(rule='10min').mean().plot(logy = True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From SQL to DataFrames
Step2: Data Manipulation
Step3: Or it can be inspected for schema,
Step4: or further transformed locally, for example to perform grouping,
Step5: and then analyze a dimension per group,
Step6: or even run a set of custom aggregation functions.
Step7: Data Visualization
Step8: Resampling
|
14,962 | <ASSISTANT_TASK:>
Python Code:
%%bash
git status
%%bash
git log
%%bash
git show
%%writefile foo.md
Fetchez la vache
%%bash
git add foo.md
%%bash
git st
%%bash
git diff foo.md
%%bash
git diff git_intro.ipynb
%%bash
git rm -f foo.md
%%bash
git st
%%bash
git branch new_post
%%bash
git checkout new_post
%%writefile my_new_post.md
# Q: What is the meaning of life?
# A: 42
%%bash
git st
%%bash
git add my_new_post.md
%%bash
git st
%%bash
git ci -m "Adding my new post." my_new_post.md
%%bash
git push
%%bash
git push --set-upstream origin new_post
%%bash
git remote --verbose
%%bash
git remote add upstream https://github.com/ocefpaf/git_intro_demo.git
git remote --verbose
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GitHub workflow
Step2: PR ready!? What now?
|
14,963 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
def lorentz_derivs(yvec, t, sigma, rho, beta):
    """Compute the derivatives for the Lorentz system at yvec(t)."""
x = yvec[0]
y = yvec[1]
z = yvec[2]
dx = sigma*(y - x)
dy = x*(rho - z) - y
dz = x*y - beta*z
return np.array([dx, dy, dz])
assert np.allclose(lorentz_derivs((1,1,1),0, 1.0, 1.0, 2.0),[0.0,-1.0,-1.0])
def solve_lorentz(ic, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
    """Solve the Lorenz system for a single initial condition.
    Parameters
    ----------
    ic : array, list, tuple
        Initial conditions [x,y,z].
    max_time: float
        The max time to use. Integrate with 250 points per time unit.
    sigma, rho, beta: float
        Parameters of the differential equation.
    Returns
    -------
    soln : np.ndarray
        The array of the solution. Each row will be the solution vector at that time.
    t : np.ndarray
        The array of time points used.
    """
    # YOUR CODE HERE
    t = np.linspace(0, max_time, int(250*max_time))
    soln = odeint(lorentz_derivs, ic, t, args=(sigma, rho, beta))
    return soln, t
print(solve_lorentz((1, 1, 1), max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0))
assert True # leave this to grade solve_lorenz
N = 5
colors = plt.cm.hot(np.linspace(0,1,N))
for i in range(N):
# To use these colors with plt.plot, pass them as the color argument
print(colors[i])
def plot_lorentz(N=10, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
    """Plot [x(t),z(t)] for the Lorenz system.
    Parameters
    ----------
    N : int
        Number of initial conditions and trajectories to plot.
    max_time: float
        Maximum time to use.
    sigma, rho, beta: float
        Parameters of the differential equation.
    """
    # YOUR CODE HERE
    np.random.seed(1)
    colors = plt.cm.hot(np.linspace(0, 1, N))
    for i in range(N):
        # initial conditions chosen here as an illustrative fix: uniform in [-15, 15]
        ic = 30.0*(np.random.random(3) - 0.5)
        soln, t = solve_lorentz(ic, max_time=max_time, sigma=sigma, rho=rho, beta=beta)
        plt.plot(soln[:, 0], soln[:, 2], color=colors[i])
plot_lorentz()
assert True # leave this to grade the plot_lorenz function
# YOUR CODE HERE
raise NotImplementedError()
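# A possible way to wire this up with ipywidgets' interact (an illustrative sketch,
# not the graded solution; the slider ranges below are arbitrary choices):
interact(plot_lorentz, N=(1, 50), max_time=fixed(4.0),
         sigma=(0.0, 50.0), rho=(0.0, 50.0), beta=fixed(8.0/3.0));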
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Lorenz system
Step4: Write a function solve_lorenz that solves the Lorenz system above for a particular initial condition $[x(0),y(0),z(0)]$. Your function should return a tuple of the solution array and time array.
Step6: Write a function plot_lorentz that
Step7: Use interact to explore your plot_lorenz function with
|
14,964 | <ASSISTANT_TASK:>
Python Code:
import ipyvolume as ipv
fig = ipv.figure()
scatter = ipv.examples.gaussian(show=False)
ipv.show()
plane = ipv.plot_plane("z");
import ipywidgets as widgets
widgets.jslink((fig, 'slice_z'), (plane, 'z_offset'));
## Uncomment to try
# import vaex
# import matplotlib.pylab as plt
# import PIL.Image
# df = vaex.from_arrays(x=scatter.x, y=scatter.y)
# fig2d = plt.figure()
# ax = fig2d.add_axes([0, 0, 1, 1])
# df.viz.heatmap(df.x, df.y, shape=64, show=False, colorbar=False, tight_layout=False)
# fig2d.axes[0].axis('off');
# plt.draw()
# image = PIL.Image.frombytes('RGB', fig2d.canvas.get_width_height(), fig2d.canvas.tostring_rgb())
# plt.close()
# image
# example how put a png as texture
import PIL.Image
import requests
import io
url = 'https://vaex.io/img/logos/spiral-small.png'
r = requests.get(url, stream=True)
f = io.BytesIO(r.content)
image = PIL.Image.open(f)
plane.u = [0.0, 1.0, 1.0, 0.0]
plane.v = [0.0, 0.0, 1.0, 1.0]
plane.texture = image
import ipyvolume as ipv
fig = ipv.figure()
volume = ipv.examples.head(show=False, description="Patient X")
ipv.show()
slice_x = ipv.plot_plane('x', volume=volume, description="Slice X", description_color="black", icon="mdi-knife")
slice_y = ipv.plot_plane('y', volume=volume, description="Slice Y", description_color="black", icon="mdi-knife")
slice_z = ipv.plot_plane('z', volume=volume, description="Slice Z", description_color="black", icon="mdi-knife",
visible=False)
import ipywidgets as widgets
widgets.jslink((fig, 'slice_x'), (slice_x, 'x_offset'))
widgets.jslink((fig, 'slice_y'), (slice_y, 'y_offset'))
widgets.jslink((fig, 'slice_z'), (slice_z, 'z_offset'));
# uncomment to save
ipv.save("slice.html", devmode=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, we add a plane at z=0.
Step2: By holding the shift key and hovering the mouse at the edges of the bounding box (or activate slice mode in the toolbar, and click), we modify the slice_z property. By linking the slice_z property to the z_offset property of the mesh/plane, we can interactively move the plane. Note that in order to change the z_offset, you need to hover the mouse at the sides of the bounding box, which means you need to make sides of the bounding box visible.
Step3: Adding a texture
Step4: On just download an image
Step5: And assign it to the plane's texture. Note that we should also set its u and v coordinates, so we know where the edges of the texture map should go
Step6: Slicing a volume
Step7: We now add 3 planes, and pass our volume so it can be used as a texture map.
Step8: Again, by connecting the slice coordinates to the offsets of the planes, we can create 3 slicing planes that can be controlled interactively.
Step9: Note that you can save the output to an html file, and the slicing will still work without a connected kernel.
|
14,965 | <ASSISTANT_TASK:>
Python Code:
from miscpy.utils.sympyhelpers import *
init_printing()
th,psi,thd,psidd,thdd,psidd,Omega,I1,I2,t,M1,C = \
symbols('theta,psi,thetadot,psidot,thetaddot,psiddot,Omega,I_1,I_2,t,M_1,C')
diffmap = {th:thd,psi:psid,thd:thdd,psid:psidd}
bCa = rotMat(1,th);bCa
iWb_B = bCa*Matrix([0,0,psid])+ Matrix([thd,0,0]); iWb_B
iWc_B = iWb_B +Matrix([0,Omega,0]); iWc_B
IG_B = diag(I1,I2,I1);IG_B
hG_B = IG_B*iWc_B; hG_B
dhG_B = difftotalmat(hG_B,t,diffmap) + skew(iWb_B)*hG_B; dhG_B
skew(iWb_B)*hG_B
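# Illustrative check (not in the original notes): the b_2 component of omega x h_G
# simplifies to zero, which is why only the first and third components appear below.
from sympy import simplify
simplify((skew(iWb_B)*hG_B)[1])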
dhG_B_simp = dhG_B.subs(Omega+psid*sin(th),C); dhG_B_simp
solve([dhG_B_simp[0] + M1,dhG_B_simp[2]],[thdd,psidd])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Spinning Symmetric Rigid Body setup
Step2: $\left[{}^\mathcal{I}\boldsymbol{\omega}^\mathcal{B}\right]_\mathcal{B}$
Step3: ${}^\mathcal{I}\boldsymbol{\omega}^\mathcal{C} = {}^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} + {}^\mathcal{B}\boldsymbol{\omega}^\mathcal{C}$.
Step4: $\left[ \mathbb I_G \right]_\mathcal B$
Step5: $\left[{}^\mathcal{I} \mathbf h_G\right]_\mathcal{B}$
Step6: $\vphantom{\frac{\mathrm{d}}{\mathrm{d}t}}^\mathcal{I}\frac{\mathrm{d}}{\mathrm{d}t} {}^\mathcal{I} \mathbf h_G = \vphantom{\frac{\mathrm{d}}{\mathrm{d}t}}^\mathcal{B}\frac{\mathrm{d}}{\mathrm{d}t} {}^\mathcal{I} \mathbf h_G + {}^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} \times \mathbf h_G$.
Step7: Note that the $\mathbf b_2$ component of ${}^\mathcal{I}\boldsymbol{\omega}^\mathcal{B} \times \mathbf h_G$ is zero
Step8: Define $C \triangleq \Omega + \dot\psi\sin\theta$ and substitute into $\left[\vphantom{\frac{\mathrm{d}}{\mathrm{d}t}}^\mathcal{I}\frac{\mathrm{d}}{\mathrm{d}t} {}^\mathcal{I} \mathbf h_G\right]_\mathcal{B}$
Step9: Assume an external torque generating moment about $G$ of $\mathbf M_G = -M_1\mathbf b_1$
|
14,966 | <ASSISTANT_TASK:>
Python Code:
import os
import tensorflow as tf
import numpy as np
from google.cloud import bigquery
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
def write_list_to_disk(my_list, filename):
with open(filename, 'w') as f:
for item in my_list:
line = "%s\n" % item
f.write(line)
sql="""
#standardSQL
SELECT
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
  content_id
"""
content_ids_list = bigquery.Client().query(sql).to_dataframe()['content_id'].tolist()
write_list_to_disk(content_ids_list, "content_ids.txt")
print("Some sample content IDs {}".format(content_ids_list[:3]))
print("The total number of articles is {}".format(len(content_ids_list)))
sql="""
#standardSQL
SELECT
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
  category
"""
categories_list = bigquery.Client().query(sql).to_dataframe()['category'].tolist()
write_list_to_disk(categories_list, "categories.txt")
print(categories_list)
sql="""
#standardSQL
SELECT
REGEXP_EXTRACT((SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)), r"^[^,]+") AS first_author
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
  first_author
"""
authors_list = bigquery.Client().query(sql).to_dataframe()['first_author'].tolist()
write_list_to_disk(authors_list, "authors.txt")
print("Some sample authors {}".format(authors_list[:10]))
print("The total number of authors is {}".format(len(authors_list)))
sql="""
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL
  AND ABS(MOD(FARM_FINGERPRINT(CONCAT(visitor_id, content_id)), 10)) < 9
"""
training_set_df = bigquery.Client().query(sql).to_dataframe()
training_set_df.to_csv('training_set.csv', header=False, index=False, encoding='utf-8')
training_set_df.head()
sql="""
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL
  AND ABS(MOD(FARM_FINGERPRINT(CONCAT(visitor_id, content_id)), 10)) >= 9
"""
test_set_df = bigquery.Client().query(sql).to_dataframe()
test_set_df.to_csv('test_set.csv', header=False, index=False, encoding='utf-8')
test_set_df.head()
%%bash
wc -l *_set.csv
!head *_set.csv
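# Rough sanity check of the hash-based split (illustrative only; exact counts depend on
# the sample table): roughly 90% of the examples should land in the training set.
n_train, n_test = len(training_set_df), len(test_set_df)
print("train fraction: {:.3f}".format(n_train / (n_train + n_test)))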
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use this helper function to write the lists of article IDs, categories, and authors from our database to local files.
Step3: Pull data from BigQuery
Step5: There should be 15,634 articles in the database.
Step7: The categories are 'News', 'Stars & Kultur', and 'Lifestyle'.
Step10: There should be 385 authors in the database.
Step11: Let's have a look at the two csv files we just created containing the training and test set. We'll also do a line count of both files to confirm that we have achieved an approximate 90/10 train/test split.
|
14,967 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import statsmodels.api as sm # For some reason this import is necessary...
import statsmodels.formula.api as smapi
import statsmodels.graphics as smgraph
import matplotlib.pyplot as plt
%matplotlib inline
x = np.arange(30, dtype=float)
# Make some y data with random noise
y = 10 * x + 5.0*np.random.randn(30)
# Add outlier #
y[10] = 180.
y[20] = 130
plt.plot(x, y, 'o')
# Make fit #
regression = smapi.ols("data ~ x", data=dict(data=y, x=x)).fit()
regression.summary()
test = regression.outlier_test()
test
print('Bad data points (bonf(p) < 0.05):')
test[test['bonf(p)'] < 0.05]
outliers = test[test['bonf(p)'] < 0.05].index.values
outliers
figure = smgraph.regressionplots.plot_fit(regression, 1)
line = smgraph.regressionplots.abline_plot(model_results=regression, ax=figure.axes[0])
plt.plot(outliers, y[outliers], 'xm', label='outliers', ms=14)
plt.legend(loc=0);
import statsmodels.formula.api as smapi
def get_outliers(features, target):
regression = smapi.ols("target ~ features", data=locals()).fit()
test = regression.outlier_test()
outliers = test[test['bonf(p)'] < 0.05]
return list(outliers.index.values)
def test_outliers():
x = np.arange(30, dtype=float)
# Make some y data with random noise
y = 10 * x + 5.0*np.random.randn(30)
# Add outlier
y[10] = 180.
y[20] = 130
outliers = [10, 20]
prediction = get_outliers(features=x, target=y)
assert outliers == prediction
test_outliers()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test data
Step2: Regression
Step3: Test for Outliers
Step4: Figure
Step5: Create a function and test it
|
14,968 | <ASSISTANT_TASK:>
Python Code:
from ozapfdis import jdeps
deps = jdeps.read_jdeps_file(
"../dataset/jdeps_dropover.txt",
filter_regex="at.dropover")
deps.head()
deps = deps[['from', 'to']]
deps['group_from'] = deps['from'].str.split(".").str[2]
deps['group_to'] = deps['to'].str.split(".").str[2]
deps.head()
from ausi import d3
d3.create_d3force(
deps,
"jdeps_demo_output/dropover_d3forced",
group_col_from="group_from",
group_col_to="group_to")
d3.create_semantic_substrate(
deps,
"jdeps_demo_output/dropover_semantic_substrate")
d3.create_hierarchical_edge_bundling(
deps,
"jdeps_demo_output/dropover_bundling")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modeling
Step2: Visualization
|
14,969 | <ASSISTANT_TASK:>
Python Code:
L=json.loads(file('../json/L.json','r').read())
M=json.loads(file('../json/M.json','r').read())
N=json.loads(file('../json/N.json','r').read())
import requests
AP={}
for c in M:
if c not in AP:AP[c]={}
for i in range(len(L[c])):
AP[c][N[c][i]]=L[c][i]
baseurl='https://www.airportia.com/'
import requests, urllib2
def urlgetter(url):
s = requests.Session()
cookiesopen = s.get(url)
cookies=str(s.cookies)
fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]]
#push token
opener = urllib2.build_opener()
for k in fcookies:
opener.addheaders.append(('Cookie', k[0]+'='+k[1]))
#read html
return s.get(url).content
SD={}
SC=json.loads(file('../json/SC2.json','r').read())
#pop out last - if applicable
try: SD.pop(c)
except: pass
for h in range(len(AP.keys())):
c=AP.keys()[h]
#country not parsed yet
if c in SC:
if c not in SD:
SD[c]=[]
print h,c
airportialinks=AP[c]
sch={}
#all airports of country, where there is traffic
for i in airportialinks:
if i in SC[c]:
print i,
if i not in sch:sch[i]={}
url=baseurl+airportialinks[i]
m=urlgetter(url)
for d in range (3,17):
#date not parsed yet
if d not in sch[i]:
url=baseurl+airportialinks[i]+'arrivals/201704'+str(d)
m=urlgetter(url)
soup = BeautifulSoup(m, "lxml")
#if there are flights at all
if len(soup.findAll('table'))>0:
sch[i][d]=pd.read_html(m)[0]
else: print '--W-',d,
SD[c]=sch
print
dbpath='E:/Dropbox/Public/datarepo/aviation/' #large file db path
file(dbpath+"json/SD_arrv.json",'w').write(repr(SD))
cnc_path='../../universal/countries/'
cnc=pd.read_excel(cnc_path+'cnc.xlsx').set_index('Name')
MDF=pd.DataFrame()
for c in SD:
sch=SD[c]
mdf=pd.DataFrame()
for i in sch:
for d in sch[i]:
df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1)
df['To']=i
df['Date']=d
mdf=pd.concat([mdf,df])
mdf=mdf.replace('Hahn','Frankfurt')
mdf=mdf.replace('Hahn HHN','Frankfurt HHN')
if len(sch)>0:
mdf['City']=[i[:i.rfind(' ')] for i in mdf['From']]
mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['From']]
cpath=str(cnc.T.loc[c]['ISO2']).lower()
if cpath=='nan':cpath='na'
file('../countries/'+cpath+"/json/mdf_arrv.json",'w').write(json.dumps(mdf.reset_index().to_json()))
MDF=pd.concat([MDF,mdf])
print c,
dbpath='E:/Dropbox/Public/datarepo/aviation/' #large file db path
MDF.reset_index().to_json(dbpath+'json/MDF_arrv.json')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: record schedules for 2 weeks, then augment count with weekly flight numbers.
Step2: good dates
Step3: Save
|
14,970 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import requests
from io import BytesIO
# NBER recessions
from pandas_datareader.data import DataReader
from datetime import datetime
usrec = DataReader('USREC', 'fred', start=datetime(1947, 1, 1), end=datetime(2013, 4, 1))
# Get the RGNP data to replicate Hamilton
dta = pd.read_stata('https://www.stata-press.com/data/r14/rgnp.dta').iloc[1:]
dta.index = pd.DatetimeIndex(dta.date, freq='QS')
dta_hamilton = dta.rgnp
# Plot the data
dta_hamilton.plot(title='Growth rate of Real GNP', figsize=(12,3))
# Fit the model
mod_hamilton = sm.tsa.MarkovAutoregression(dta_hamilton, k_regimes=2, order=4, switching_ar=False)
res_hamilton = mod_hamilton.fit()
res_hamilton.summary()
fig, axes = plt.subplots(2, figsize=(7,7))
ax = axes[0]
ax.plot(res_hamilton.filtered_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='k', alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title='Filtered probability of recession')
ax = axes[1]
ax.plot(res_hamilton.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='k', alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title='Smoothed probability of recession')
fig.tight_layout()
print(res_hamilton.expected_durations)
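# For reference, the expected duration of regime i is 1/(1 - p_ii), where p_ii is the
# estimated probability of staying in regime i. A tiny illustrative helper (the 0.75
# below is a made-up value, not the fitted parameter):
def expected_duration(p_stay):
    return 1.0 / (1.0 - p_stay)
print(expected_duration(0.75))  # a regime with p_ii = 0.75 lasts 4 periods on average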
# Get the dataset
ew_excs = requests.get('http://econ.korea.ac.kr/~cjkim/MARKOV/data/ew_excs.prn').content
raw = pd.read_table(BytesIO(ew_excs), header=None, skipfooter=1, engine='python')
raw.index = pd.date_range('1926-01-01', '1995-12-01', freq='MS')
dta_kns = raw.loc[:'1986'] - raw.loc[:'1986'].mean()
# Plot the dataset
dta_kns[0].plot(title='Excess returns', figsize=(12, 3))
# Fit the model
mod_kns = sm.tsa.MarkovRegression(dta_kns, k_regimes=3, trend='nc', switching_variance=True)
res_kns = mod_kns.fit()
res_kns.summary()
fig, axes = plt.subplots(3, figsize=(10,7))
ax = axes[0]
ax.plot(res_kns.smoothed_marginal_probabilities[0])
ax.set(title='Smoothed probability of a low-variance regime for stock returns')
ax = axes[1]
ax.plot(res_kns.smoothed_marginal_probabilities[1])
ax.set(title='Smoothed probability of a medium-variance regime for stock returns')
ax = axes[2]
ax.plot(res_kns.smoothed_marginal_probabilities[2])
ax.set(title='Smoothed probability of a high-variance regime for stock returns')
fig.tight_layout()
# Get the dataset
filardo = requests.get('http://econ.korea.ac.kr/~cjkim/MARKOV/data/filardo.prn').content
dta_filardo = pd.read_table(BytesIO(filardo), sep=' +', header=None, skipfooter=1, engine='python')
dta_filardo.columns = ['month', 'ip', 'leading']
dta_filardo.index = pd.date_range('1948-01-01', '1991-04-01', freq='MS')
dta_filardo['dlip'] = np.log(dta_filardo['ip']).diff()*100
# Deflated pre-1960 observations by ratio of std. devs.
# See hmt_tvp.opt or Filardo (1994) p. 302
std_ratio = dta_filardo['dlip']['1960-01-01':].std() / dta_filardo['dlip'][:'1959-12-01'].std()
dta_filardo['dlip'][:'1959-12-01'] = dta_filardo['dlip'][:'1959-12-01'] * std_ratio
dta_filardo['dlleading'] = np.log(dta_filardo['leading']).diff()*100
dta_filardo['dmdlleading'] = dta_filardo['dlleading'] - dta_filardo['dlleading'].mean()
# Plot the data
dta_filardo['dlip'].plot(title='Standardized growth rate of industrial production', figsize=(13,3))
plt.figure()
dta_filardo['dmdlleading'].plot(title='Leading indicator', figsize=(13,3));
mod_filardo = sm.tsa.MarkovAutoregression(
dta_filardo.iloc[2:]['dlip'], k_regimes=2, order=4, switching_ar=False,
exog_tvtp=sm.add_constant(dta_filardo.iloc[1:-1]['dmdlleading']))
np.random.seed(12345)
res_filardo = mod_filardo.fit(search_reps=20)
res_filardo.summary()
fig, ax = plt.subplots(figsize=(12,3))
ax.plot(res_filardo.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index, 0, 1, where=usrec['USREC'].values, color='gray', alpha=0.2)
ax.set_xlim(dta_filardo.index[6], dta_filardo.index[-1])
ax.set(title='Smoothed probability of a low-production state');
res_filardo.expected_durations[0].plot(
title='Expected duration of a low-production state', figsize=(12,3));
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hamilton (1989) switching model of GNP
Step2: We plot the filtered and smoothed probabilities of a recession. Filtered refers to an estimate of the probability at time $t$ based on data up to and including time $t$ (but excluding time $t+1, ..., T$). Smoothed refers to an estimate of the probability at time $t$ using all the data in the sample.
Step3: From the estimated transition matrix we can calculate the expected duration of a recession versus an expansion.
Step4: In this case, it is expected that a recession will last about one year (4 quarters) and an expansion about two and a half years.
Step5: Below we plot the probabilities of being in each of the regimes; only in a few periods is a high-variance regime probable.
Step6: Filardo (1994) Time-Varying Transition Probabilities
Step7: The time-varying transition probabilities are specified by the exog_tvtp parameter.
Step8: Below we plot the smoothed probability of the economy operating in a low-production state, and again include the NBER recessions for comparison.
Step9: Using the time-varying transition probabilities, we can see how the expected duration of a low-production state changes over time
|
14,971 | <ASSISTANT_TASK:>
Python Code:
import random
class Person:
def __init__(self, location):
self.ill = False
self.injured = False
self.age = 35
self.location = location
self.location.numAgents += 1
# Set to true when an agent resides on a link.
self.travelling = False
def selectRoute(self):
total_score = 0.0
for i in range(0,len(self.location.links)):
total_score += 40000.0 / (10.0 + self.location.links[i].distance)
selected_value = random.random() * total_score
checked_score = 0.0
for i in range(0,len(self.location.links)):
checked_score += 40000.0 / (10.0 + self.location.links[i].distance)
if selected_value < checked_score:
return i
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Person.selectRoute = selectRoute
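# Illustrative worked example of the weighting above (not part of the model): with the
# 5 km and 10 km links used later in this notebook, the weights are 40000/15 ~= 2667
# and 40000/20 = 2000, so roughly 57% of the moving agents should pick the shorter route.
w_short, w_long = 40000.0 / (10.0 + 5.0), 40000.0 / (10.0 + 10.0)
print("P(short route) ~= %.2f" % (w_short / (w_short + w_long)))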
def evolve(self):
movechance = self.location.movechance
outcome = random.random()
self.travelling = False
if outcome < movechance:
# determine here which route to take?
chosenRoute = self.selectRoute()
# update location to link endpoint
self.location.numAgents -= 1
self.location = self.location.links[chosenRoute]
self.location.numAgents += 1
self.travelling = True
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Person.evolve = evolve
def finish_travel(self):
if self.travelling:
# update location (which is on a link) to link endpoint
self.location.numAgents -= 1
self.location = self.location.endpoint
self.location.numAgents += 1
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Person.finish_travel = finish_travel
class Location:
def __init__(self, name, x=0.0, y=0.0, movechance=0.001):
self.name = name
self.x = x
self.y = y
self.movechance = movechance
self.links = []
self.numAgents = 0
class Link:
def __init__(self, endpoint, distance):
# distance in km.
self.distance = float(distance)
# links for now always connect two endpoints
self.endpoint = endpoint
# number of agents that are in transit.
self.numAgents = 0
class Ecosystem:
def __init__(self):
self.locations = []
self.locationNames = []
self.agents = []
self.time = 0
def addLocation(self, name, x="0.0", y="0.0", movechance=0.1):
l = Location(name, x, y, movechance)
self.locations.append(l)
self.locationNames.append(l.name)
return l
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Ecosystem.addLocation = addLocation
def addAgent(self, location):
self.agents.append(Person(location))
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Ecosystem.addAgent = addAgent
def linkUp(self, endpoint1, endpoint2, distance="1.0"):
    """Creates a link between two endpoint locations"""
endpoint1_index = 0
endpoint2_index = 0
for i in range(0, len(self.locationNames)):
if(self.locationNames[i] == endpoint1):
endpoint1_index = i
if(self.locationNames[i] == endpoint2):
endpoint2_index = i
self.locations[endpoint1_index].links.append( Link(self.locations[endpoint2_index], distance) )
self.locations[endpoint2_index].links.append( Link(self.locations[endpoint1_index], distance) )
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Ecosystem.linkUp = linkUp
def doTimeStep(self):
#update agent locations
for a in self.agents:
a.evolve()
for a in self.agents:
a.finish_travel()
#update link properties
self.time += 1
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Ecosystem.doTimeStep = doTimeStep
def numAgents(self):
return len(self.agents)
def printInfo(self):
print("Time: ", self.time, ", # of agents: ", len(self.agents))
for l in self.locations:
print(l.name, l.numAgents)
# Simple workarounds to make this Python member functions work properly in Jupyter
# (which doesn't carry over the class structure from the previous fragment).
Ecosystem.numAgents = numAgents
Ecosystem.printInfo = printInfo
#if __name__ == "__main__":
print("A first ABM implementation")
e = Ecosystem()
l1 = e.addLocation("Source")
l2 = e.addLocation("Sink1")
l3 = e.addLocation("Sink2")
e.linkUp("Source","Sink1","10.0")
e.linkUp("Source","Sink2","5.0")
for i in range(0,100):
e.addAgent(location=l1)
duration=10
for t in range(0,duration):
e.doTimeStep()
e.printInfo()
print("Well done!")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining the refugee
Step2: I gave the Person class a simple constructor (see the __init__() function), which sets a number of parameters specific to the class. You can define any parameter you like, but I opted for the following (semi-arbitrary) set
Step3: Here, each option has a weight equal to 40000 (the approximate circumference of the planet in km) divided by (10 + [distance to the endpoint of the route in km]).
Step4: Here the chance of a Person moving at all at a given time step is given by the movechance. This movechance is a static number for each Location, allowing us to set a high movechance for unsafe locations, and a lower movechance for safer locations.
Step5: This function is a little redundant right now (it could be part of evolve()), but it makes it easier to later modify the code to let Persons spend more than one time step in transit.
Step6: The Location class, too, has a number of simple parameters. These represent essential characteristics for individual locations
Step7: The Links class is accompanied with the following attributes
Step8: The Ecosystem class has the following attributes
Step9: ...a function that adds Agents to the Ecosystem
Step11: ...and a function that adds Links to the Ecosystem
Step12: Crucially, we want to evolve the system in time. This is actually done using the following function
Step13: Lastly, we add two functions to aid us in writing out some results.
Step14: Creating and running an Agent-based Simulation
Step15: Next, we establish two paths, each of which connects the source location to one of the two sink locations. As a test, we specify one of the paths to have a length of 10 kilometers, and one to have a length of 5 kilometers
Step16: With the location and links in place, we can now insert a hundred agents in the source location l1. To do that, we use the addAgent() function a hundred times.
Step17: With all the agents in place, we can now proceed to run the simulation. We run the simulation for a duration of 10 time steps, and we print basic diagnostic information after each time step
Step18: ...and with that all in place, you have just established your first working ABM model!
|
14,972 | <ASSISTANT_TASK:>
Python Code:
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "decision_trees"
def image_path(fig_id):
return os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id)
def save_fig(fig_id, tight_layout=True):
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(image_path(fig_id) + ".png", format='png', dpi=300)
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data[:, 2:] # petal length and width
y = iris.target
tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)
tree_clf.fit(X, y)
from sklearn.tree import export_graphviz
export_graphviz(
tree_clf,
out_file=image_path("iris_tree.dot"),
feature_names=iris.feature_names[2:],
class_names=iris.target_names,
rounded=True,
filled=True
)
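# The generated .dot file can be rendered to an image with the graphviz command-line
# tool, e.g. from a shell (assuming graphviz is installed):
#     dot -Tpng images/decision_trees/iris_tree.dot -o images/decision_trees/iris_tree.png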
from matplotlib.colors import ListedColormap
def plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True):
x1s = np.linspace(axes[0], axes[1], 100)
x2s = np.linspace(axes[2], axes[3], 100)
x1, x2 = np.meshgrid(x1s, x2s)
X_new = np.c_[x1.ravel(), x2.ravel()]
y_pred = clf.predict(X_new).reshape(x1.shape)
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap, linewidth=10)
if not iris:
custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
if plot_training:
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor")
plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris-Virginica")
plt.axis(axes)
if iris:
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
else:
plt.xlabel(r"$x_1$", fontsize=18)
plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
if legend:
plt.legend(loc="lower right", fontsize=14)
plt.figure(figsize=(8, 4))
plot_decision_boundary(tree_clf, X, y)
plt.plot([2.45, 2.45], [0, 3], "k-", linewidth=2)
plt.plot([2.45, 7.5], [1.75, 1.75], "k--", linewidth=2)
plt.plot([4.95, 4.95], [0, 1.75], "k:", linewidth=2)
plt.plot([4.85, 4.85], [1.75, 3], "k:", linewidth=2)
plt.text(1.40, 1.0, "Depth=0", fontsize=15)
plt.text(3.2, 1.80, "Depth=1", fontsize=13)
plt.text(4.05, 0.5, "(Depth=2)", fontsize=11)
save_fig("decision_tree_decision_boundaries_plot")
plt.show()
tree_clf.predict_proba([[5, 1.5]])
tree_clf.predict([[5, 1.5]])
X[(X[:, 1]==X[:, 1][y==1].max()) & (y==1)] # widest Iris-Versicolor flower
not_widest_versicolor = (X[:, 1]!=1.8) | (y==2)
X_tweaked = X[not_widest_versicolor]
y_tweaked = y[not_widest_versicolor]
tree_clf_tweaked = DecisionTreeClassifier(max_depth=2, random_state=40)
tree_clf_tweaked.fit(X_tweaked, y_tweaked)
plt.figure(figsize=(8, 4))
plot_decision_boundary(tree_clf_tweaked, X_tweaked, y_tweaked, legend=False)
plt.plot([0, 7.5], [0.8, 0.8], "k-", linewidth=2)
plt.plot([0, 7.5], [1.75, 1.75], "k--", linewidth=2)
plt.text(1.0, 0.9, "Depth=0", fontsize=15)
plt.text(1.0, 1.80, "Depth=1", fontsize=13)
save_fig("decision_tree_instability_plot")
plt.show()
from sklearn.datasets import make_moons
Xm, ym = make_moons(n_samples=100, noise=0.25, random_state=53)
deep_tree_clf1 = DecisionTreeClassifier(random_state=42)
deep_tree_clf2 = DecisionTreeClassifier(min_samples_leaf=4, random_state=42)
deep_tree_clf1.fit(Xm, ym)
deep_tree_clf2.fit(Xm, ym)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(deep_tree_clf1, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("No restrictions", fontsize=16)
plt.subplot(122)
plot_decision_boundary(deep_tree_clf2, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False)
plt.title("min_samples_leaf = {}".format(deep_tree_clf2.min_samples_leaf), fontsize=14)
save_fig("min_samples_leaf_plot")
plt.show()
angle = np.pi / 180 * 20
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
Xr = X.dot(rotation_matrix)
tree_clf_r = DecisionTreeClassifier(random_state=42)
tree_clf_r.fit(Xr, y)
plt.figure(figsize=(8, 3))
plot_decision_boundary(tree_clf_r, Xr, y, axes=[0.5, 7.5, -1.0, 1], iris=False)
plt.show()
np.random.seed(6)
Xs = np.random.rand(100, 2) - 0.5
ys = (Xs[:, 0] > 0).astype(np.float32) * 2
angle = np.pi / 4
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
Xsr = Xs.dot(rotation_matrix)
tree_clf_s = DecisionTreeClassifier(random_state=42)
tree_clf_s.fit(Xs, ys)
tree_clf_sr = DecisionTreeClassifier(random_state=42)
tree_clf_sr.fit(Xsr, ys)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_decision_boundary(tree_clf_s, Xs, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
plt.subplot(122)
plot_decision_boundary(tree_clf_sr, Xsr, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False)
save_fig("sensitivity_to_rotation_plot")
plt.show()
# Quadratic training set + noise
np.random.seed(42)
m = 200
X = np.random.rand(m, 1)
y = 4 * (X - 0.5) ** 2
y = y + np.random.randn(m, 1) / 10
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg.fit(X, y)
from sklearn.tree import DecisionTreeRegressor
tree_reg1 = DecisionTreeRegressor(random_state=42, max_depth=2)
tree_reg2 = DecisionTreeRegressor(random_state=42, max_depth=3)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)
def plot_regression_predictions(tree_reg, X, y, axes=[0, 1, -0.2, 1], ylabel="$y$"):
x1 = np.linspace(axes[0], axes[1], 500).reshape(-1, 1)
y_pred = tree_reg.predict(x1)
plt.axis(axes)
plt.xlabel("$x_1$", fontsize=18)
if ylabel:
plt.ylabel(ylabel, fontsize=18, rotation=0)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_regression_predictions(tree_reg1, X, y)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
plt.plot([split, split], [-0.2, 1], style, linewidth=2)
plt.text(0.21, 0.65, "Depth=0", fontsize=15)
plt.text(0.01, 0.2, "Depth=1", fontsize=13)
plt.text(0.65, 0.8, "Depth=1", fontsize=13)
plt.legend(loc="upper center", fontsize=18)
plt.title("max_depth=2", fontsize=14)
plt.subplot(122)
plot_regression_predictions(tree_reg2, X, y, ylabel=None)
for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")):
plt.plot([split, split], [-0.2, 1], style, linewidth=2)
for split in (0.0458, 0.1298, 0.2873, 0.9040):
plt.plot([split, split], [-0.2, 1], "k:", linewidth=1)
plt.text(0.3, 0.5, "Depth=2", fontsize=13)
plt.title("max_depth=3", fontsize=14)
save_fig("tree_regression_plot")
plt.show()
export_graphviz(
tree_reg1,
out_file=image_path("regression_tree.dot"),
feature_names=["x1"],
rounded=True,
filled=True
)
tree_reg1 = DecisionTreeRegressor(random_state=42)
tree_reg2 = DecisionTreeRegressor(random_state=42, min_samples_leaf=10)
tree_reg1.fit(X, y)
tree_reg2.fit(X, y)
x1 = np.linspace(0, 1, 500).reshape(-1, 1)
y_pred1 = tree_reg1.predict(x1)
y_pred2 = tree_reg2.predict(x1)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred1, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", fontsize=18, rotation=0)
plt.legend(loc="upper center", fontsize=18)
plt.title("No restrictions", fontsize=14)
plt.subplot(122)
plt.plot(X, y, "b.")
plt.plot(x1, y_pred2, "r.-", linewidth=2, label=r"$\hat{y}$")
plt.axis([0, 1, -0.2, 1.1])
plt.xlabel("$x_1$", fontsize=18)
plt.title("min_samples_leaf={}".format(tree_reg2.min_samples_leaf), fontsize=14)
save_fig("tree_regression_regularization_plot")
plt.show()
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=10000, noise=0.4, random_state=42)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
from sklearn.model_selection import GridSearchCV
params = {'max_leaf_nodes': list(range(2, 100)), 'min_samples_split': [2, 3, 4]}
grid_search_cv = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1, verbose=1)
grid_search_cv.fit(X_train, y_train)
grid_search_cv.best_estimator_
from sklearn.metrics import accuracy_score
y_pred = grid_search_cv.predict(X_test)
accuracy_score(y_test, y_pred)
from sklearn.model_selection import ShuffleSplit
n_trees = 1000
n_instances = 100
mini_sets = []
rs = ShuffleSplit(n_splits=n_trees, test_size=len(X_train) - n_instances, random_state=42)
for mini_train_index, mini_test_index in rs.split(X_train):
X_mini_train = X_train[mini_train_index]
y_mini_train = y_train[mini_train_index]
mini_sets.append((X_mini_train, y_mini_train))
from sklearn.base import clone
forest = [clone(grid_search_cv.best_estimator_) for _ in range(n_trees)]
accuracy_scores = []
for tree, (X_mini_train, y_mini_train) in zip(forest, mini_sets):
tree.fit(X_mini_train, y_mini_train)
y_pred = tree.predict(X_test)
accuracy_scores.append(accuracy_score(y_test, y_pred))
np.mean(accuracy_scores)
Y_pred = np.empty([n_trees, len(X_test)], dtype=np.uint8)
for tree_index, tree in enumerate(forest):
Y_pred[tree_index] = tree.predict(X_test)
from scipy.stats import mode
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0)
accuracy_score(y_test, y_pred_majority_votes.reshape([-1]))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training and visualizing
Step2: Predicting classes and class probabilities
Step3: Sensitivity to training set details
Step4: Regression trees
Step5: Exercise solutions
Step6: b. Split it into a training set and a test set using train_test_split().
Step7: c. Use grid search with cross-validation (with the help of the GridSearchCV class) to find good hyperparameter values for a DecisionTreeClassifier. Hint
Step8: d. Train it on the full training set using these hyperparameters, and measure your model's performance on the test set. You should get roughly 85% to 87% accuracy.
Step9: 8.
Step10: b. Train one Decision Tree on each subset, using the best hyperparameter values found above. Evaluate these 1,000 Decision Trees on the test set. Since they were trained on smaller sets, these Decision Trees will likely perform worse than the first Decision Tree, achieving only about 80% accuracy.
Step11: c. Now comes the magic. For each test set instance, generate the predictions of the 1,000 Decision Trees, and keep only the most frequent prediction (you can use SciPy's mode() function for this). This gives you majority-vote predictions over the test set.
Step12: d. Evaluate these predictions on the test set
|
14,973 | <ASSISTANT_TASK:>
Python Code:
from mmlspark import CNTKModel, ModelDownloader
from pyspark.sql.functions import udf, col
from pyspark.sql.types import IntegerType, ArrayType, FloatType, StringType
from pyspark.sql import Row
from os.path import abspath, join
import numpy as np
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import os, tarfile, pickle
import urllib.request
import nltk
modelName = "BiLSTM"
modelDir = abspath("models")
d = ModelDownloader(spark, "wasb://" + modelDir)
modelSchema = d.downloadByName(modelName)
modelName = "BiLSTM"
modelDir = abspath("models")
d = ModelDownloader(spark, "file://" + modelDir)
modelSchema = d.downloadByName(modelName)
nltk.download("punkt", download_dir=modelDir)
nltk.data.path.append(modelDir)
wordEmbFileName = "WordEmbeddings_PubMed.pkl"
pickleFile = join(abspath("models"), wordEmbFileName)
if not os.path.isfile(pickleFile):
urllib.request.urlretrieve("https://mmlspark.blob.core.windows.net/datasets/" + wordEmbFileName, pickleFile)
pickleContent = pickle.load(open(pickleFile, "rb"), encoding="latin-1")
wordToIndex = pickleContent["word_to_index"]
wordvectors = pickleContent["wordvectors"]
classToEntity = pickleContent["class_to_entity"]
nClasses = len(classToEntity)
nFeatures = wordvectors.shape[1]
maxSentenceLen = 613
content = "Baricitinib, Methotrexate, or Baricitinib Plus Methotrexate in Patients with Early Rheumatoid\
Arthritis Who Had Received Limited or No Treatment with Disease-Modifying-Anti-Rheumatic-Drugs (DMARDs):\
Phase 3 Trial Results. Keywords: Janus kinase (JAK), methotrexate (MTX) and rheumatoid arthritis (RA) and\
Clinical research. In 2 completed phase 3 studies, baricitinib (bari) improved disease activity with a\
satisfactory safety profile in patients (pts) with moderately-to-severely active RA who were inadequate\
responders to either conventional synthetic1 or biologic2DMARDs. This abstract reports results from a\
phase 3 study of bari administered as monotherapy or in combination with methotrexate (MTX) to pts with\
early active RA who had limited or no prior treatment with DMARDs. MTX monotherapy was the active comparator."
sentences = sent_tokenize(content)
df = spark.createDataFrame(enumerate(sentences), ["index","sentence"])
# Add the tokenizers to all worker nodes
def prepNLTK(partition):
localPath = abspath("nltk")
nltk.download("punkt", localPath)
nltk.data.path.append(localPath)
return partition
df = df.rdd.mapPartitions(prepNLTK).toDF()
tokenizeUDF = udf(word_tokenize, ArrayType(StringType()))
df = df.withColumn("tokens",tokenizeUDF("sentence"))
countUDF = udf(len, IntegerType())
df = df.withColumn("count",countUDF("tokens"))
def wordToEmb(word):
return wordvectors[wordToIndex.get(word.lower(), wordToIndex["UNK"])]
def featurize(tokens):
X = np.zeros((maxSentenceLen, nFeatures))
X[-len(tokens):,:] = np.array([wordToEmb(word) for word in tokens])
return [float(x) for x in X.reshape(maxSentenceLen, nFeatures).flatten()]
featurizeUDF = udf(featurize, ArrayType(FloatType()))
df = df.withColumn("features", featurizeUDF("tokens"))
df.show()
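# Illustrative sanity check (an addition, not part of the original pipeline; it
# assumes the word embeddings pickled above were loaded successfully): a
# featurized sentence should flatten to exactly maxSentenceLen * nFeatures floats.
assert len(featurize(word_tokenize("MTX monotherapy was the active comparator."))) == maxSentenceLen * nFeatures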
model = CNTKModel() \
.setModelLocation(spark, modelSchema.uri) \
.setInputCol("features") \
.setOutputCol("probs") \
.setOutputNodeIndex(0) \
.setMiniBatchSize(1)
df = model.transform(df).cache()
df.show()
def probsToEntities(probs, wordCount):
reshaped_probs = np.array(probs).reshape(maxSentenceLen, nClasses)
reshaped_probs = reshaped_probs[-wordCount:,:]
return [classToEntity[np.argmax(probs)] for probs in reshaped_probs]
toEntityUDF = udf(probsToEntities,ArrayType(StringType()))
df = df.withColumn("entities", toEntityUDF("probs", "count"))
df.show()
# Color Code the Text based on the entity type
colors = {
"B-Disease": "blue",
"I-Disease":"blue",
"B-Drug":"lime",
"I-Drug":"lime",
"B-Chemical":"lime",
"I-Chemical":"lime",
"O":"black",
"NONE":"black"
}
def prettyPrint(words, annotations):
formattedWords = []
for word,annotation in zip(words,annotations):
formattedWord = "<font size = '2' color = '{}'>{}</font>".format(colors[annotation], word)
if annotation in {"O","NONE"}:
formattedWords.append(formattedWord)
else:
formattedWords.append("<b>{}</b>".format(formattedWord))
return " ".join(formattedWords)
prettyPrintUDF = udf(prettyPrint, StringType())
df = df.withColumn("formattedSentence", prettyPrintUDF("tokens", "entities")) \
.select("formattedSentence")
sentences = [row["formattedSentence"] for row in df.collect()]
df.registerTempTable("df")
from IPython.core.display import display, HTML
for sentence in sentences:
display(HTML(sentence))
%%sql -q -o df
select * from df
%%local
sentences =df["formattedSentence"]
from IPython.core.display import display, HTML
for sentence in sentences:
display(HTML(sentence))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the model and extract the data.
Step2: Download the embeddings and the tokenizer
Step3: Load the embeddings and create functions for encoding sentences
Step4: Run the CNTKModel
Step5: Show the annotated text
|
14,974 | <ASSISTANT_TASK:>
Python Code:
# Load PredicSis.ai SDK
from predicsis import PredicSis
prj = PredicSis.project('Outbound Mail Campaign')
mdl = prj.default_schema().fit('My first model')
mdl.auc()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting insights
Step2: Build a model from the default schema
|
14,975 | <ASSISTANT_TASK:>
Python Code:
from functions import connect, forward, stop, left, right, disconnect, next_notebook
from time import sleep
connect() # Run this cell by pressing Shift + Enter
# move forward
# turn
# move forward
# turn
# move forward
# turn
# move forward
# turn
# stop
for i in range(4):
    # move forward
    # turn
# stop
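# A minimal sketch of one possible solution (illustrative only, not part of the
# original exercise): the sleep() timings are assumptions that will need tuning
# on the real robot, and forward()/right()/stop() are assumed to take no
# arguments, as suggested by the import above.
# for i in range(4):
#     forward()
#     sleep(2)      # drive straight for a while
#     right()
#     sleep(0.8)    # turn roughly 90 degrees
# stop()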
disconnect()
next_notebook('sensors')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Main program
Step2: Did it work on the first try? Making a perfect square is not easy, and most likely a couple of things will need adjusting
Step3: It is important that the instructions inside the loop are shifted to the right, that is, indented.
|
14,976 | <ASSISTANT_TASK:>
Python Code:
a = list(range(5))
print ("The list we created:", a, "of length", len(a))
b = list(range(6,10))
print ("The second list we created:", b, "of length", len(b))
a[1:3] = b # Line 7
print ("The first list after we changed a couple of elements is", a, "with length", len(a))
print ("hash of int(42) is", hash(42))
print ("hash of float(42.001) is", hash(42.001))
print ("hash of str('42') is", hash('42'))
try:
print ("hash of list(42) is", hash([42]))
except TypeError:
print("TypeError: unhashable type: 'list'")
print ("hash of tuple(42, '42') is", hash((42, '42')))
a = list(range(5))
print ("The list we created:", a)
b = a
print ("The second list we created:", b)
a[3] = 10
print ("The first list after changing it:", a)
print ("And the second list:", b)
a = list(range(5))
print ("The list we created:", a)
b = a[:]
print ("The second list we created:", b)
a[3] = 10
print ("The first list after changing it:", a)
print ("And the second list:", b)
a = list(range(5))
print ("The list we created:", a)
b = a
print ("The second list we created:", b)
b.append(11)
a.extend([12, 13])
print ("The first list after mutation:", a)
print ("The second list after mutation:", b)
a = list(range(5))
b = a + [11, 12]
print ("The list we created:", a)
print ("The second list we created:", b)
b.append(21)
a.extend([22, 23])
print ("The first list after mutation:", a)
print ("The second list after mutation:", b)
def rem_sublist(L, i, j):
L[i:j] = []
a = list(range(10))
print(a)
rem_sublist(a, 2, 5)
print(a)
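# Illustrative addition: passing a copy instead of the list itself leaves the
# caller's list untouched, because the function then mutates only the copy.
c = list(range(10))
rem_sublist(c[:], 2, 5)   # the slice [:] builds a new list object
print(c)                  # c is unchanged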
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interesting... We wanted to replace two elements (line 7), but ended up splicing in all four elements of b instead! In other words, we mutated list a with the contents of list b. "Mutability" means, simply, that we can change the structure of the object.
Step2: We see that we can get a hash from an int (the exact int value), from a float, and from a string (although their hashes are not as obvious as for an int), but not from a list. You can see that trying to call the hash() function on a list raises a TypeError.
Step3: Aliasing. Another important concept is aliasing. Let's start with an example.
Step4: What happened?
Step5: Almost all list methods in Python do not return a new list, but modify (or mutate) it. For that reason, if you have several aliases, all of them reflect the changes after list mutation.
Step6: Exercise. Aliasing also happens at function call boundaries. Try to predict what the following code will do before you run it.
|
14,977 | <ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import random
import numpy as np
%matplotlib inline
class Galaxy():
    """Galaxy class for simply representing a galaxy."""
def __init__(self, total_mass, cold_gas_mass, stellar_mass, age=0):
self.total_mass = total_mass
self.cold_gas_mass = cold_gas_mass
self.stellar_mass = stellar_mass
self.age = age
self.SFR = 0
self.color = 'red'
milky_way = Galaxy(1e12, 1e8, 1e10, age=5e9)
print(milky_way)
class Galaxy():
    """Galaxy class for simply representing a galaxy."""
def __init__(self, total_mass, cold_gas_mass, stellar_mass, age=0):
self.total_mass = total_mass
self.cold_gas_mass = cold_gas_mass
self.stellar_mass = stellar_mass
self.age = age
self.SFR = 0
self.color = 'red'
def __repr__(self):
return "Galaxy (m_total = %.1g; m_cold = %.1g; m_stars = %.1g; age = %.1g; SFR = %0.2f)" % \
(self.total_mass, self.cold_gas_mass, self.stellar_mass, self.age, self.SFR)
milky_way = Galaxy(1e12, 1e8, 1e10, age=5e9)
print(milky_way)
class EvolvingGalaxy(Galaxy):
    """Galaxy class for representing a galaxy that can evolve over time."""
milky_way = EvolvingGalaxy(1e12, 1e8, 1e10, age=5e9)
print(milky_way)
class EvolvingGalaxy(Galaxy):
    """Galaxy class for representing a galaxy that can evolve over time."""
def evolve(self, time):
        """Evolve this galaxy forward for a period of time."""
self.age += time
def current_state(self):
return (self.total_mass, self.cold_gas_mass, self.stellar_mass, self.age, self.SFR)
def integrate_time(galaxy, timestep, n_timesteps):
    """Integrate the time forward for a galaxy and record its state at each timestep; return as array."""
data_arr = np.empty([5, n_timesteps])
for i in range(n_timesteps):
galaxy.evolve(timestep)
data_arr[:,i] = galaxy.current_state()
return data_arr
def plot_galaxy_evolution(data_arr):
    """Plot the evolution of a galaxy from its input data array."""
plt.clf()
    plt.semilogy(data_arr[3], data_arr[0], color='k', label='Total')
    plt.semilogy(data_arr[3], data_arr[1], color='b', label='Gas')
    plt.semilogy(data_arr[3], data_arr[2], color='r', label='Stars')
    plt.semilogy(data_arr[3], data_arr[4], color='g', label='SFR')
plt.xlabel('Age')
plt.ylabel('Mass')
plt.legend(loc=1)
plt.show()
milky_way = EvolvingGalaxy(1e12, 1e8, 1e10, age=5e9)
data = integrate_time(milky_way, 1e6, 1000)
plot_galaxy_evolution(data)
class EvolvingGalaxy(Galaxy):
    """Galaxy class for representing a galaxy that can evolve over time."""
def current_state(self):
        """Return a tuple of the galaxy's total_mass, cold_gas_mass, stellar_mass, age, and SFR."""
return (self.total_mass, self.cold_gas_mass, self.stellar_mass, self.age, self.SFR)
def calculate_star_formation_rate(self):
        """Calculate the star formation rate by taking a random number between 0 and 1
        normalized by the galaxy total mass / 1e12; also updates the galaxy's color to
        blue if SFR > 0.01, otherwise color = red."""
self.SFR = random.random() * (self.total_mass / 1e12)
if self.SFR > 0.01:
self.color = 'blue'
else:
self.color = 'red'
def accrete_gas_from_IGM(self, time):
        """Allow the galaxy to accrete cold gas from the IGM at a variable rate normalized to
        the galaxy's mass."""
cold_gas_accreted = random.random() * 0.1 * time * (self.total_mass / 1e12)
self.cold_gas_mass += cold_gas_accreted
self.total_mass += cold_gas_accreted
def form_stars(self, time):
        """Form stars according to the current star formation rate and the time available;
        if there is not enough cold gas, shut off star formation."""
if self.cold_gas_mass > self.SFR * time:
self.cold_gas_mass -= self.SFR * time
self.stellar_mass += self.SFR * time
else:
self.SFR = 0
self.color = 'red'
def evolve(self, time):
        """Evolve this galaxy forward for a period of time."""
if random.random() < 0.01:
self.calculate_star_formation_rate()
self.accrete_gas_from_IGM(time)
self.form_stars(time)
self.age += time
milky_way = EvolvingGalaxy(1e12, 1e8, 1e10, age=5e9)
data = integrate_time(milky_way, 1e6, 10000)
plot_galaxy_evolution(data)
class MovingGalaxy(EvolvingGalaxy):
    """Galaxy class that can evolve and move in the x,y plane."""
def __init__(self, total_mass, cold_gas_mass, stellar_mass, x_position, y_position, x_velocity, y_velocity, idnum, age=0):
# Replace self with super to activate the superclass's methods
super().__init__(total_mass, cold_gas_mass, stellar_mass)
self.x_position = x_position
self.y_position = y_position
self.x_velocity = x_velocity
self.y_velocity = y_velocity
self.idnum = idnum
def __repr__(self):
return "Galaxy %i (x = %.0f; y = %.0f)" % (self.idnum, self.x_position, self.y_position)
milky_way = MovingGalaxy(1e12, 1e8, 1e10, 0, 0, 0, 0, 0)
print(milky_way)
class MovingGalaxy(EvolvingGalaxy):
    """This galaxy can move over time in the x,y plane."""
def __init__(self, total_mass, cold_gas_mass, stellar_mass, x_position, y_position, x_velocity, y_velocity, idnum, age=0):
# Replace self with super to activate the superclass's methods
super().__init__(total_mass, cold_gas_mass, stellar_mass)
self.x_position = x_position
self.y_position = y_position
self.x_velocity = x_velocity
self.y_velocity = y_velocity
self.idnum = idnum
def __repr__(self):
return "Galaxy %i (x = %.0f; y = %.0f)" % (self.idnum, self.x_position, self.y_position)
def move(self, time):
self.x_position += self.x_velocity * time
self.y_position += self.y_velocity * time
def calculate_momentum(self):
return (self.total_mass * self.x_velocity, self.total_mass * self.y_velocity)
def evolve(self, time):
self.move(time)
super().evolve(time)
def distance(galaxy1, galaxy2):
x_diff = galaxy1.x_position - galaxy2.x_position
y_diff = galaxy1.y_position - galaxy2.y_position
return (x_diff**2 + y_diff**2)**0.5
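# Quick sanity check of the helper above (an illustrative addition; the numbers
# are arbitrary and only chosen to give an easy-to-verify 3-4-5 triangle):
g1 = MovingGalaxy(1e12, 1e8, 1e10, 0, 0, 0, 0, idnum=1)
g2 = MovingGalaxy(1e12, 1e8, 1e10, 3, 4, 0, 0, idnum=2)
print(distance(g1, g2))   # expected: 5.0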
class Universe():
def __init__(self):
self.xrange = (0,100)
self.yrange = (0,100)
self.galaxies = []
self.added_galaxies = []
self.removed_galaxies = []
self.time = 0
pass
def __repr__(self):
out = 'Universe: t=%.2g\n' % self.time
for galaxy in self.galaxies:
out = "%s%s\n" % (out, galaxy)
return out
def add_galaxy(self, galaxy=None):
if galaxy is None:
stellar_mass = 10**(4*random.random()) * 1e6
cold_gas_mass = 10**(4*random.random()) * 1e6
total_mass = (cold_gas_mass + stellar_mass)*1e2
galaxy = MovingGalaxy(total_mass,
cold_gas_mass,
stellar_mass,
x_position=random.random()*100,
y_position=random.random()*100,
x_velocity=random.uniform(-1,1)*1e-7,
y_velocity=random.uniform(-1,1)*1e-7,
idnum=len(self.galaxies))
self.galaxies.append(galaxy)
def remove_galaxy(self, galaxy):
if galaxy in self.galaxies:
del self.galaxies[self.galaxies.index(galaxy)]
def evolve(self, time):
for galaxy in self.galaxies:
galaxy.evolve(time)
galaxy.x_position %= 100
galaxy.y_position %= 100
self.check_for_mergers()
for galaxy in self.removed_galaxies:
self.remove_galaxy(galaxy)
for galaxy in self.added_galaxies:
self.add_galaxy(galaxy)
self.removed_galaxies = []
self.added_galaxies = []
self.time += time
def merge_galaxies(self, galaxy1, galaxy2):
print('Merging:\n%s\n%s' % (galaxy1, galaxy2))
x_mom1, y_mom1 = galaxy1.calculate_momentum()
x_mom2, y_mom2 = galaxy2.calculate_momentum()
new_total_mass = galaxy1.total_mass + galaxy2.total_mass
new_galaxy = MovingGalaxy(total_mass = new_total_mass,
cold_gas_mass = galaxy1.cold_gas_mass + galaxy2.cold_gas_mass,
stellar_mass = galaxy1.stellar_mass + galaxy2.stellar_mass,
x_position = galaxy1.x_position,
y_position = galaxy1.y_position,
x_velocity = (x_mom1 + x_mom2) / new_total_mass,
y_velocity = (y_mom1 + y_mom2) / new_total_mass,
idnum = galaxy1.idnum)
self.added_galaxies.append(new_galaxy)
self.removed_galaxies.append(galaxy1)
self.removed_galaxies.append(galaxy2)
def check_for_mergers(self):
for i, galaxy1 in enumerate(self.galaxies):
for j, galaxy2 in enumerate(self.galaxies[i+1:]):
if distance(galaxy1, galaxy2) <= 2:
self.merge_galaxies(galaxy1, galaxy2)
def plot_state(self, frame_id):
plt.clf()
x = [galaxy.x_position for galaxy in self.galaxies]
y = [galaxy.y_position for galaxy in self.galaxies]
color = [galaxy.color for galaxy in self.galaxies]
size = [galaxy.total_mass / 1e9 for galaxy in self.galaxies]
plt.scatter(x,y, color=color, s=size)
        plt.xlim(self.xrange)
        plt.ylim(self.yrange)
plt.savefig('frame%04i.png' % frame_id)
uni = Universe()
n_timesteps = 2e2
n_galaxies = 25
for i in range(n_galaxies):
uni.add_galaxy()
for i in range(int(n_timesteps)):
uni.evolve(2e9/n_timesteps)
uni.plot_state(i)
%%bash
ffmpeg -r 20 -f image2 -i frame%04d.png -vcodec libx264 -pix_fmt yuv420p -crf 25 -y movie.mp4
%%HTML
<video width="1000" height="1000" controls>
<source src="movie.mp4" type="video/mp4">
</video>
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Problem 1) Create a galaxy class
Step3: Problem 1c
Step5: Problem 1d
Step7: Problem 2) Make a more interesting galaxy class that can evolve with time
Step10: Problem 2c
Step13: Problem 2d
Step20: Problem 2e
Step22: Problem 2f
Step25: Problem 3b
Step27: Problem 3c
Step28: Problem 3d
Step29: Problem 3e
|
14,978 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-2', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
14,979 | <ASSISTANT_TASK:>
Python Code:
sn.set_style("dark")
f, ax = pl.subplots(figsize=(9,9))
ax.imshow(stI[:,:,0], aspect='auto', cmap=pl.cm.gray)
contrastFull = np.std(stI[:,:,0]) / np.mean(stI[:,:,0])
contrastQuiet = np.std(stI[400:,100:300,0]) / np.mean(stI[400:,100:300,0])
print("Contrast in the image : {0}%".format(contrastFull * 100.0))
print("Contrast in the quiet Sun : {0}%".format(contrastQuiet * 100.0))
v = np.zeros((512,512))
for i in range(512):
for j in range(512):
pos = np.argmin(stI[i,j,20:40]) + 20
res = np.polyfit(wave[pos-2:pos+2], stI[i,j,pos-2:pos+2], 2)
w = -res[1] / (2.0 * res[0])
v[i,j] = (w-6301.5) / 6301.5 * 3e5
f, ax = pl.subplots(figsize=(9,9))
ax.imshow(np.clip(v,-5,5))
f.savefig('velocities.png')
f, ax = pl.subplots(nrows=1, ncols=2, figsize=(15,9))
ax[0].imshow(stI[:,0,:], aspect='auto', cmap=pl.cm.gray)
ax[1].imshow(stV[:,0,:], aspect='auto', cmap=pl.cm.gray)
f.savefig('exampleStokes.png')
X = stV[50:300,200:450,:].reshape((250*250,112))
maxV = np.max(np.abs(X), axis=1)
X = X / maxV[:,None]
nClusters = 9
km = MiniBatchKMeans(init='k-means++', n_clusters=nClusters, n_init=10, batch_size=500)
km.fit(X)
out = km.predict(X)
avg = np.zeros((nClusters,112))
for i in range(nClusters):
avg[i,:] = np.mean(X[out==i,:], axis=0)
f, ax = pl.subplots(ncols=3, nrows=3, figsize=(12,9))
loop = 0
for i in range(3):
for j in range(3):
        percentage = X[out==loop,:].shape[0] / (250*250.) * 100.0  # fraction of pixels assigned to class `loop`
ax[i,j].plot(km.cluster_centers_[loop,:])
ax[i,j].set_title('Class {0} - {1}%'.format(loop, percentage))
loop += 1
pl.tight_layout()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us compute simple things like the contrast and the Doppler velocity field
Step2: Now let us compute the velocity field. To this end, we compute the location of the core of the line in velocity units for each pixel.
Step3: Classification
|
14,980 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.interpolate import interp1d
import travelmaps2 as tm
from matplotlib import pyplot as plt
tm.setup(dpi=200)
fig_x = tm.plt.figure(figsize=(tm.cm2in([11, 6])))
# Locations
MDF = [19.433333, -99.133333] # Mexico City
OAX = [16.898056, -96.414167] # Oaxaca
PES = [15.861944, -97.067222] # Puerto Escondido
ACA = [16.863611, -99.8825] # Acapulco
PBL = [19., -97.883333] # Puebla
# Create basemap
m_x = tm.Basemap(width=3500000, height=2300000, resolution='c', projection='tmerc', lat_0=24, lon_0=-102)
# Plot image
###m_x.warpimage('./data/TravelMap/HYP_HR_SR_OB_DR/HYP_HR_SR_OB_DR.tif')
# Put a shade over non-Mexican countries
countries = ['USA', 'BLZ', 'GTM', 'HND', 'SLV', 'NIC', 'CUB']
tm.country(countries, m_x, fc='.8', ec='.3', lw=.5, alpha=.6)
# Fill states
fcs = 32*['none']
ecs = 32*['k']
lws = 32*[.2,]
tm.country('MEX', bmap=m_x, fc=fcs, ec=ecs, lw=lws, adm=1)
ecs = 32*['none']
#ecs[19] = 'r'
lws = 32*[1,]
tm.country('MEX', bmap=m_x, fc=fcs, ec=ecs, lw=lws, adm=1)
# Add arrows
tm.arrow(MDF, ACA, m_x, rad=.3)
tm.arrow(ACA, PES, m_x, rad=.3)
#tm.arrow(PES, OAX, m_x, rad=-.3)
tm.arrow(OAX, PBL, m_x, rad=.3)
#tm.arrow(PBL, MDF, m_x, rad=.3)
# Add visited cities
tm.city(OAX, 'Oaxaca', m_x, offs=[.6, 0])
tm.city(MDF, 'Mexiko-Stadt', m_x, offs=[-.6, .6], halign="right")
tm.city(PES, 'Puerto Escondido', m_x, offs=[-2, -1.5])
tm.city(ACA, 'Acapulco', m_x, offs=[-.8, 0], halign="right")
tm.city(PBL, 'Puebla', m_x, offs=[.6, .6])
# Save-path
#fpath = '../mexico.werthmuller.org/content/images/simon/'
#tm.plt.savefig(fpath+'MapSSTrip.png', bbox_inches='tight')
tm.plt.show()
fig_p,ax = plt.subplots(figsize=(tm.cm2in([10.8, 5])))
# Switch off axis and ticks
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('none')
# Get data
pdat = np.loadtxt('./data/Mexico/SSTripData.txt', skiprows=1)
# Plot City names and kilometers
opt = {'horizontalalignment':'center', 'verticalalignment':'left', 'rotation':'vertical'}
plt.annotate('Mexiko-Stadt', (0, 3000), **opt)
plt.annotate('Acapulco', (373, 600), **opt)
plt.annotate('Puerto Escondido', (773, 600), **opt)
plt.annotate('V. Sola de Vega', (890, 2200), **opt)
plt.annotate('Oaxaca', (1032, 2200), **opt)
plt.annotate('Puebla', (1368, 2600), **opt)
plt.annotate('Mexiko-Stadt', (1501, 3000), **opt)
# Ticks, hlines, axis
plt.xticks(np.arange(7)*250, ('0 km', '', '500 km', '', '1000 km', '', '1500 km'))
plt.yticks(np.arange(8)*500, ('0 m', '', '1000 m', '', '2000 m', '', '3000 m', ''))
plt.hlines([0, 1000, 2000, 3000], -100, 1600, colors='.8')
plt.hlines([500, 1500, 2500, 3500], -100, 1600, colors='.8', lw=.5)
plt.axis([-50, 1550, -300, 6000])
# Sum up differences to get distance, distance starts now at every waypoint
distance = np.cumsum(pdat[:,4])/1000 # 41443 data points
# Reduce data points to 1500
reduced = interp1d(distance, pdat[:,2], 'slinear')
ndist = np.linspace(0, 1500, 1500)
nelev = reduced(ndist)
# Plot data
plt.plot(ndist, nelev)
# Save-path
#fpath = '../mexico.werthmuller.org/content/images/simon/'
#plt.savefig(fpath+'Profile.png', bbox_inches='tight')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Map
Step2: 2. Profile
|
14,981 | <ASSISTANT_TASK:>
Python Code:
%pylab notebook
%precision %.4g
V = 120 # [V]
p = 4
R1 = 2.0 # [Ohm]
R2 = 2.8 # [Ohm]
X1 = 2.56 # [Ohm]
X2 = 2.56 # [Ohm]
Xm = 60.5 # [Ohm]
s = 0.05
Prot = 51 # [W]
Zf = ((R2/s + X2*1j)*(Xm*1j)) / (R2/s + X2*1j + Xm*1j)
Zf
Zb = ((R2/(2-s) + X2*1j)*(Xm*1j)) / (R2/(2-s) + X2*1j + Xm*1j)
Zb
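# Double-revolving-field model: the pulsating stator field is split into a forward
# component seen by the rotor at slip s (impedance Zf) and a backward component at
# slip 2-s (impedance Zb), each rotor branch in parallel with the magnetizing reactance Xm.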
I1 = V / (R1 +X1*1j + 0.5*Zf + 0.5*Zb)
I1_angle = arctan(I1.imag/I1.real)
print('I1 = {:.3f} V ∠{:.1f}°'.format(abs(I1), I1_angle/pi*180))
Pin = V*abs(I1)*cos(I1_angle)
print('''
Pin = {:.1f} W
============='''.format(Pin))
Pag_f = abs(I1)**2*0.5*Zf.real
Pag_f
Pag_b = abs(I1)**2*0.5*Zb.real
Pag_b
Pag = Pag_f - Pag_b
print('''
Pag = {:.0f} W
==========='''.format(Pag))
Pconv_f = (1-s)*Pag_f
Pconv_f
Pconv_b = (1-s)*Pag_b
Pconv_b
Pconv = Pconv_f - Pconv_b
print('''
Pconv = {:.0f} W
============='''.format(Pconv))
Pout = Pconv - Prot
print('''
Pout = {:.0f} W
============'''.format(Pout))
n_sync = 1800.0 # [r/min]
w_sync = n_sync * (2.0*pi/1.0) * (1.0/60.0)
tau_ind = Pag / w_sync
print('''
τ_ind = {:.2f} Nm
==============='''.format(tau_ind))
w_m = (1-s)*w_sync
tau_load = Pout / w_m
print('''
τ_load = {:.2f} Nm
================'''.format(tau_load))
eta = Pout/Pin
print('''
η = {:.1f} %
=========='''.format(eta*100))
PF = cos(I1_angle)
print('''
PF = {:.3f} lagging
=================='''.format(PF))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: If the slip is 0.05, find the following quantities for this motor
Step3: $$Z_B = \frac{(R_2/(2-s) + jX_2)(jX_M)}{R_2/(2-s) + jX_2 + jX_M}$$
Step4: (a)
Step5: (b)
Step6: (c)
Step7: (d)
Step8: (e)
Step9: (f)
Step10: (g)
Step11: (h)
|
14,982 | <ASSISTANT_TASK:>
Python Code:
send(IP(dst="1.2.3.4")/TCP(dport=502, options=[("MSS", 0)]))
ans = sr([IP(dst="8.8.8.8", ttl=(1, 8), options=IPOption_RR())/ICMP(seq=RandShort()), IP(dst="8.8.8.8", ttl=(1, 8), options=IPOption_Traceroute())/ICMP(seq=RandShort()), IP(dst="8.8.8.8", ttl=(1, 8))/ICMP(seq=RandShort())], verbose=False, timeout=3)[0]
ans.make_table(lambda x, y: (", ".join(z.summary() for z in x[IP].options) or '-', x[IP].ttl, y.sprintf("%IP.src% %ICMP.type%")))
from scapy.all import *
packet = IP()/TCP()
Ether()/packet
>>> ls(IP, verbose=True)
version : BitField (4 bits) = (4)
ihl : BitField (4 bits) = (None)
tos : XByteField = (0)
len : ShortField = (None)
id : ShortField = (1)
flags : FlagsField (3 bits) = (0)
MF, DF, evil
frag : BitField (13 bits) = (0)
ttl : ByteField = (64)
proto : ByteEnumField = (0)
chksum : XShortField = (None)
src : SourceIPField (Emph) = (None)
dst : DestIPField (Emph) = (None)
options : PacketListField = ([])
p = Ether()/IP(dst="www.secdev.org")/TCP()
p.summary()
print(p.dst) # first layer that has an src field, here Ether
print(p[IP].src) # explicitly access the src field of the IP layer
# sprintf() is a useful method to display fields
print(p.sprintf("%Ether.src% > %Ether.dst%\n%IP.src% > %IP.dst%"))
print(p.sprintf("%TCP.flags% %TCP.dport%"))
[p for p in IP(ttl=(1,5))/ICMP()]
p = sr1(IP(dst="8.8.8.8")/UDP()/DNS(qd=DNSQR()))
p[DNS].an
r, u = srp(Ether()/IP(dst="8.8.8.8", ttl=(5,10))/UDP()/DNS(rd=1, qd=DNSQR(qname="www.example.com")))
r, u
# Access the first tuple
print(r[0][0].summary()) # the packet sent
print(r[0][1].summary()) # the answer received
# Access the ICMP layer. Scapy received a time-exceeded error message
r[0][1][ICMP]
wrpcap("scapy.pcap", r)
pcap_p = rdpcap("scapy.pcap")
pcap_p[0]
s = sniff(count=2)
s
sniff(count=2, prn=lambda p: p.summary())
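# sniff() also accepts a BPF filter to restrict what is captured; for example
# (assuming a live interface with DNS traffic, left commented out here):
# sniff(filter="udp and port 53", count=2, prn=lambda p: p.summary())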
import socket
sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # create an UDP socket
sck.connect(("8.8.8.8", 53)) # connect to 8.8.8.8 on 53/UDP
# Create the StreamSocket and gives the class used to decode the answer
ssck = StreamSocket(sck)
ssck.basecls = DNS
# Send the DNS query
ssck.sr1(DNS(rd=1, qd=DNSQR(qname="www.example.com")))
ans, unans = srloop(IP(dst=["8.8.8.8", "8.8.4.4"])/ICMP(), inter=.1, timeout=.1, count=100, verbose=False)
%matplotlib inline
ans.multiplot(lambda x, y: (y[IP].src, (y.time, y[IP].id)), plot_xy=True)
pkt = IP() / UDP() / DNS(qd=DNSQR())
print(repr(raw(pkt)))
print(pkt.summary())
hexdump(pkt)
pkt.show()
pkt.canvas_dump()
ans, unans = traceroute('www.secdev.org', maxttl=15)
ans.world_trace()
ans = sr(IP(dst=["scanme.nmap.org", "nmap.org"])/TCP(dport=[22, 80, 443, 31337]), timeout=3, verbose=False)[0]
ans.extend(sr(IP(dst=["scanme.nmap.org", "nmap.org"])/UDP(dport=53)/DNS(qd=DNSQR()), timeout=3, verbose=False)[0])
ans.make_table(lambda x, y: (x[IP].dst, x.sprintf('%IP.proto%/{TCP:%r,TCP.dport%}{UDP:%r,UDP.dport%}'), y.sprintf('{TCP:%TCP.flags%}{ICMP:%ICMP.type%}')))
class DNSTCP(Packet):
name = "DNS over TCP"
fields_desc = [ FieldLenField("len", None, fmt="!H", length_of="dns"),
PacketLenField("dns", 0, DNS, length_from=lambda p: p.len)]
# This method tells Scapy that the next packet must be decoded with DNSTCP
def guess_payload_class(self, payload):
return DNSTCP
# Build then decode a DNS message over TCP
DNSTCP(raw(DNSTCP(dns=DNS())))
import socket
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create an TCP socket
sck.connect(("8.8.8.8", 53)) # connect to 8.8.8.8 on 53/TCP
# Create the StreamSocket and gives the class used to decode the answer
ssck = StreamSocket(sck)
ssck.basecls = DNSTCP
# Send the DNS query
ssck.sr1(DNSTCP(dns=DNS(rd=1, qd=DNSQR(qname="www.example.com"))))
from scapy.all import *
import argparse
parser = argparse.ArgumentParser(description="A simple ping6")
parser.add_argument("ipv6_address", help="An IPv6 address")
args = parser.parse_args()
print(sr1(IPv6(dst=args.ipv6_address)/ICMPv6EchoRequest(), verbose=0).summary())
# Specify the Wi-Fi monitor interface
#conf.iface = "mon0" # uncomment to test
# Create an answering machine
class ProbeRequest_am(AnsweringMachine):
function_name = "pram"
# The fake mac of the fake access point
mac = "00:11:22:33:44:55"
def is_request(self, pkt):
return Dot11ProbeReq in pkt
def make_reply(self, req):
rep = RadioTap()
# Note: depending on your Wi-Fi card, you might need a different header than RadioTap()
rep /= Dot11(addr1=req.addr2, addr2=self.mac, addr3=self.mac, ID=RandShort(), SC=RandShort())
rep /= Dot11ProbeResp(cap="ESS", timestamp=time.time())
rep /= Dot11Elt(ID="SSID",info="Scapy !")
rep /= Dot11Elt(ID="Rates",info=b'\x82\x84\x0b\x16\x96')
rep /= Dot11Elt(ID="DSset",info=chr(10))
        return rep
# Start the answering machine
#ProbeRequest_am()() # uncomment to test
from scapy.all import *
import nfqueue, socket
def scapy_cb(i, payload):
s = payload.get_data() # get and parse the packet
p = IP(s)
# Check if the packet is an ICMP Echo Request to 8.8.8.8
if p.dst == "8.8.8.8" and ICMP in p:
# Delete checksums to force Scapy to compute them
del(p[IP].chksum, p[ICMP].chksum)
# Set the ICMP sequence number to 0
p[ICMP].seq = 0
# Let the modified packet go through
ret = payload.set_verdict_modified(nfqueue.NF_ACCEPT, raw(p), len(p))
else:
# Accept all packets
payload.set_verdict(nfqueue.NF_ACCEPT)
# Get an NFQUEUE handler
q = nfqueue.queue()
# Set the function that will be call on each received packet
q.set_callback(scapy_cb)
# Open the queue & start parsing packes
q.fast_open(2807, socket.AF_INET)
q.try_run()
class TCPScanner(Automaton):
@ATMT.state(initial=1)
def BEGIN(self):
pass
@ATMT.state()
def SYN(self):
print("-> SYN")
@ATMT.state()
def SYN_ACK(self):
print("<- SYN/ACK")
raise self.END()
@ATMT.state()
def RST(self):
print("<- RST")
raise self.END()
@ATMT.state()
def ERROR(self):
print("!! ERROR")
raise self.END()
@ATMT.state(final=1)
def END(self):
pass
@ATMT.condition(BEGIN)
def condition_BEGIN(self):
raise self.SYN()
@ATMT.condition(SYN)
def condition_SYN(self):
if random.randint(0, 1):
raise self.SYN_ACK()
else:
raise self.RST()
@ATMT.timeout(SYN, 1)
def timeout_SYN(self):
raise self.ERROR()
TCPScanner().run()
TCPScanner().run()
# Instantiate the blocks
clf = CLIFeeder()
ijs = InjectSink("enx3495db043a28")
# Plug blocks together
clf > ijs
# Create and start the engine
pe = PipeEngine(clf)
pe.start()
clf.send("Hello Scapy !")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2_ Advanced firewalking using IP options is sometimes useful to perform network enumeration. Here is a more complicated one-liner
Step2: Now that we've got your attention, let's start the tutorial !
Step3: First steps
Step4: This last output displays the packet summary. Here, Scapy automatically filled the Ethernet type as well as the IP protocol field.
Step5: Let's create a new packet to a specific IP destination. With Scapy, each protocol field can be specified. As shown in the ls() output, the interesting field is dst.
Step6: There are not many differences with the previous example. However, Scapy used the specific destination to perform some magic tricks !
Step7: Scapy uses default values that work most of the time. For example, TCP() is a SYN segment to port 80.
Step8: Moreover, Scapy has implicit packets. For example, they are useful to make the TTL field value vary from 1 to 5 to mimic traceroute.
Step9: Sending and receiving
Step10: Another alternative is the sr() function. Like srp1(), the sr1() function can be used for layer 2 packets.
Step11: sr() sent a list of packets, and returns two variables, here r and u, where
Step12: With Scapy, list of packets, such as r or u, can be easily written to, or read from PCAP files.
Step13: Sniffing the network is as straightforward as sending and receiving packets. The sniff() function returns a list of Scapy packets, that can be manipulated as previously described.
Step14: sniff() has many arguments. The prn one accepts a function name that will be called on received packets. Using the lambda keyword, Scapy could be used to mimic the tshark command behavior.
Step15: Alternatively, Scapy can use OS sockets to send and receive packets. The following example assigns an UDP socket to a Scapy StreamSocket, which is then used to query www.example.com IPv4 address.
Step16: Visualization
Step17: Then we can use the results to plot the IP id values.
Step18: The raw() constructor can be used to "build" the packet's bytes as they would be sent on the wire.
Step19: Since some people cannot read this representation, Scapy can
Step20: "hexdump" the packet's bytes
Step21: dump the packet, layer by layer, with the values for each field
Step22: render a pretty and handy dissection of the packet
Step23: Scapy has a traceroute() function, which basically runs a sr(IP(ttl=(1..30)) and creates a TracerouteResult object, which is a specific subclass of SndRcvList().
Step24: The result can be plotted with .world_trace() (this requires GeoIP module and data, from MaxMind)
Step25: The PacketList.make_table() function can be very helpful. Here is a simple "port scanner"
Step26: Implementing a new protocol
Step27: This new packet definition can be direcly used to build a DNS message over TCP.
Step28: Modifying the previous StreamSocket example to use TCP allows to use the new DNSCTP layer easily.
Step29: Scapy as a module
Step30: Answering machines
Step31: Cheap Man-in-the-middle with NFQUEUE
Step32: Automaton
Step33: Pipes
Step34: Packet can be sent using the following command on the prompt
|
14,983 | <ASSISTANT_TASK:>
Python Code:
from sympy import *
from sympy.abc import n, i, N, x, lamda, phi, z, j, r, k, a, t, alpha
from sequences import *
init_printing()
m = 5
d_fn, h_fn = Function('d'), Function('h')
d, h = IndexedBase('d'), IndexedBase('h')
rows, cols = 5, 5
ctor = lambda i,j: d[i,j]
Matrix(rows, cols, ctor)
d_series = Eq(d_fn(t), 1+sum(d[i]*t**i for i in range(1,m)))
h_series = Eq(h_fn(t), t*(1+sum(h[i]*t**i for i in range(1,m-1)))).expand()
d_series, h_series
R = Matrix(m, m, riordan_matrix_by_convolution(m, d_series, h_series))
R
production_matrix(R) # too verbose to show
d_series = Eq(d_fn(t), 1/(1-t))
h_series = Eq(h_fn(t), t*d_series.rhs)
d_series, h_series
R = Matrix(10, 10, riordan_matrix_by_convolution(10, d_series, h_series))
R
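# This (d, h) pair, d(t) = 1/(1-t) and h(t) = t/(1-t), is the Riordan array of the
# Pascal triangle; its A-sequence is 1 + t and its Z-sequence is 1, matching the
# A/Z construction used a few cells below.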
dim = 5
a, b, b_bar, c = symbols(r'a b \bar{b} c')
M = Matrix(dim, dim,
riordan_matrix_by_recurrence(
dim, lambda n, k: {(n-1, k-1):a,
(n-1, k): b if k else b_bar,
(n-1, k+1):c}))
M
production_matrix(M)
Msubs = M.subs({a:1, b_bar:b})
Msubs, production_matrix(Msubs)
A, Z = Function('A'), Function('Z')
A_eq = Eq(A(t), 1 + t)
Z_eq = Eq(Z(t),1)
A_eq, Z_eq
R = Matrix(10, 10, riordan_matrix_by_AZ_sequences(10, (Z_eq, A_eq)))
R, production_matrix(R)
A = Function('A')
A_ones = Eq(A(t), 1/(1-t))
R = Matrix(10, 10, riordan_matrix_by_AZ_sequences(10, (A_ones, A_ones)))
R, production_matrix(R)
dim = 5
A = Function('A')
a = IndexedBase('a')
A_gen = Eq(A(t), sum((a[j] if j else 1)*t**j for j in range(dim)))
R = Matrix(dim, dim, riordan_matrix_by_AZ_sequences(dim, (A_gen, A_gen)))
R
z = IndexedBase('z')
A_gen = Eq(A(t), sum((a[j] if j else 1)*t**j for j in range(dim)))
Z_gen = Eq(Z(t), sum((z[j] if j else 1)*t**j for j in range(dim)))
Raz = Matrix(dim, dim, riordan_matrix_by_AZ_sequences(dim, (Z_gen, A_gen)))
Raz
production_matrix(R), production_matrix(Raz)
H = Function('h')
C_eq = Eq(H(t), (1-sqrt(1-4*t))/2)
C_eq, compositional_inverse(C_eq)
P_eq = Eq(H(t), t/(1-t))
(P_eq,
compositional_inverse(P_eq),
compositional_inverse(compositional_inverse(P_eq), y=t))
d_series = Eq(d_fn(t), 1/(1-t))
h_series = Eq(h_fn(t), t/(1-t))
P_inverse = group_inverse(d_series, h_series)
P_inverse
R = Matrix(10, 10, riordan_matrix_by_convolution(10, *P_inverse))
R, production_matrix(R)
catalan_term = (1-sqrt(1-4*t))/(2*t)
d_series = Eq(d_fn(t), catalan_term)
h_series = Eq(h_fn(t), t*catalan_term)
C_inverse = group_inverse(d_series, h_series, post=radsimp)
C_inverse
R = Matrix(10, 10, riordan_matrix_by_convolution(10, C_inverse[0], C_inverse[1]))
R
d_series = Eq(d_fn(t), 1)
h_series = Eq(h_fn(t), exp(t)-1)
d_series, h_series
R = Matrix(10, 10, riordan_matrix_exponential(
riordan_matrix_by_convolution(10, d_series, h_series)))
R
production_matrix(R), production_matrix(R, exp=True)
inspect(R)
d_series = Eq(d_fn(t), 1/(1-t))
h_series = Eq(h_fn(t), t/(1-t))
d_series, h_series
R = Matrix(10, 10, riordan_matrix_exponential(
riordan_matrix_by_convolution(10, d_series, h_series)))
R
production_matrix(R), production_matrix(R, exp=True)
inspect(R)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: By series convolution
Step3: By recurrence relation
Step4: By $A, Z$ sequences
Step5: $\mathcal{C}$
Step6: $\mathcal{R}$
Step7: Compositional inverse
Step8: Group inverse
Step9: Exponential RA
Step10: https
|
14,984 | <ASSISTANT_TASK:>
Python Code:
!wget -P ../output -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_final.csv
!wget -P ../output -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_nrt.csv
!wget -P ../output -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_final.csv
!wget -P ../output -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_nrt.csv
hemisphere = 'north' # 'south' or 'north'
climatology_years = (1981, 2010)
# some imports for working with pandas, and excel files.
import datetime as dt
import numpy as np
import os
import pandas as pd
from pandas import ExcelWriter
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
pd.options.display.mpl_style = 'default'
# code for reading a hemisphere of data from CSV files.
def parse_the_date(year, mm, dd):
return dt.date(int(year), int(mm), int(dd))
def slurp_csv(filename):
data = pd.read_csv(filename, header = None, skiprows=2,
names=["year", "mm", "dd", "extent", "missing", "source"],
parse_dates={'date':['year', 'mm', 'dd']},
date_parser=parse_the_date, index_col='date')
data = data.drop(['missing', 'source'], axis=1)
return data
def read_a_hemisphere(hemisphere):
the_dir = "../output"
final_prod_filename = os.path.join(the_dir, '{hemi}H_seaice_extent_final.csv'.format(hemi=hemisphere[0:1].upper()))
nrt_prod_filename = os.path.join(the_dir, '{hemi}H_seaice_extent_nrt.csv'.format(hemi=hemisphere[0:1].upper()))
final = slurp_csv(final_prod_filename)
nrt = slurp_csv(nrt_prod_filename)
all_data = pd.concat([final, nrt])
return all_data
df = read_a_hemisphere(hemisphere)
# df.head(3) => just shows 3 rows from your dataframe
df.head(3)
# index before turning into DatetimeIndex
print df.index[0:5]
df.index = pd.to_datetime(df.index)
df = df.reindex(index=pd.date_range('1978-10-25', dt.date.today().strftime('%Y-%m-%d')))
df['hemi'] = hemisphere
print( df.head())
print("\nindex: ")
print( df.index)
df['backfill'] = df.extent.fillna(method='bfill', limit=1)
df['forwardfill'] = df.extent.fillna(method='ffill', limit=1)
print(df.head())
print(df['19871201':'19871206'])
print(df['19880110':'19880114'])
is_really_nan = pd.isnull(df['backfill']) | pd.isnull(df['forwardfill'])
df['interpolated'] = df.extent.interpolate()
#df['interpolated'].loc[is_really_nan] = np.nan
df.interpolated.loc[is_really_nan == True] = np.nan
df = df.drop(['forwardfill', 'backfill'], axis=1)
df.head()
df['5 Day'] = pd.rolling_mean(df['interpolated'], window=5, min_periods=2)
df.head()
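# Note: pd.rolling_mean() was removed in later pandas versions; the equivalent call is
# df['interpolated'].rolling(window=5, min_periods=2).mean()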
clim_data = df[(df.index.year >= climatology_years[0])&(df.index.year <= climatology_years[1] )].copy()
print clim_data.head(3),"\n...\n" ,clim_data.tail(3)
print len(np.unique(clim_data.index.year))
print np.unique(clim_data.index.year)
def clim_string(climatology_years):
return '{0}-{1}'.format(climatology_years[0], climatology_years[1])
def get_climatological_means(column, clim_data):
means = clim_data.copy()
means = means.groupby([clim_data.index.month, clim_data.index.day]).mean()[[column]]
means = means.rename(columns={column: clim_string(climatology_years)})
return means
daily_means = get_climatological_means('interpolated', clim_data)
five_day_means = get_climatological_means('5 Day', clim_data)
print five_day_means.head()
testmeans = clim_data.groupby([clim_data.index.month, clim_data.index.day]).mean()[['interpolated']]
testmeans.head(1)
clim_data[(clim_data.index.month == 1)&(clim_data.index.day == 1)]['interpolated'].values
np.nanmean(clim_data[(clim_data.index.month == 1)&(clim_data.index.day == 1)]['interpolated'].values)
df.index
import calendar
month_names = [calendar.month_name[x] for x in range(1,13)]
df.head(2)
df= df[['extent']].set_index([df.index.year, df.index.month, df.index.day]).unstack(0)
df.head(3)
print df.columns.nlevels
print df.columns.levels
print daily_means.columns.nlevels
df.columns = df.columns.droplevel(0)
print df.columns.nlevels
df = pd.concat([df, daily_means.copy()], axis=1)
df.to_csv('test.csv')
# cleanup
!cd ../output; rm -f NH_seaice_extent_final.csv NH_seaice_extent_nrt.csv SH_seaice_extent_final.csv SH_seaice_extent_nrt.csv
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables to set before running
Step2: Set date index to a special DatetimeIndex and then Reindex the dataframe so
Step3: interpolate missing data in SMMR period.
Step4: See below that in the backfill column, 1978-10-25 was filled with the value
Step5: See that 1987-12-03 gets a forward, but not a backfill
Step6: See that 1988-01-12 gets a backfill, but not a forwardfill
Step7: So the union of backfill's NaN and forward fill NaN will capture any missing
Step8: Use the interpolation scheme to do simple linear regression on the entire extent column
Step9: So now we have a simple dataframe with daily extents and daily interpolated extents
Step10: Add 5 day rolling mean from the interpolated data to the extent.
Step11: Compute climatological means by selecting a copy of the data between your desired climatology years.
Step12: show the years of the climatology and then number of years to work with.
Step13: grab the mean value of the interpolated extents for each month/day combination
Step14: check yourself
Step15: Select the January 1 data for climatology_years
Step16: Get the daily extent data into the correct format for display and for concatenating with the clim_averages
Step17: right now the data is all stored a timeseries with an index of datetimes and
Step18: So we would like to reorder (pivot) the data into a nice dataframe where
Step19: We now want to concat the climatology means on to this newly shaped dataframe.
Step20: so drop the extra extent level
Step21: Now concatinate and the dataframe is ready to be output.
|
14,985 | <ASSISTANT_TASK:>
Python Code:
from pylab import *
loss = loadtxt('loss.out')
loglog(loss[:, 1:6])
loglog(loss[:, 7:9])
xlabel('Generation/100')
ylabel('Loss')
legend(['Total', 'L1-regularization', 'L2-regularization', 'Energy-train', 'Force-train', 'Energy-test', 'Force-test'])
tight_layout()
energy_test = loadtxt('energy_test.out')
plot(energy_test[:, 1], energy_test[:, 0], '.')
plot(linspace(-3.85,-3.69), linspace(-3.85,-3.69), '-')
xlabel('DFT energy (eV/atom)')
ylabel('NEP energy (eV/atom)')
tight_layout()
force_test = loadtxt('force_test.out')
plot(force_test[:, 3:6], force_test[:, 0:3], '.')
plot(linspace(-4,4), linspace(-4,4), '-')
xlabel('DFT force (eV/A)')
ylabel('NEP force (eV/A)')
legend(['x direction', 'y direction', 'z direction'])
tight_layout()
virial_test = loadtxt('virial_test.out')
plot(virial_test[:, 1], virial_test[:, 0], '.')
plot(linspace(-2,2), linspace(-2,2), '-')
xlabel('DFT virial (eV/atom)')
ylabel('NEP virial (eV/atom)')
tight_layout()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4.1. Checking the loss.out file.
Step2: 4.2. Checking the energy_test.out file
Step3: 4.3. Checking the force_test.out file
Step4: 4.4. Checking the virial_test.out file
|
14,986 | <ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import pandas as pd
import re
from operator import itemgetter, attrgetter
def median(dataPoints):
"computer median of given data points"
if not dataPoints:
raise 'no datapoints passed'
sortedpoints=sorted(dataPoints)
mid=len(dataPoints)//2
#even
#print mid , sortedpoints
if len(dataPoints)%2==0:
return (sortedpoints[mid-1] + sortedpoints[mid])/2.0
else:
# odd
return sortedpoints[mid]
def range(dataPoints):
"compute range of given data points"
if not dataPoints:
        raise ValueError('no datapoints passed')
    # statistical range = max - min (note: this helper shadows the built-in range())
    return max(dataPoints) - min(dataPoints)
def quartiles(dataPoints):
"computer first and last quartile in the datalist"
if not dataPoints:
raise 'no datapoints passed'
sortedpoints=sorted(dataPoints)
mid=len(dataPoints)//2
#even
if(len(dataPoints)%2==0):
print sortedpoints[:mid]
lowerQ=median(sortedpoints[:mid])
upperQ=median(sortedpoints[mid:])
else:
lowerQ=median(sortedpoints[:mid])
upperQ=median(sortedpoints[mid+1:])
return lowerQ,upperQ
def summary(dataPoints):
"print stat summary of data"
if not dataPoints:
        raise ValueError('no datapoints passed')
print "Summary Statistics:"
print ("Min : " , min(dataPoints))
print ("First Quartile : ",quartiles(dataPoints)[0] )
print ("median : ", median(dataPoints))
print ("Second Quartile : ", quartiles(dataPoints)[1])
print ("max : ", max(dataPoints))
return ""
datapoints=[68, 83, 58, 84, 100, 64]
#quartiles(datapoints)
print summary(datapoints)
C=50
H=30
def f1(inputList):
answer= [math.sqrt((2*C*num*1.0)/H) for num in inputList]
return ','.join(str (int(round(num))) for num in answer)
string='100,150,180'
nums=[int(num ) for num in string.split(',')]
type(nums)
print f1(nums)
dimensions=[3,5]
rows=dimensions[0]
columns=dimensions[1]
array=np.zeros((rows,columns))
#print array
for row in range(rows):
for column in range(columns):
array[row][column]=row*column
print array
string='without,hello,bag,world'
wordList=string.split(',')
wordList.sort()
#print wordList
print ','.join(word for word in wordList)
def check_password(items):
values=[]
for string in items:
        if len(string) < 6 or len(string) > 12:
continue
else :
pass
if not re.search('[a-z]',string):
continue
elif not re.search('[0-9]',string):
continue
elif not re.search('[A-Z]',string):
continue
elif not re.search('[$#@]',string):
continue
elif re.search('\s',string):
continue
else :pass
values.append(string)
return ','.join(pwd for pwd in values)
string='ABd1234@1,a F1#,2w3E*,2We3345 '
items=string.split(',')
print check_password(items)
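# Only the first candidate satisfies every rule, so this should print: ABd1234@1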
string= 'Tom,19,80 John,20,90 Jony,17,91 Jony,17,93 Json,21,85'
items= [ tuple(item.split(',')) for item in string.split(' ')]
print sorted(items, key=itemgetter(0,1,2))
string='New to Python or choosing between Python 2 and Python 3? Read Python 2 or Python 3.'
freq={}
for word in string.split(' '):
freq[word]=freq.get(word,0)+1
words=freq.keys()
for item in sorted(words):
print "%s:%d" %(item,freq.get(item))
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# Create a DataFrame df from this dictionary data which has the index labels.
df = pd.DataFrame(data,index=labels)
#display summary of the basic information
df.info()
df.describe()
# return first 3 , last 3 rows of dataframe
print df.head(3)
#df.iloc[:3]
print ' '
print df.iloc[-3:]
#print df.tail(3)
# Select just the 'animal' and 'age' columns from the DataFrame df.
df[['animal','age']]
#df.loc[:,['animal','age']]
#Select the data in rows [3, 4, 8] and in columns ['animal', 'age'].
df.loc[df.index[[3,4,8]], ['animal','age']]
# Select only the rows where the number of visits is greater than 3.
df[df['visits']>3]
# Select the rows where the age is missing, i.e. is NaN.
df[df['age'].isnull()]
#Select the rows where the animal is a cat and the age is less than 3.
df[ (df['animal']=='cat') & (df['age'] <3) ]
#Select the rows the age is between 2 and 4 (inclusive).
df[df['age'].between(2,4)]
#Change the age in row 'f' to 1.5
df.loc['f','age']=1.5
#Calculate the sum of all visits (the total number of visits).
df['visits'].sum()
#Calculate the mean age for each different animal in df.
df.groupby('animal')['age'].mean()
# Append a new row 'k' to df with your choice of values for each column. Then delete that row to return the original DataFrame.
df.loc['k'] = [5.5, 'dog', 'no', 2]
# and then deleting the new row...
df = df.drop('k')
# Count the number of each type of animal in df.
df['animal'].value_counts()
#Sort df first by the values in the 'age' in decending order, then by the value in the 'visit' column in ascending order.
df.sort_values(by=['age','visits'], ascending=[False,True])
# The 'priority' column contains the values 'yes' and 'no'.
#Replace this column with a column of boolean values: 'yes' should be True and 'no' should be False.
df['priority']=df['priority'].map({'yes': True, 'no':False})
# In the 'animal' column, change the 'snake' entries to 'python'.
df['animal']= df['animal'].replace({'snake': 'python'})
# For each animal type and each number of visits, find the mean age.
#In other words, each row is an animal, each column is a number of visits and the values are the mean ages
#(hint: use a pivot table).
df.pivot_table(index='animal', columns='visits', values='age' , aggfunc='mean')
# You have a DataFrame df with a column 'A' of integers. For example:
df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})
#How do you filter out rows which contain the same integer as the row immediately above?
df.loc[df['A'].shift() != df['A']]
#Given a DataFrame of numeric values, say
df = pd.DataFrame(np.random.random(size=(5, 3))) # a 5x3 frame of float values
#how do you subtract the row mean from each element in the row?
#print df
# axis=1 means row wise , axis=0 means columnwise
df.sub(df.mean(axis=1), axis=0)
#Suppose you have DataFrame with 10 columns of real numbers, for example:
df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))
#Which column of numbers has the smallest sum? (Find that column's label.)
#print df.sum(axis=0)
df.sum(axis=0).idxmin()
# How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)?
len(df) - df.duplicated(keep=False).sum()
# equivalently, keep only the rows that occur exactly once:
print len(df.drop_duplicates(keep=False))
#You have a DataFrame that consists of 10 columns of floating--point numbers.
#Suppose that exactly 5 entries in each row are NaN values.
#For each row of the DataFrame, find the column which contains the third NaN value.
#(You should return a Series of column labels.)
(df.isnull().cumsum(axis=1)==3).idxmax(axis=1)
# A DataFrame has a column of groups 'grps' and and column of numbers 'vals'. For example:
df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})
#For each group, find the sum of the three greatest values.
df.groupby('grps')['vals'].nlargest(3).sum(level=0)
#A DataFrame has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive).
#For each group of 10 consecutive integers in 'A' (i.e. (0, 10], (10, 20], ...),
#calculate the sum of the corresponding values in column 'B'.
# 1. Write a Python program to print the NumPy version in your system.
print (np.__version__)
#2. Write a NumPy program to convert a list of numeric values into a one-dimensional NumPy array.
l = [12.23, 13.32, 100, 36.32]
print 'original list: ' , l
print 'numpy array : ', np.array(l)
#Create a 3x3 matrix with values ranging from 2 to 10.
np.arange(2,11).reshape(3,3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python Statistics
Step2: Some simpler exercises based on common python function
Step3: Question
Step4: Question
Step5: ``
Step6: Question
Step7: Question
Step8: Panda based exercies
Step9: DataFrames
Step10: Numpy Exercises
|
14,987 | <ASSISTANT_TASK:>
Python Code:
import geopandas as gpd
from matplotlib_scalebar.scalebar import ScaleBar
nybb = gpd.read_file(gpd.datasets.get_path('nybb'))
nybb = nybb.to_crs(32619) # Convert the dataset to a coordinate
# system which uses meters
ax = nybb.plot()
ax.add_artist(ScaleBar(1))
from shapely.geometry.point import Point
points = gpd.GeoSeries([Point(-73.5, 40.5), Point(-74.5, 40.5)], crs=4326) # Geographic WGS 84 - degrees
points = points.to_crs(32619) # Projected WGS 84 - meters
distance_meters = points[0].distance(points[1])
nybb = gpd.read_file(gpd.datasets.get_path('nybb'))
nybb = nybb.to_crs(4326) # Using geographic WGS 84
ax = nybb.plot()
ax.add_artist(ScaleBar(distance_meters))
nybb = gpd.read_file(gpd.datasets.get_path('nybb'))
ax = nybb.plot()
ax.add_artist(ScaleBar(1, dimension="imperial-length", units="ft"))
nybb = gpd.read_file(gpd.datasets.get_path('nybb')).to_crs(32619)
ax = nybb.plot()
# Position and layout
scale1 = ScaleBar(
dx=1, label='Scale 1',
location='upper left', # in relation to the whole plot
label_loc='left', scale_loc='bottom' # in relation to the line
)
# Color
scale2 = ScaleBar(
dx=1, label='Scale 2', location='center',
color='#b32400', box_color='yellow',
box_alpha=0.8 # Slightly transparent box
)
# Font and text formatting
scale3 = ScaleBar(
dx=1, label='Scale 3',
font_properties={'family':'serif', 'size': 'large'}, # For more information, see the cell below
scale_formatter=lambda value, unit: f'> {value} {unit} <'
)
ax.add_artist(scale1)
ax.add_artist(scale2)
ax.add_artist(scale3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating a ScaleBar object
Step2: Geographic coordinate system (degrees)
Step3: After the conversion, we can calculate the distance between the points. The result slightly differs from the Great Circle Calculator but the difference is insignificant (84,921 and 84,767 meters)
Step4: Finally, we are able to use geographic coordinate system in our plot. We set value of dx parameter to a distance we just calculated
Step5: Using other units
Step6: Customization of the scale bar
|
14,988 | <ASSISTANT_TASK:>
Python Code:
# @title Installation
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
!pip install dm-sonnet
!pip install dopamine-rl==3.1.2
!pip install atari-py
!pip install dm_env
!git clone https://github.com/deepmind/deepmind-research.git
%cd deepmind-research
!git clone https://github.com/deepmind/bsuite.git
!pip install -q bsuite/
# @title Imports
import copy
import functools
from typing import Dict, Tuple
import acme
from acme.agents.tf import actors
from acme.agents.tf.dqn import learning as dqn
from acme.tf import utils as acme_utils
from acme.utils import loggers
import sonnet as snt
import tensorflow as tf
import numpy as np
import tree
import dm_env
import reverb
from acme.wrappers import base as wrapper_base
from acme.wrappers import single_precision
import bsuite
# @title Data Loading Utilities
def _parse_seq_tf_example(example, shapes, dtypes):
  """Parse tf.Example containing one or two episode steps."""
def to_feature(shape, dtype):
if np.issubdtype(dtype, np.floating):
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.float32, allow_missing=True)
elif dtype == np.bool or np.issubdtype(dtype, np.integer):
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.int64, allow_missing=True)
else:
raise ValueError(f'Unsupported type {dtype} to '
f'convert from TF Example.')
feature_map = {}
for k, v in shapes.items():
feature_map[k] = to_feature(v, dtypes[k])
parsed = tf.io.parse_single_example(example, features=feature_map)
restructured = {}
for k, v in parsed.items():
dtype = tf.as_dtype(dtypes[k])
if v.dtype == dtype:
restructured[k] = parsed[k]
else:
restructured[k] = tf.cast(parsed[k], dtype)
return restructured
def _build_sars_example(sequences):
  """Convert raw sequences into a Reverb SARS' sample."""
o_tm1 = tree.map_structure(lambda t: t[0], sequences['observation'])
o_t = tree.map_structure(lambda t: t[1], sequences['observation'])
a_tm1 = tree.map_structure(lambda t: t[0], sequences['action'])
r_t = tree.map_structure(lambda t: t[0], sequences['reward'])
p_t = tree.map_structure(
lambda d, st: d[0] * tf.cast(st[1] != dm_env.StepType.LAST, d.dtype),
sequences['discount'], sequences['step_type'])
info = reverb.SampleInfo(key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64))
return reverb.ReplaySample(info=info, data=(
o_tm1, a_tm1, r_t, p_t, o_t))
def bsuite_dataset_params(env):
  """Return shapes and dtypes parameters for bsuite offline dataset."""
shapes = {
'observation': env.observation_spec().shape,
'action': env.action_spec().shape,
'discount': env.discount_spec().shape,
'reward': env.reward_spec().shape,
'episodic_reward': env.reward_spec().shape,
'step_type': (),
}
dtypes = {
'observation': env.observation_spec().dtype,
'action': env.action_spec().dtype,
'discount': env.discount_spec().dtype,
'reward': env.reward_spec().dtype,
'episodic_reward': env.reward_spec().dtype,
'step_type': np.int64,
}
return {'shapes': shapes, 'dtypes': dtypes}
def bsuite_dataset(path: str,
shapes: Dict[str, Tuple[int]],
dtypes: Dict[str, type], # pylint:disable=g-bare-generic
num_threads: int,
batch_size: int,
num_shards: int,
shuffle_buffer_size: int = 100000,
shuffle: bool = True) -> tf.data.Dataset:
  """Create tf dataset for training."""
filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(
num_shards)]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
if shuffle:
file_ds = file_ds.repeat().shuffle(num_shards)
example_ds = file_ds.interleave(
functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
if shuffle:
example_ds = example_ds.shuffle(shuffle_buffer_size)
def map_func(example):
example = _parse_seq_tf_example(example, shapes, dtypes)
return example
example_ds = example_ds.map(map_func, num_parallel_calls=num_threads)
if shuffle:
example_ds = example_ds.repeat().shuffle(batch_size * 10)
example_ds = example_ds.map(
_build_sars_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
example_ds = example_ds.batch(batch_size, drop_remainder=True)
example_ds = example_ds.prefetch(tf.data.experimental.AUTOTUNE)
return example_ds
def load_offline_bsuite_dataset(
bsuite_id: str,
path: str,
batch_size: int,
num_shards: int = 1,
num_threads: int = 1,
single_precision_wrapper: bool = True,
shuffle: bool = True) -> Tuple[tf.data.Dataset,
dm_env.Environment]:
  """Load bsuite offline dataset."""
# Data file path format: {path}-?????-of-{num_shards:05d}
# The dataset is not deterministic and not repeated if shuffle = False.
environment = bsuite.load_from_id(bsuite_id)
if single_precision_wrapper:
environment = single_precision.SinglePrecisionWrapper(environment)
params = bsuite_dataset_params(environment)
dataset = bsuite_dataset(path=path,
num_threads=num_threads,
batch_size=batch_size,
num_shards=num_shards,
shuffle_buffer_size=2,
shuffle=shuffle,
**params)
return dataset, environment
tmp_path = 'gs://rl_unplugged/bsuite'
level = 'catch'
dir = '0_0.0'
filename = '0_full'
path = f'{tmp_path}/{level}/{dir}/{filename}'
batch_size = 2 #@param
bsuite_id = level + '/0'
dataset, environment = load_offline_bsuite_dataset(bsuite_id=bsuite_id,
path=path,
batch_size=batch_size)
dataset = dataset.prefetch(1)
# Get total number of actions.
num_actions = environment.action_spec().num_values
obs_spec = environment.observation_spec()
print(environment.observation_spec())
# Create the Q network.
network = snt.Sequential([
snt.flatten,
snt.nets.MLP([56, 56]),
snt.nets.MLP([num_actions])
])
acme_utils.create_variables(network, [environment.observation_spec()])
# Create a logger.
logger = loggers.TerminalLogger(label='learner', time_delta=1.)
# Create the DQN learner.
learner = dqn.DQNLearner(
network=network,
target_network=copy.deepcopy(network),
discount=0.99,
learning_rate=3e-4,
importance_sampling_exponent=0.2,
target_update_period=2500,
dataset=dataset,
logger=logger)
for _ in range(10000):
learner.step()
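# Offline RL: each learner step samples a batch from the fixed bsuite dataset above;
# the environment is only touched in the evaluation loop below.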
# Create a logger.
logger = loggers.TerminalLogger(label='evaluation', time_delta=1.)
# Create an environment loop.
policy_network = snt.Sequential([
network,
lambda q: tf.argmax(q, axis=-1),
])
loop = acme.EnvironmentLoop(
environment=environment,
actor=actors.DeprecatedFeedForwardActor(policy_network=policy_network),
logger=logger)
loop.run(400)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Copyright 2021 DeepMind Technologies Limited.
Step6: Dataset and environment
Step7: DQN learner
Step8: Training loop
Step9: Evaluation
|
14,989 | <ASSISTANT_TASK:>
Python Code:
!pip install -q --upgrade tensorflow-datasets
import pprint
import tensorflow_datasets as tfds
ratings = tfds.load("movielens/100k-ratings", split="train")
for x in ratings.take(1).as_numpy_iterator():
pprint.pprint(x)
import numpy as np
import tensorflow as tf
movie_title_lookup = tf.keras.layers.experimental.preprocessing.StringLookup()
movie_title_lookup.adapt(ratings.map(lambda x: x["movie_title"]))
print(f"Vocabulary: {movie_title_lookup.get_vocabulary()[:3]}")
movie_title_lookup(["Star Wars (1977)", "One Flew Over the Cuckoo's Nest (1975)"])
# We set up a large number of bins to reduce the chance of hash collisions.
num_hashing_bins = 200_000
movie_title_hashing = tf.keras.layers.experimental.preprocessing.Hashing(
num_bins=num_hashing_bins
)
movie_title_hashing(["Star Wars (1977)", "One Flew Over the Cuckoo's Nest (1975)"])
# Turns positive integers (indexes) into dense vectors of fixed size.
movie_title_embedding = tf.keras.layers.Embedding(  # TODO filled in (assumed: an Embedding layer)
# Let's use the explicit vocabulary lookup.
input_dim=movie_title_lookup.vocab_size(),
output_dim=32
)
movie_title_model = tf.keras.Sequential([movie_title_lookup, movie_title_embedding])
movie_title_model(["Star Wars (1977)"])
user_id_lookup = tf.keras.layers.experimental.preprocessing.StringLookup()
user_id_lookup.adapt(ratings.map(lambda x: x["user_id"]))
user_id_embedding = tf.keras.layers.Embedding(user_id_lookup.vocab_size(), 32)
user_id_model = tf.keras.Sequential([user_id_lookup, user_id_embedding])
for x in ratings.take(3).as_numpy_iterator():
print(f"Timestamp: {x['timestamp']}.")
# Feature-wise normalization of the data.
timestamp_normalization = tf.keras.layers.experimental.preprocessing.Normalization()  # TODO filled in (assumed, matching the UserModel below)
timestamp_normalization.adapt(ratings.map(lambda x: x["timestamp"]).batch(1024))
for x in ratings.take(3).as_numpy_iterator():
print(f"Normalized timestamp: {timestamp_normalization(x['timestamp'])}.")
max_timestamp = ratings.map(lambda x: x["timestamp"]).reduce(
tf.cast(0, tf.int64), tf.maximum).numpy().max()
min_timestamp = ratings.map(lambda x: x["timestamp"]).reduce(
np.int64(1e9), tf.minimum).numpy().min()
timestamp_buckets = np.linspace(
min_timestamp, max_timestamp, num=1000)
print(f"Buckets: {timestamp_buckets[:3]}")
timestamp_embedding_model = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()),
tf.keras.layers.Embedding(len(timestamp_buckets) + 1, 32)
])
for timestamp in ratings.take(1).map(lambda x: x["timestamp"]).batch(1).as_numpy_iterator():
print(f"Timestamp embedding: {timestamp_embedding_model(timestamp)}.")
# Text vectorization layer.
title_text = tf.keras.layers.experimental.preprocessing.TextVectorization()  # TODO filled in (assumed text-vectorization layer)
title_text.adapt(ratings.map(lambda x: x["movie_title"]))
for row in ratings.batch(1).map(lambda x: x["movie_title"]).take(1):
print(title_text(row))
title_text.get_vocabulary()[40:45]
class UserModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.user_embedding = tf.keras.Sequential([
user_id_lookup,
tf.keras.layers.Embedding(user_id_lookup.vocab_size(), 32),
])
self.timestamp_embedding = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()),
tf.keras.layers.Embedding(len(timestamp_buckets) + 2, 32)
])
self.normalized_timestamp = tf.keras.layers.experimental.preprocessing.Normalization()
def call(self, inputs):
# Take the input dictionary, pass it through each input layer,
# and concatenate the result.
return tf.concat([
self.user_embedding(inputs["user_id"]),
self.timestamp_embedding(inputs["timestamp"]),
self.normalized_timestamp(inputs["timestamp"])
], axis=1)
user_model = UserModel()  # TODO filled in (assumed: instantiate the model defined above)
user_model.normalized_timestamp.adapt(
ratings.map(lambda x: x["timestamp"]).batch(128))
for row in ratings.batch(1).take(1):
print(f"Computed representations: {user_model(row)[0, :3]}")
class MovieModel(tf.keras.Model):
def __init__(self):
super().__init__()
max_tokens = 10_000
self.title_embedding = tf.keras.Sequential([
movie_title_lookup,
tf.keras.layers.Embedding(movie_title_lookup.vocab_size(), 32)
])
self.title_text_embedding = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.TextVectorization(max_tokens=max_tokens),
tf.keras.layers.Embedding(max_tokens, 32, mask_zero=True),
# We average the embedding of individual words to get one embedding vector
# per title.
tf.keras.layers.GlobalAveragePooling1D(),
])
def call(self, inputs):
return tf.concat([
self.title_embedding(inputs["movie_title"]),
self.title_text_embedding(inputs["movie_title"]),
], axis=1)
movie_model = MovieModel()  # TODO filled in (assumed: instantiate the model defined above)
movie_model.title_text_embedding.layers[0].adapt(
ratings.map(lambda x: x["movie_title"]))
for row in ratings.batch(1).take(1):
print(f"Computed representations: {movie_model(row)[0, :3]}")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Please re-run the above cell if you are getting any incompatible warnings and errors.
Step2: There are a couple of key features here
Step3: The layer itself does not have a vocabulary yet, but we can build it using our data.
Step4: Once we have this we can use the layer to translate raw tokens to embedding ids
Step5: Note that the layer's vocabulary includes one (or more!) unknown (or "out of vocabulary", OOV) tokens. This is really handy
Step6: We can do the lookup as before without the need to build vocabularies
Step7: Defining the embeddings
Step8: We can put the two together into a single layer which takes raw text in and yields embeddings.
Step9: Just like that, we can directly get the embeddings for our movie titles
Step10: We can do the same with user embeddings
Step11: Normalizing continuous features
Step12: We need to process it before we can use it. While there are many ways in which we can do this, discretization and standardization are two common ones.
Step13: Discretization
Step14: Given the bucket boundaries we can transform timestamps into embeddings
Step15: Processing text features
Step16: Let's try it out
Step17: Each title is translated into a sequence of tokens, one for each piece we've tokenized.
Step18: This looks correct
Step19: Let's try it out
Step20: Movie model
Step21: Let's try it out
|
14,990 | <ASSISTANT_TASK:>
Python Code:
import Quandl
import pandas as pd
import numpy as np
import blaze as bz
with open('../.quandl_api_key.txt', 'r') as f:
api_key = f.read()
db = Quandl.get("EOD/DB", authtoken=api_key)
bz.odo(db['Rate'].reset_index(), '../data/db.bcolz')
fx = Quandl.get("CURRFX/EURUSD", authtoken=api_key)
bz.odo(fx['Rate'].reset_index(), '../data/eurusd.bcolz')
bz.odo('../data/db.bcolz', 'sqlite:///osqf.db::db')
%load_ext sql
%%sql sqlite:///osqf.db
select * from db
d = bz.Data('../data/db.bcolz')
d.Close.max()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data source is http
Step2: Can also migrate it to a sqlite database
Step3: Can perform queries
|
14,991 | <ASSISTANT_TASK:>
Python Code:
df = sqlContext.read.json('/home/anaconda/md0/data/2016_potus/users_all')
df.registerTempTable('followers')
%matplotlib inline
import seaborn as sns
import matplotlib
import warnings
query = """
select
    candidate, count(*) as new_followers
from followers
group by candidate
"""
dfp = sqlContext.sql(query).toPandas()
sns.barplot(x="candidate", y="new_followers", data=dfp, palette="Paired")
import numpy as np
query = """
select
    candidate as candidate,
    created_at_age as age
from followers
"""
dfp = sqlContext.sql(query).toPandas()
sns.set(style="ticks")
ax = sns.boxplot(x="candidate", y="age", data=dfp, palette="RdBu", whis=np.inf)
ax.set_yscale("log")
query = """
select
    candidate,
    percentile(statuses_count, 0.50) as p50_status,
    percentile(statuses_count, 0.99) as p99_status,
    percentile(followers_count, 0.50) as p50_follow,
    percentile(followers_count, 0.99) as p99_follow,
    percentile(friends_count, 0.50) as p50_friend,
    percentile(friends_count, 0.99) as p99_friend,
    count(*) as num
from followers
where created_at_age < 120
group by candidate
"""
sqlContext.sql(query).show()
query = """
select
    candidate,
    percentile(statuses_count, 0.50) as p50_status,
    percentile(statuses_count, 0.99) as p99_status,
    percentile(followers_count, 0.50) as p50_follow,
    percentile(followers_count, 0.99) as p99_follow,
    percentile(friends_count, 0.50) as p50_friend,
    percentile(friends_count, 0.99) as p99_friend,
    count(*) as num
from followers
where created_at_age > 60*60*24
group by candidate
"""
sqlContext.sql(query).show()
query = """
select
    friends_count/created_at_age as friends_per_second,
    floor(created_at_age/60) as minutes_on_twitter
from followers
where created_at_age < 1200
"""
dfp = sqlContext.sql(query).toPandas()
sns.set(style="ticks")
ax = sns.boxplot(x="minutes_on_twitter", y="friends_per_second", data=dfp, palette="Paired", whis=np.inf)
ax.set_ylim(0,1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Number of New Followers
Step4: At First Glance...
Step8: Statuses -vs- Followers -vs- Friends
|
14,992 | <ASSISTANT_TASK:>
Python Code:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from metpy.calc import reduce_point_density
from metpy.calc import wind_components
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, current_weather, sky_cover, StationPlot, wx_code_map
from metpy.units import units
with get_test_data('station_data.txt') as f:
data = pd.read_csv(f, header=0, usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
names=['stid', 'lat', 'lon', 'slp', 'air_temperature', 'cloud_fraction',
'dew_point_temperature', 'weather', 'wind_dir', 'wind_speed'],
na_values=-99999)
# Drop rows with missing winds
data = data.dropna(how='any', subset=['wind_dir', 'wind_speed'])
# Set up the map projection
proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35,
standard_parallels=[35])
# Use the cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a 300km radius
point_locs = proj.transform_points(ccrs.PlateCarree(), data['lon'].values, data['lat'].values)
data = data[reduce_point_density(point_locs, 300000.)]
# Get the wind components, converting from m/s to knots as will be appropriate
# for the station plot.
u, v = wind_components((data['wind_speed'].values * units('m/s')).to('knots'),
data['wind_dir'].values * units.degree)
# Convert the fraction value into a code of 0-8 and compensate for NaN values,
# which can be used to pull out the appropriate symbol
cloud_frac = (8 * data['cloud_fraction'])
cloud_frac[np.isnan(cloud_frac)] = 10
cloud_frac = cloud_frac.astype(int)
# Map weather strings to WMO codes, which we can use to convert to symbols
# Only use the first symbol if there are multiple
wx = [wx_code_map[s.split()[0] if ' ' in s else s] for s in data['weather'].fillna('')]
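# wx_code_map translates METAR-style weather strings (e.g. 'RA', '-SN') into the
# numeric WMO present-weather codes that the current_weather symbol mapper expects.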
# Change the DPI of the resulting figure. Higher DPI drastically improves the
# look of the text rendering.
plt.rcParams['savefig.dpi'] = 255
# Create the figure and an axes set to the projection.
fig = plt.figure(figsize=(20, 10))
add_metpy_logo(fig, 1080, 290, size='large')
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add some various map elements to the plot to make it recognizable.
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.LAKES)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.STATES)
ax.add_feature(cfeature.BORDERS)
# Set plot bounds
ax.set_extent((-118, -73, 23, 50))
#
# Here's the actual station plot
#
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also the fontsize to 12 pt.
stationplot = StationPlot(ax, data['lon'].values, data['lat'].values, clip_on=True,
transform=ccrs.PlateCarree(), fontsize=12)
# Plot the temperature and dew point to the upper and lower left, respectively, of
# the center point. Each one uses a different color.
stationplot.plot_parameter('NW', data['air_temperature'], color='red')
stationplot.plot_parameter('SW', data['dew_point_temperature'],
color='darkgreen')
# A more complex example uses a custom formatter to control how the sea-level pressure
# values are plotted. This uses the standard trailing 3-digits of the pressure value
# in tenths of millibars.
stationplot.plot_parameter('NE', data['slp'], formatter=lambda v: format(10 * v, '.0f')[-3:])
# Plot the cloud cover symbols in the center location. This uses the codes made above and
# uses the `sky_cover` mapper to convert these values to font codes for the
# weather symbol font.
stationplot.plot_symbol('C', cloud_frac, sky_cover)
# Same this time, but plot current weather to the left of center, using the
# `current_weather` mapper to convert symbols to the right glyphs.
stationplot.plot_symbol('W', wx, current_weather)
# Add wind barbs
stationplot.plot_barb(u, v)
# Also plot the actual text of the station id. Instead of cardinal directions,
# plot further out by specifying a location of 2 increments in x and 0 in y.
stationplot.plot_text((2, 0), data['stid'])
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The setup
Step2: This sample data has way too many stations to plot all of them. The number of stations plotted is reduced with reduce_point_density so that only stations roughly 300 km apart remain.
Step3: Now that we have the data we want, we need to perform some conversions
Step4: The payoff
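A minimal sketch of the wind conversion mentioned in Step3, using made-up speeds and directions rather than the notebook's dataset (the MetPy calls are the same ones used in the code above):
import numpy as np
from metpy.calc import wind_components
from metpy.units import units
# Two synthetic observations: 5 m/s from the north, 12 m/s from the southwest
speed = np.array([5.0, 12.0]) * units('m/s')
direction = np.array([0.0, 225.0]) * units.degree
# Convert to knots first (as the station plot expects), then split into u/v components
u, v = wind_components(speed.to('knots'), direction)
print(u, v)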
|
14,993 | <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_breast_cancer, load_digits
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
# get deterministic random numbers
rng = np.random.RandomState(42)
noise = rng.normal(size=(len(cancer.data), 50))
# add noise features to the data
# the first 30 features are from the dataset, the next 50 are noise
X_w_noise = np.hstack([cancer.data, noise])
X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target,
random_state=0, test_size=.5)
from sklearn.feature_selection import SelectPercentile
# use f_classif (the default) and SelectPercentile to select 50% of features:
select = SelectPercentile(percentile=50)
select.fit(X_train, y_train)
# transform training set:
X_train_selected = select.transform(X_train)
print(X_train.shape)
print(X_train_selected.shape)
from sklearn.feature_selection import f_classif, f_regression, chi2
F, p = f_classif(X_train, y_train)
plt.figure()
plt.plot(p, 'o')
mask = select.get_support()
print(mask)
# visualize the mask. black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
from sklearn.linear_model import LogisticRegression
# transform test data:
X_test_selected = select.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train, y_train)
print("Score with all features: %f" % lr.score(X_test, y_test))
lr.fit(X_train_selected, y_train)
print("Score with only selected features: %f" % lr.score(X_test_selected, y_test))
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
select = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=42), threshold="median")
select.fit(X_train, y_train)
X_train_rf = select.transform(X_train)
print(X_train.shape)
print(X_train_rf.shape)
mask = select.get_support()
# visualize the mask. black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
X_test_rf = select.transform(X_test)
LogisticRegression().fit(X_train_rf, y_train).score(X_test_rf, y_test)
from sklearn.feature_selection import RFE
select = RFE(RandomForestClassifier(n_estimators=100, random_state=42), n_features_to_select=40)
select.fit(X_train, y_train)
# visualize the selected features:
mask = select.get_support()
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
X_train_rfe = select.transform(X_train)
X_test_rfe = select.transform(X_test)
LogisticRegression().fit(X_train_rfe, y_train).score(X_test_rfe, y_test)
select.score(X_test, y_test)
import numpy as np
rng = np.random.RandomState(1)
# Generate 400 random integers in the range [0, 1]
X = rng.randint(0, 2, (200, 2))
y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0) # XOR creation
plt.scatter(X[:, 0], X[:, 1], c=plt.cm.Spectral(y.astype(float)))  # 'spectral' was renamed 'Spectral' in newer matplotlib
# %load solutions/19_univariate_vs_mb_selection.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We have to define a threshold on the p-value of the statistical test to decide how many features to keep. There are several strategies implemented in scikit-learn, a straight-forward one being SelectPercentile, which selects a percentile of the original features (we select 50% below)
Step2: We can also use the test statistic directly to see how relevant each feature is. As the breast cancer dataset is a classification task, we use f_classif, the F-test for classification. Below we plot the p-values associated with each of the 80 features (30 original features + 50 noise features). Low p-values indicate informative features.
Step3: Clearly most of the first 30 features have very small p-values.
Step4: Nearly all of the original 30 features were recovered.
Step5: Model-based Feature Selection
Step6: This method builds a single model (in this case a random forest) and uses the feature importances from this model.
Step7: Exercise
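A small follow-up sketch (not from the notebook): once any of the selectors above is fitted, its boolean get_support() mask can be mapped back to the dataset's feature names to see which columns survived. The percentile value and variable names here are illustrative.
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
X_tr, X_te, y_tr, y_te = train_test_split(cancer.data, cancer.target, random_state=0)
select = SelectPercentile(percentile=20).fit(X_tr, y_tr)
# Boolean mask over the columns -> names of the kept features
print(np.array(cancer.feature_names)[select.get_support()])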
|
14,994 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append("/Users/rfinn/Dropbox/pythonCode/")
sys.path.append("/anaconda/lib/python2.7/site-packages")
sys.path.append("/Users/rfinn/Ureka/variants/common/lib/python2.7/site-packages")
from astropy.io import fits
#infile='/Users/rfinn/research/LocalClusters/NSAmastertables/LCS_all_size.fits'
#s=fits.getdata(infile)
#flag=s['matchflag']
%run ~/Dropbox/pythonCode/LCSanalyzeblue.py
plt.figure()
plt.plot(s.s.SERSIC_TH50[s.gim2dflag]*s.DA[s.gim2dflag],s.s.Rhlr_1[s.gim2dflag],'ko')
#plt.plot(s.s.SERSIC_TH50[s.gim2dflag]*s.DA[s.gim2dflag],s.s.Rd[s.gim2dflag],'bo')
xl=np.linspace(0,20,2)
plt.plot(xl,xl,'r-')
plt.xlabel('NSA SERSIC_TH50*DA')
plt.ylabel('GIM2D half light radius')
plt.figure()
plt.plot(s.s.Rhlr_1[s.gim2dflag],s.s.Rd[s.gim2dflag],'bo',label='Disk')
plt.plot(s.s.Rhlr_1[s.gim2dflag],s.s.Re[s.gim2dflag],'ro',label='Bulge')
xl=np.linspace(0,20,2)
plt.plot(xl,xl,'k--',label='1:1')
plt.ylabel('GIM2D half light radius')
plt.xlabel('GIM2D galaxy half light radius')
plt.legend(numpoints=1,loc='upper left')
plt.figure()
bins=np.arange(0,2.5,.1)
plt.hist(s.SIZE_RATIO_gim2d[s.sampleflag],histtype='step',hatch='///',color='r',bins=bins,label='GIM2D')
plt.hist(s.SIZE_RATIO_DISK[s.sampleflag],histtype='step',color='b',hatch='o',bins=bins,label='GIM2D Disk Only')
plt.hist(s.s.SIZE_RATIO[s.sampleflag],histtype='step',color='k',hatch='\\\\',bins=bins,label='NSA')
plt.legend(loc='upper right')
plt.xlabel('Normalized 24um Size')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting 1/2 light radius from GIM2D vs NSA
Step2: Conclusion: the two measures of radius are comparable, except for the NSA galaxies with very large radii. I think I cut these out of the sample. If I used the GIM2D fits, I could include them! Right now I have
Step3: CONCLUSION: both the bulge and disk half-light radii are smaller than the half-light radius of the entire galaxy. This surprises me: I would think the disk is more extended and flatter, so the disk should have the larger half-light radius. What am I missing?
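For reference, the angular-to-physical conversion behind the first plot is just the product of the angular half-light radius and the angular-diameter-distance scale. A minimal sketch with made-up numbers, assuming DA is expressed in kpc/arcsec as the axis label suggests:
import numpy as np
theta_50 = np.array([2.1, 4.8, 7.5])   # SERSIC_TH50 in arcsec (made-up values)
DA = np.array([0.45, 0.51, 0.39])      # kpc per arcsec at each galaxy (made-up values)
R50_kpc = theta_50 * DA                # physical half-light radius in kpc
print(R50_kpc)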
|
14,995 | <ASSISTANT_TASK:>
Python Code:
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
X = np.array([0.15, 0.41, 0.53, 0.80, 0.89, 0.92, 0.95])
s = np.array([0.09, 0.16, 0.63, 0.44, 0.55, 0.82, 0.95])
sigma_eps = 0.3
# <SOL>
plt.figure()
plt.scatter(X, s)
plt.xlabel('x')
plt.ylabel('s')
plt.show()
# </SOL>
# Note that, to use lstsq, the input matrix must be K x 1
Xcol = X[:,np.newaxis]
# Compute the ML estimate using linalg.lstsq from Numpy.
wML = np.linalg.lstsq(Xcol, s)[0]
K = len(s)
wGrid = np.arange(-0.5, 2, 0.01)
p = []
for w in wGrid:
d = s - X*w
# p.append(<FILL IN>)
p.append((1.0/(np.sqrt(2*np.pi)*sigma_eps))**K * np.exp(-np.dot(d, d)))
# Compute the likelihood for the ML parameter wML
# d = <FILL IN>
d = s-X*wML
# pML = [<FILL IN>]
pML = [(1.0/(np.sqrt(2*np.pi)*sigma_eps))**K * np.exp(-np.dot(d, d))]
# Plot the likelihood function and the optimal value
plt.figure()
plt.plot(wGrid, p)
plt.stem(wML, pML)
plt.xlabel('$w$')
plt.ylabel('Likelihood function')
plt.show()
x = np.arange(0, 1.2, 0.01)
# sML = <FILL IN>
sML = wML * x
plt.figure()
plt.scatter(X, s)
# plt.plot(<FILL IN>)
plt.plot(x, sML)
plt.xlabel('x')
plt.ylabel('s')
plt.axis('tight')
plt.show()
K = len(s)
wGrid = np.arange(0, 6, 0.01)
p = []
Px = np.prod(X)
xs = np.dot(X,s)
for w in wGrid:
# p.append(<FILL IN>)
p.append((w**K)*Px*np.exp(-w*xs))
plt.figure()
# plt.plot(<FILL IN>)
plt.plot(wGrid, p)
plt.xlabel('$w$')
plt.ylabel('Likelihood function')
plt.show()
# wML = <FILL IN>
wML = float(K) / xs
print(wML)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Model-based parametric regression
Step2: have been generated by a linear Gaussian model (i.e., with $z = T(x) = x$) with noise variance $\sigma_\epsilon^2$ (the code above uses $\sigma_\epsilon = 0.3$)
Step3: 1.1. Represent a scatter plot of the data points
Step4: 1.2. Compute the ML estimate.
Step5: 1.3. Plot the likelihood as a function of parameter $w$ along the interval $-0.5\le w \le 2$, verifying that the ML estimate takes the maximum value.
Step6: 1.4. Plot the prediction function over the data scatter plot
Step7: Exercise 2
Step8: 2.3. Determine the coefficient $w_\text{ML}$ of the linear prediction function (i.e., using ${\bf Z}={\bf X}$).
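For reference, the closed-form estimates computed by the fills above are standard maximum-likelihood results (the constant factors omitted in the likelihood code do not change the arg max):
$$w_\text{ML}^\text{Gauss} = \arg\min_w \sum_k (s_k - w x_k)^2 = \frac{\sum_k x_k s_k}{\sum_k x_k^2}, \qquad w_\text{ML}^\text{exp} = \arg\max_w\; w^K \Big(\prod_k x_k\Big) e^{-w\sum_k x_k s_k} = \frac{K}{\sum_k x_k s_k}$$
The second expression is what the wML = K/xs line in the last cell evaluates.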
|
14,996 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
# Create list of all typeIDs available in the FITS database
all_type_URL = 'https://fits.geonet.org.nz/type'
all_types = pd.read_json(all_type_URL).iloc[:,0]
all_typeIDs= []
for row in all_types:
all_typeIDs.append(row['typeID'])
# Specify site(s) to get data for
sites = ['RU001', 'WI222']
# Ensure list format to sites
if type(sites) != list:
site = sites
sites = []
sites.append(site)
# Prepare data lists
site_data = [[] for j in range(len(sites))]
site_data_types = [[] for j in range(len(sites))]
# Load data from FITS database and parse into data lists
for j in range(len(sites)):
for i in range(len(all_typeIDs)):
# Build query for site, typeID combination
query_suffix = 'siteID=%s&typeID=%s' % (sites[j], all_typeIDs[i])
URL = 'https://fits.geonet.org.nz/observation?' + query_suffix
# Try to load data of the given typeID for the site, if it fails then the data doesn't exist
try:
data = pd.read_csv(URL, names=['date-time', all_typeIDs[i], 'error'], header=0, parse_dates=[0], index_col=0)
if len(data.values) > 1:
site_data[j].append(data)
site_data_types[j].append(all_typeIDs[i])
except:
pass
# Return information to the operator
for i in range(len(site_data_types)):
print('Data types available for ' + sites[i] + ':\n')
for j in range(len(site_data_types[i])):
print(site_data_types[i][j])
print('\n')
plot_data_types = ['t', 'ph', 'NH3-w']
# Determine number and arrangement of subplots (max 9, less for greater legibility)
subplot_number = len(plot_data_types)
if subplot_number / 3 > 1: # if there are more than 3 subplots
rows = '3'
if subplot_number / 6 > 1: # if there are more than 6 subplots
cols = '3'
else:
cols = '2'
else:
rows = str(subplot_number)
cols = '1'
ax = [[] for i in range(len(plot_data_types))]
# Plot data
plt.figure(figsize = (10,8))
for i in range(len(site_data)): # i is site index
for j in range(len(plot_data_types)): # j is data type index
k = site_data_types[i].index(plot_data_types[j]) # match data type chosen to position in data list
if i == 0:
ax[j] = plt.subplot(int(rows + cols + str(j + 1)))
if ((i == 0) and (j == 0)):
# Set initial min/max times
minmintime = min(site_data[i][k].index.values)
maxmaxtime = max(site_data[i][k].index.values)
# Do not plot empty DataFrames (and avoid cluttering the figure legend)
if len(site_data[i][k].values) < 1:
continue
try:
ax[j].plot(site_data[i][k].loc[:, plot_data_types[j]], label = sites[i],
marker='o', linestyle=':', markersize = 1)
except:
continue
# Get min, max times of dataset
mintime = min(site_data[i][k].index.values)
maxtime = max(site_data[i][k].index.values)
# Set y label
ax[j].set_ylabel(plot_data_types[j], rotation = 90, labelpad = 5, fontsize = 12)
if ((i == 1) and (j == 0)):
# Set legend
plot, labels = ax[j].get_legend_handles_labels()
# ^ due to repetitive nature of plotting, only need to do this once
ax[j].legend(plot, labels, fontsize = 12, bbox_to_anchor=(-0.2, 1.3))
# Note: the legend may extend off the figure if there are many sites
# Set title
plot_data_typesstr = ''
for k in range(len(plot_data_types)): plot_data_typesstr += plot_data_types[k] + ', '
plot_data_typesstr = plot_data_typesstr[:-2]
ax[j].set_title('All site data for plot data types : ' + plot_data_typesstr, loc = 'left', y = 1.03,
fontsize = 16)
# Get min, max times of all data
minmintime = min(mintime, minmintime)
maxmaxtime = max(maxtime, maxmaxtime)
# Add x label
plt.xlabel('Time', rotation = 0, labelpad = 5, fontsize = 12)
# Tidy up plot extent and x-axis
for j in range(len(plot_data_types)):
ax[j].set_xlim([minmintime, maxmaxtime])
ax[j].set_xticks(np.arange(minmintime, maxmaxtime + 1000, (maxmaxtime - minmintime) / 3))
plt.show()
# Optionally, save the figure to the current working directory
import os
plt.savefig(os.getcwd() + '/test.png', format = 'png')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we will specify the site(s) we want to query for available data types.
Step2: The next code segment will query the FITS database for data of all types at the given site(s). The output will be a list of data types available at the site(s) specified.
Step3: While a list of available data types is useful, we need a fast way to view data of a given type and decide if it's what we want. The next code segment deals with plotting some data for each site. As there is a limit to how much data can be displayed on a plot, we will specify which data types we want to display for the site(s). Up to 9 data types can be specified.
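As a minimal sketch of a single FITS query for one site and one data type (the siteID/typeID pair here is a plausible example; substitute values reported by the availability check above):
import pandas as pd
import matplotlib.pyplot as plt
# One observation query: type 't' (temperature) at site RU001
url = 'https://fits.geonet.org.nz/observation?siteID=RU001&typeID=t'
obs = pd.read_csv(url, names=['date-time', 't', 'error'], header=0,
                  parse_dates=[0], index_col=0)
obs['t'].plot(marker='o', linestyle=':', markersize=1)
plt.ylabel('t')
plt.show()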
|
14,997 | <ASSISTANT_TASK:>
Python Code:
%run matt_startup
%run -i matt_utils
button_qtconsole()
#import other needed modules in all used engines
#with dview.sync_imports():
# import os
filename = 'FIWT_Exp015_20150601145005.dat.npz'
def loadData():
# Read and parse raw data
global exp_data
exp_data = np.load(filename)
# Select colums
global T_cmp, da_cmp
T_cmp = exp_data['data33'][:,0]
da_cmp = np.average(exp_data['data33'][:,3:11:2], axis=1)
global T_rig, phi_rig
T_rig = exp_data['data44'][:,0]
phi_rig = exp_data['data44'][:,2]
loadData()
text_loadData()
def checkInputOutputData():
#check inputs/outputs
fig, ax = plt.subplots(2,1,True)
ax[0].plot(T_cmp,da_cmp,'r',picker=1)
ax[1].plot(T_rig,phi_rig, 'b', picker=2)
ax[0].set_ylabel('$\delta \/ / \/ ^o$')
ax[1].set_ylabel('$\phi \/ / \/ ^o/s$')
ax[1].set_xlabel('$T \/ / \/ s$', picker=True)
ax[0].set_title('Output', picker=True)
fig.canvas.mpl_connect('pick_event', onPickTime)
fig.show()
display(fig)
button_CheckData()
# Pick up focused time ranges
time_marks = [[1501.28, 1505.50, "doublet u1"],
[1507.40, 1511.80, "doublet u2"],
[1513.55, 1517.87, "doublet u3"],
[1519.70, 1523.50, "doublet u4"],
[1537.60, 1541.64, "doublet d1"],
[1543.76, 1547.90, "doublet d2"],
[1549.91, 1554.20, "doublet d3"],
[1555.86, 1560.00, "doublet d4"],
[1609.30, 1615.49, "3-2-1-1 u1"],
[1617.89, 1624.25, "3-2-1-1 u2"],
[1626.49, 1633.45, "3-2-1-1 u3"],
[1634.99, 1642.38, "3-2-1-1 u4"],
[1651.40, 1657.50, "3-2-1-1 d1"],
[1659.90, 1666.68, "3-2-1-1 d2"],
[1668.50, 1674.69, "3-2-1-1 d3"],
[1677.00, 1683.88, "3-2-1-1 d4"],
[1748.59, 1809.05, "linear sweep u1"],
[1825.89, 1885.96, "linear sweep d1"],
[1905.86, 1965.17, "exp sweep u1"],
]
# Decide DT,U,Z and their processing method
DT=0.01
process_set = {
'U':[(T_cmp, da_cmp,0),],
'Z':[(T_rig, phi_rig,3),],
'cutoff_freq': 10 #Hz
}
U_names = ['$\delta_{a,cmp} \, / \, ^o$',]
Y_names = Z_names = ['$\phi_{a,rig} \, / \, ^o$',
'$\dot{\phi}_{a,rig} \, / \, ^o/s$',]
display_data_prepare()
resample(True);
%%px --local
#update common const parameters in all engines
#problem size
Nx = 2
Nu = 1
Ny = 2
Npar = 9
#reference
S_c = 0.1254 #S_c(m2)
b_c = 0.7 #b_c(m)
g = 9.81 #g(m/s2)
V = 30 #V(m/s)
#other parameters
v_th = 0.5/57.3 #v_th(rad/s)
v_th2 = 0.5/57.3 #v_th(rad/s)
#for short
qbarSb = 0.5*1.225*V*V*S_c*b_c
b2v = b_c/(2*V)
def x0(Z,T,U,params):
return Z[0,:]/57.3
def xdot(X,t,U,params):
Cla_cmp = params[0]
Clp_cmp = params[1]
Ixx = params[2]
F_c = params[3]
f = params[4]
m_T = params[5]
l_z_T = params[6]
kBrk = params[7]
phi0 = params[8]
phi = X[0]
phi_dot = X[1]
idx = int(t/DT)
da_cmp = U[idx,0]*0.01745329
moments = -(m_T*l_z_T)*g*math.sin(phi-phi0)
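# Bearing friction model: a viscous term f*phi_dot plus a Coulomb term F_c with a
# breakaway factor kBrk near zero velocity; the value is blended linearly across the
# v_th / v_th2 thresholds to avoid a hard discontinuity at phi_dot = 0.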
abs_phi_dot = abs(phi_dot)
F = f*phi_dot
if abs_phi_dot > v_th+v_th2:
F += math.copysign(F_c, phi_dot)
elif abs_phi_dot > v_th:
F += math.copysign(F_c*(kBrk-(kBrk-1)*(abs_phi_dot-v_th)/v_th2), phi_dot)
else:
F += phi_dot/v_th*(F_c*kBrk)
moments -= F
moments += qbarSb*(Cla_cmp*da_cmp + Clp_cmp*phi_dot*b2v)
phi_dot2 = moments/Ixx
return [phi_dot, phi_dot2]
def obs(X,T,U,params):
return X*57.3
display(HTML('<b>Constant Parameters</b>'))
table = ListTable()
table.append(['Name','Value','unit'])
table.append(['$S_c$',S_c,r'$m^2$'])
table.append(['$b_c$',b_c,'$m$'])
table.append(['$g$',g,r'$ms^{-2}$'])
table.append(['$V$',V,r'$ms^{-1}$'])
display(table)
#initial guess
param0 = [
-0.3, #Cla_cmp(1/rad)
-0.5, #Clp_cmp
0.199141909329, #Ixx(kg*m2)
0.0580817418532, #F_c(N*m)
0.0407466009837, #f(N*m/(rad/s))
7.5588, #m_T(kg)
0.0444, #l_z_T(m)
1.01, #kBrk
0, #phi0(rad)
]
param_name = ['$Cla_{cmp}$',
'$Clp_{cmp}$',
'$I_{xx,rig}$',
'$F_c$',
'$f$',
'$m_T$',
'$l_{zT}$',
'$k_{Brk}$',
'$phi_0$']
param_unit = ['$rad^{-1}$',
'$1$',
'$kg\,m^2$',
'$Nm$',
r'$\frac{Nm}{rad/s}$',
'kg',
'm',
'1',
'$rad$']
NparID = 4
opt_idx = [0,1,2,8]
opt_param0 = [param0[i] for i in opt_idx]
par_del = [0.3*1e-3, 0.3*1e-3, 0.2*1e-3, 0.0174]
bounds = [(-1,-1e-6),(-2,-1e-6), (1e-6,0.5), (-0.1,0.1)]
'''
NparID = 3
opt_idx = [0,1,7]
opt_param0 = [param0[i] for i in opt_idx]
par_del = [0.3*1e-3, 0.3*1e-3, 0.0174]
bounds = [(-1,-1e-6),(-2,-1e-6), (-0.1,0.1)]
'''
display_default_params()
#select sections for training
section_idx = range(8)
display_data_for_train()
#push parameters to engines
push_opt_param()
# select 2 section from training data
idx = random.sample(section_idx, 2)
interact_guess();
display_preopt_params()
if True:
InfoMat = None
method = 'trust-ncg'
def hessian(opt_params, index):
global InfoMat
return InfoMat
dview['enable_infomat']=True
options={'gtol':1}
opt_bounds = None
else:
method = 'L-BFGS-B'
hessian = None
dview['enable_infomat']=False
options={'ftol':1e-2,'maxfun':10}
opt_bounds = bounds
cnt = 0
tmp_rslt = None
T0 = time.time()
print('#cnt, Time, |R|')
%time res = sp.optimize.minimize(fun=costfunc, x0=opt_param0, \
args=(opt_idx,), method=method, jac=True, hess=hessian, \
bounds=opt_bounds, options=options)
display_opt_params()
# show result
idx = random.sample(range(8), 2) \
+ random.sample(range(8,16), 2) \
+ random.sample(range(16,19), 2)
display_data_for_test();
update_guess();
toggle_inputs()
button_qtconsole()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preparation
Step2: Check time sequence and inputs/outputs
Step3: Input $\delta_T$ and focused time ranges
Step4: Resample and filter data in sections
Step5: Define dynamic model to be estimated
Step6: Initial guess
Step7: Optimize using ML
Step8: Show and test results
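The costfunc used below comes from matt_utils and is not listed here. As a rough sketch of the output-error idea it implements (simulate the model for a candidate parameter vector, then compare against the measured response), using scipy directly and a simplified roll model without the friction terms; function and variable names are illustrative, not the notebook's API:
import numpy as np
from scipy.integrate import odeint
def roll_model(x, t, da_of_t, Cla, Clp):
    # x = [phi, phi_dot]; simplified rigid-roll dynamics driven by aileron deflection da(t) in rad
    phi, phi_dot = x
    qbarSb = 0.5 * 1.225 * 30.0**2 * 0.1254 * 0.7
    b2v = 0.7 / (2 * 30.0)
    Ixx = 0.2
    moment = qbarSb * (Cla * da_of_t(t) + Clp * phi_dot * b2v)
    return [phi_dot, moment / Ixx]
def sse_cost(params, t, da_of_t, phi_meas):
    # Sum-of-squares output error between simulated and measured roll angle
    Cla, Clp = params
    sim = odeint(roll_model, [phi_meas[0], 0.0], t, args=(da_of_t, Cla, Clp))
    return np.sum((sim[:, 0] - phi_meas) ** 2)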
|
14,998 | <ASSISTANT_TASK:>
Python Code:
import torch
is_cuda = True if torch.cuda.is_available() else False
print(is_cuda)
id = 1
torch.cuda.set_device(id)
print( torch.cuda.current_device() )
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
def findFiles(path): return glob.glob(path)
print(findFiles('data/names/*.txt'))
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
all_categories = []
category_lines = {}
# Split it into training set and validation set
training_lines = {}
validation_lines = {}
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
for filename in findFiles('data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
num_of_training_set = int(len(lines)*0.8)
training_lines[category] = lines[:num_of_training_set]
validation_lines[category] = lines[num_of_training_set:]
n_categories = len(all_categories)
print(n_categories)
print(category_lines['Italian'][:5])
import torch
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
if is_cuda:
tensor = tensor.cuda()
return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
if is_cuda:
tensor = tensor.cuda()
return tensor
# A tensor here; in effect it is a one-hot row vector.
print(letterToTensor('J'))
print(lineToTensor('Jones').size())
import torch.nn as nn
class BaseRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(BaseRNN, self).__init__()
self.hidden_size = hidden_size
# input to hidden
self.i2h = nn.Linear(input_size, hidden_size)
# hidden to hidden
self.h2h = nn.Linear(hidden_size, hidden_size)
#############################################
#
# Change your activation function here
#
#############################################
# self.activation = nn.Tanh()
self.activation = nn.ReLU()
# hidden to output
self.h2o = nn.Linear(hidden_size, output_size)
def step(self, letter, hidden):
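# One Elman-style recurrence step: hidden = activation(W_ih*letter + W_hh*hidden),
# output = W_ho*hidden. forward() below keeps only the output of the final step.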
i2h = self.i2h(letter)
h2h = self.h2h(hidden)
hidden = self.activation( h2h+i2h )
output = self.h2o(hidden)
return output, hidden
def forward(self, word):
hidden = self.initHidden(is_cuda)
for i in range(word.size()[0]):
# Only the last output will be used to predict
output, hidden = self.step(word[i], hidden)
return output
def initHidden(self, is_cuda=True):
if is_cuda:
return torch.zeros(1, self.hidden_size).cuda()
else:
return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = BaseRNN(n_letters, n_hidden, n_categories)
if is_cuda:
rnn = rnn.cuda()
###############################################################################
#
# Finish a the following model, and train it
#
###############################################################################
class DeeperRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(DeeperRNN, self).__init__()
self.hidden1_size = hidden_size
self.hidden2_size = hidden_size
self.layer1 = BaseRNN(input_size, hidden_size, hidden_size)
self.layer2 = BaseRNN(hidden_size, hidden_size, output_size)
def step(self, letter, hidden1, hidden2):
############################################################
# complete this function
############################################################
output1, hidden1 = self.layer1.step(letter, hidden1)
output2, hidden2 = self.layer2.step(output1, hidden2)
return output2, hidden1, hidden2
def forward(self, word):
############################################################
# complete this function
############################################################
hidden1, hidden2 = self.initHidden(is_cuda)
for i in range(word.size()[0]):
output, hidden1, hidden2 = self.step(word[i], hidden1, hidden2)
return output
def initHidden(self, is_cuda=True):
if is_cuda:
return torch.zeros(1, self.hidden1_size).cuda(), torch.zeros(1, self.hidden2_size).cuda()
else:
return torch.zeros(1, self.hidden1_size), torch.zeros(1, self.hidden2_size)
###############################################################################
# Remember to uncomment the following two line after you finish your model and
# start to retrain your model
###############################################################################
n_hidden = 128
rnn = DeeperRNN(n_letters, n_hidden, n_categories)
rnn = rnn.cuda() if is_cuda else rnn
input = letterToTensor('A')
hidden =torch.zeros(1, n_hidden)
hidden = hidden.cuda() if is_cuda else hidden
output, next_hidden = rnn.step(input, hidden)
print(output.shape)
print(next_hidden.shape)
input = lineToTensor('Albert')
print(n_hidden)
output = rnn(input)
print(output)
print(output.shape)
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
print(categoryFromOutput(output))
import random
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
# attention: split training set
line = randomChoice(training_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
category_tensor = category_tensor.cuda() if is_cuda else category_tensor
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
def randomValidationExample():
category = randomChoice(all_categories)
# attention: split validation set
line = randomChoice(validation_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
category_tensor = category_tensor.cuda() if is_cuda else category_tensor
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
criterion = nn.CrossEntropyLoss()
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
output = rnn(line_tensor)
rnn.zero_grad()
loss = criterion(output, category_tensor)
loss.backward()
# Add parameters' gradients to their values, multiplied by learning rate
for p in rnn.parameters():
if hasattr(p.grad, "data"):
p.data.add_(-learning_rate, p.grad.data)
return output, loss.item()
import time
import math
n_iters = 100000
print_every = 5000
plot_every = 1000
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
start = time.time()
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
guess, guess_i = categoryFromOutput(output)
correct = '✓' if guess == category else '✗ (%s)' % category
print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
import matplotlib.pyplot as plt
#################################################
#
# Tips: your could use plt.plot
#
#################################################
plt.title('losses')
plt.plot(all_losses)
plt.show()
# DRNN
plt.title('losses')
plt.plot(all_losses)
plt.show()
# Keep track of correct guesses in a confusion matrix
confusion_training = torch.zeros(n_categories, n_categories)
confusion_validation = torch.zeros(n_categories, n_categories)
n_confusion = 5000
# Just return an output given a line
def evaluate(line_tensor):
rnn.eval()
output = rnn(line_tensor)
return output
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion_training[category_i][guess_i] += 1
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomValidationExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion_validation[category_i][guess_i] += 1
# catcul acc
right_train = 0
right_valid = 0
for i in range(n_categories):
right_train += confusion_training[i][i]
right_valid += confusion_validation[i][i]
acc_train = right_train / n_confusion
acc_valid = right_valid / n_confusion
# Normalize by dividing every row by its sum and
for i in range(n_categories):
confusion_training[i] = confusion_training[i] / confusion_training[i].sum()
confusion_validation[i] = confusion_validation[i] / confusion_validation[i].sum()
# Set up plot
fig = plt.figure()
ax1 = fig.add_subplot(121)
cax1 = ax1.matshow(confusion_training.numpy())
ax2 = fig.add_subplot(122)
cax2 = ax2.matshow(confusion_validation.numpy())
# Set up axes
ax1.set_xticklabels([''] + all_categories, rotation=90)
ax1.set_yticklabels([''] + all_categories)
ax2.set_xticklabels([''] + all_categories, rotation=90)
# sphinx_gallery_thumbnail_number = 2
plt.show()
print("Traing set Acc is", acc_train.item())
print("validation set Acc is", acc_valid.item())
def predict(input_line, n_predictions=3):
print('\n> %s' % input_line)
with torch.no_grad():
output = evaluate(lineToTensor(input_line))
output = torch.nn.functional.softmax(output, dim=1)
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
category_index = topi[0][i].item()
print('Probability (%.2f) %s' % (value, all_categories[category_index]))
predictions.append([value, all_categories[category_index]])
predict('Dovesky')
predict('Jackson')
predict('Satoshi')
predict("Cui")
predict("Zhuang")
predict("Xue")
predict("Wang")
predict('Chen')
predict('Xiu')
predict('Jia')
import torch
import torch.nn as nn
class GenerateRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(GenerateRNN, self).__init__()
self.hidden_size = hidden_size
self.c2h = nn.Linear(n_categories, hidden_size)
self.i2h = nn.Linear(input_size, hidden_size)
self.h2h = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.h2o = nn.Linear(hidden_size, output_size)
self.dropout = nn.Dropout(0.2)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, category, input, hidden):
c2h = self.c2h(category)
i2h = self.i2h(input)
h2h = self.h2h(hidden)
hidden = self.activation( c2h+i2h+h2h )
dropout = self.dropout(self.h2o(hidden))
output = self.softmax(dropout)
return output, hidden
def initHidden(self, is_cuda=True):
if is_cuda:
return torch.zeros(1, self.hidden_size).cuda()
else:
return torch.zeros(1, self.hidden_size)
import random
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
return category, line
# One-hot vector for category
def categoryTensor(category):
li = all_categories.index(category)
tensor = torch.zeros(1, n_categories)
tensor[0][li] = 1
if is_cuda:
tensor = tensor.cuda()
return tensor
# One-hot matrix of first to last letters (not including EOS) for input
def inputTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li in range(len(line)):
letter = line[li]
tensor[li][0][all_letters.find(letter)] = 1
if is_cuda:
tensor = tensor.cuda()
return tensor
# LongTensor of second letter to end (EOS) for target
def targetTensor(line):
letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
letter_indexes.append(n_letters - 1) # EOS
tensor = torch.LongTensor(letter_indexes)
if is_cuda:
tensor = tensor.cuda()
return tensor
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample():
category, line = randomTrainingPair()
category_tensor = categoryTensor(category)
input_line_tensor = inputTensor(line)
target_line_tensor = targetTensor(line)
return category_tensor, input_line_tensor, target_line_tensor
criterion = nn.NLLLoss()
learning_rate = 0.0005
def train(category_tensor, input_line_tensor, target_line_tensor):
target_line_tensor.unsqueeze_(-1)
hidden = rnn.initHidden(is_cuda)
rnn.zero_grad()
loss = 0
# The loss is accumulated over every character of the target name:
# each step's output is compared with the next letter in the sequence
for i in range(input_line_tensor.size(0)):
output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
l = criterion(output, target_line_tensor[i])
loss += l
loss.backward()
for p in rnn.parameters():
if hasattr(p.grad, "data"):
p.data.add_(-learning_rate, p.grad.data)
return output, loss.item() / input_line_tensor.size(0)
import time
import math
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
is_cuda = False
rnn = GenerateRNN(n_letters, 128, n_letters)
rnn = rnn.cuda() if is_cuda else rnn
n_iters = 100000
print_every = 5000
plot_every = 500
all_losses = []
total_loss = 0 # Reset every plot_every iters
start = time.time()
for iter in range(1, n_iters + 1):
output, loss = train(*randomTrainingExample())
total_loss += loss
if iter % print_every == 0:
print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, total_loss/iter))
if iter % plot_every == 0:
all_losses.append(total_loss / iter)
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.figure()
plt.plot(all_losses)
plt.show()
max_length = 20
# Sample from a category and starting letter
def sample(category, start_letter='A'):
with torch.no_grad():
# no need to track history in sampling
category_tensor = categoryTensor(category)
input = inputTensor(start_letter)
hidden = rnn.initHidden(is_cuda)
output_name = start_letter
for i in range(max_length):
output, hidden = rnn(category_tensor, input[0], hidden)
topv, topi = output.topk(1)
topi = topi[0][0]
if topi == n_letters - 1:
break
else:
letter = all_letters[topi]
output_name += letter
input = inputTensor(letter)
return output_name
# Get multiple samples from one category and multiple starting letters
def samples(category, start_letters='ABC'):
for start_letter in start_letters:
print(sample(category, start_letter))
print("\n")
samples('Russian', 'CCZZYY')
samples('German', 'CCZZYY')
samples('Spanish', 'CCZZYY')
samples('Chinese', 'CCZZYY')
samples('Chinese', 'CXJ')
samples('Chinese', 'CXJ')
samples('Chinese', 'CXJ')
import numpy as np
max_length = 20
# Sample from a category and starting letter
def sample(category, start_letter='A'):
with torch.no_grad(): # no need to track history in sampling
# initial the model and input
category_tensor = categoryTensor(category)
input = inputTensor(start_letter)
hidden = rnn.initHidden(is_cuda)
output_name = start_letter
for i in range(max_length):
# get the output from model
output, hidden = rnn(category_tensor, input[0], hidden)
# change it into probability
output = torch.exp(output)
#################################################################################
# code here
topv, topi = output.topk(5)
topv = topv / topv.sum()
index = np.random.choice(5, 1, p=topv.cpu().numpy()[0])
################################################################################
topi = topi[0][index]
if topi == n_letters - 1:
break
else:
letter = all_letters[topi]
output_name += letter
input = inputTensor(letter)
return output_name
# Get multiple samples from one category and multiple starting letters
def samples(category, start_letters='ABC'):
for start_letter in start_letters:
print(sample(category, start_letter))
print("\n")
rnn.eval()
samples('Russian', 'CYY')
samples('German', 'CCY')
samples('Spanish', 'CCZZYY')
samples('Chinese', 'CCCZZZYYY')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Select an idle GPU
Step2: 1. Classifying Names with a Character-Level RNN
Step3: Now we have category_lines, a dictionary mapping each category (language) to a list of lines (names).
Step4: 1.2 Turning Names into Tensors
Step5: 1.3 Creating the Network
Step6: To run a step of this network we need to pass an input (in our case, the
Step7: For the sake of efficiency we don't want to be creating a new Tensor for
Step8: As you can see the output is a <1 x n_categories> Tensor, where
Step9: We will also want a quick way to get a training example (a name and its
Step10: 1.4.2 Training the Network
Step11: Now we just have to run that with a bunch of examples. Since the
Step12: 1.4.3 Plotting the Results
Step13: 1.5 Evaluating the Results
Step14: You can pick out bright spots off the main axis that show which
Step15: 1.7 Exercises
Step16: 2.2 Training
Step17: For each timestep (that is, for each letter in a training word) the
Step18: For convenience during training we'll make a randomTrainingExample
Step19: 2.2.2 Training the Network
Step20: The loss can be pictured as follows: the network runs forward through the entire sequence to compute the loss, then backpropagates through the entire sequence to compute the gradient
Step21: Training is business as usual - call train a bunch of times and wait a few minutes, printing the current time and loss every print_every examples, and keeping store of an average loss per plot_every examples in all_losses for plotting later.
Step22: 2.2.3 Plotting the Losses
Step23: 2.3 Sampling the Network
Step24: 2.4 Exercises
Step25: 2. Question: why does this model, once trained, produce different results when given the same input? Verify your own guess in the program above
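One possible check for question 2 (an assumption about where the randomness lives, not the notebook's official answer): with rnn.eval() dropout is disabled, so the only remaining randomness is the np.random.choice over the top-5 letters inside sample(); seeding NumPy should therefore make repeated calls agree.
import numpy as np
np.random.seed(0)
first = sample('Chinese', 'C')
np.random.seed(0)
second = sample('Chinese', 'C')
print(first, second, first == second)   # expected: identical names, True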
|
14,999 | <ASSISTANT_TASK:>
Python Code:
'abc'.count('a')
[1,2,'a'].count('a')
1+2
'Fish '+'license'
__metaclass__=type # make sure new-style classes are used
class Person:
def setName(self, name):
self.name=name
def getName(self):
return self.name
def greet(self):
print "Hello, world! I'm %s" % self.name
foo=Person()
foo.setName('Luke Skywalker')
foo.greet()
foo.name
class Secretive:
def __inaccessible(self):
print "Hello, world!"
def accessible(self):
print "The secret message is: "
self.__inaccessible()
s=Secretive()
s.__inaccessible()
s.accessible()
s._Secretive__inaccessible()
class Rectangle:
def __inf__(self):
self.width=0
self.height=0
def setSize(self,size):
self.width,self.height=size
def getSize(self):
return self.width,self.height
r=Rectangle()
r.width=10
r.height=5
r.getSize()
r.setSize((150,100))
r.width
__metaclass__=type
class Rectangle:
def __inf__(self):
self.width=0
self.height=0
def setSize(self,size):
self.width,self.height=size
def getSize(self):
return self.width,self.height
size=property(getSize,setSize)
w=Rectangle()
w.width=10
w.height=5
w.size
w.size=150,100
w.width
class FooBar:
def __init__(self):
self.somevar=42
f=FooBar()
f.somevar
class A:
def hello(self):
print "hello, I'm A"
class B(A):
pass
a=A()
b=B()
a.hello()
b.hello()
class B(A):
def hello(self):
print "hello, I'm B"
b=B()
b.hello()
class Bird:
def __init__(self):
self.hungry=True
def eat(self):
if self.hungry:
print 'Aaaah...'
self.hungry=False
else:
print 'No,thanks!'
b=Bird()
b.eat()
b.eat()
class SongBird(Bird):
def __init__(self):
self.sound='Squawk!'
def sing(self):
print self.sound
sb=SongBird()
sb.sing()
sb.eat()
class SongBird(Bird):
def __init__(self):
Bird.__init__(self)
self.sound='Squawk!'
def sing(self):
print self.sound
sb=SongBird()
sb.sing()
sb.eat()
sb.eat()
__metaclass__=type
class SongBird(Bird):
def __init__(self):
super(SongBird,self).__init__()
self.sound='Squawk!'
def sing(self):
print self.sound
sb=SongBird()
sb.sing()
sb.eat()
sb.eat()
def checkIndex(key):
"""Is the given key an acceptable index?
To be acceptable, the key must be a non-negative integer. If it is not an integer (for example a string), a TypeError is raised;
if it is negative, an IndexError is raised (because the sequence is infinitely long)."""
if not isinstance(key,(int,long)):
raise TypeError
if key<0:
raise IndexError
class ArithmeticSequence:
def __init__(self,start=0,step=1):
"""Initialize the arithmetic sequence.
start - the first value in the sequence
step - the difference between two adjacent values
changed - a dictionary of values modified by the user"""
self.start=start
self.step=step
self.changed={} # no items have been modified yet
def __getitem__(self,key):
"""Get an item from the arithmetic sequence."""
checkIndex(key)
try:
return self.changed[key] # modified?
except KeyError: # otherwise...
return self.start+key*self.step # ...calculate the value
def __setitem__(self,key,value):
"""Modify an item in the arithmetic sequence."""
checkIndex(key)
self.changed[key]=value
s=ArithmeticSequence(1,2)
s[4]
s[4]=2
s[4]
s[5]
del s[4]
s['four']
s[-4]
class Rectangle:
def __init__(self):
self.width=0
self.height=0
def __setattr__(self,name,value):
if name =='size':
self.width,self.height=value
else:
self.__dict__[name]=value
def __getattr__(self,name):
if name =='size':
return self.width,self.height
else:
raise AttributeError
w=Rectangle()
w.size
w.__dict__
w.size=(2,6)
w.size
w.width
hasattr(w,'size')
w.age=28
w.age
w.__dict__
class Fibs:
def __init__(self):
self.a=0
self.b=1
def next(self):
self.a,self.b=self.b,self.a+self.b
return self.a
def __iter__(self):
return self
fibs=Fibs()
for f in fibs:
if f>10:
print f
break
a=[1,2,3]
a.next()
it=iter([1,2,3])
it.next()
it.next()
it=iter([1,2,3])
it
list(it)
class C:
print 'Class C being defined...'
class MemberCounter:
members=0
def init(self):
MemberCounter.members+=1
m1=MemberCounter()
m1.init()
MemberCounter.members
m2=MemberCounter()
m2.init()
MemberCounter.members
m1.members
m2.members
m1.members='Two'
m1.members
m2.members
class Filter:
def init(self):
self.blocked=[]
def filter(self,sequence):
return [x for x in sequence if x not in self.blocked]
class SPAMFilter(Filter):
def init(self):
self.blocked=['SPAM']
f=Filter()
f.init()
f.filter([1,2,3])
s=SPAMFilter()
s.init()
s.filter(['SPAM','SPAM','SPAM','SPAM','eggs','bacon'])
class CounterList(list):
def __init__(self,*args):
super(CounterList,self).__init__(*args)
self.counter=0
def __getitem__(self,index):
self.counter+=1
return super(CounterList,self).__getitem__(index)
c1=CounterList('aaa')
c1
c1=CounterList((1,2,3))
c1
c1=CounterList({'first':1,'second':2})
c1
c1=CounterList(range(10))
c1
c1.reverse()
c1
del c1[3:6]
c1
c1.counter
c1[0]+c1[1]+c1[2]
c1.counter
issubclass(SPAMFilter,Filter)
issubclass(Filter,SPAMFilter)
SPAMFilter.__bases__
Filter.__bases__
s=SPAMFilter()
isinstance(s,SPAMFilter)
isinstance(s,str)
s.__class__
type(s)
type([1,2])
class Calculator:
def calculate(self,expression):
self.value=eval(expression)
class Talker:
def talk(self):
print 'Hi,my value is ',self.value
class TalkingCalculator(Calculator,Talker):
pass
tc=TalkingCalculator()
tc.calculate('1+2+3')
tc.talk()
hasattr(tc,'talk')
hasattr(tc,'fnord')
getattr(tc,'talk','None') # get the value of an object attribute; an optional default is used when the attribute does not exist
getattr(tc,'value','None')
setattr(tc,'name','Mr. Gumby') # the counterpart of getattr is setattr, which sets an object's attribute to a given value
tc.name
tc.__dict__
def flatten(nested):
for sublist in nested:
for element in sublist:
yield element
nested=[[1,2],[3,4],5]
flatten(nested)
for num in flatten(nested):
print num
nested=[[1,2],[3,4],[5]]
list(flatten(nested))
def flatten(nested):
try:
for sublist in nested:
for element in flatten(sublist):
yield element
except TypeError:
yield nested
list(flatten([[[1],2],3,4,[5,[6,7]],8]))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the variable x, you can call its count method without knowing whether it is a string or a list; its type does not matter (as long as you pass a single character as the argument).
Step2: 1.2 Encapsulation
Step3: Note: even on a recent Python (2.x), some features do not work on old-style classes. To make sure a class is new-style, put the assignment __metaclass__=type at the top of the module or script, or inherit from a new-style class (for example object, i.e. subclass the built-in object). New-style classes offer more functionality and are the recommended form; syntactically, a class is new-style if it (or one of its ancestors) inherits from object.
Step4: 2.2 Attributes and methods
Step5: Now __inaccessible cannot be reached from the outside, but it can still be used internally (for example from accessible):
Step6: Inside a class definition, every name (method or attribute) that starts with a double underscore is "translated" by prefixing it with a single underscore and the class name. Knowing this behind-the-scenes trick, you can still access these private methods from outside the class, although you should not:
Step7: In short, there is no way to guarantee that others cannot access an object's methods and attributes, but this kind of name mangling is a strong signal that they should not.
Step8: In the example above, getSize and setSize are accessor methods for a fictitious attribute named size, a tuple made up of width and height. If you later change the class so that size becomes a real attribute and width and height are computed on the fly, you would wrap them in accessor methods. But with many simple attributes this is impractical: you would have to write a lot of accessors. The solution is the property function.
Step9: In the new version of Rectangle, the property function creates an attribute size, with the accessor methods passed as arguments (getter first, then setter).
Step10: Clearly the size attribute still depends on the computations in getSize and setSize, but it looks like an ordinary attribute. In fact, property can be called with four parameters: fget, fset, fdel and doc. With no arguments the resulting attribute is neither readable nor writable; with a single argument (a getter) it is read-only. The third (optional) parameter is a method for deleting the attribute, and the fourth (optional) is a docstring.
Step11: (2) Overriding ordinary methods and the special constructor
Step12: The most basic way to add functionality in a subclass is to add methods, but you can also override some of the parent class's methods to customize inherited behaviour. Class B can override this method too.
Step13: Overriding is an important part of inheritance, and it matters especially for the constructor. The constructor initializes the state of a newly created object, and most subclasses need their own initialization code as well as that of the parent class. Although the overriding mechanism is the same for all methods, overriding the constructor is more likely to cause a particular problem: if a class's constructor is overridden, the parent class's constructor must be called, otherwise the object may not be initialized correctly. For example:
Step14: As you can see, once the bird has eaten it is no longer hungry. Now consider the subclass SongBird, which adds singing behaviour.
Step15: Because SongBird is a subclass of Bird it inherits the eat method, but calling eat reveals a problem:
Step16: The exception explains the error clearly: SongBird has no hungry attribute. The reason is that SongBird's constructor is overridden, and the new constructor contains no code to initialize hungry. To get the expected behaviour, SongBird's constructor must call the constructor of its parent class Bird to ensure the basic initialization happens. There are two ways to do this, as follows:
Step17: By supplying the current instance as the self argument to the unbound method, SongBird gets the full implementation of its parent's constructor, which means the hungry attribute gets set.
Step22: (3) Member-access methods
Step23: Note that the item-deletion method (__del__ here) is not implemented, because I want deleting elements to be illegal:
Step24: This class has no __len__ method, because it is infinitely long.
Step25: (4) __getattr__ and __setattr__
Step26: Note: __setattr__ is called even when the attribute involved is not size. If the attribute is size, the operation is performed as before; otherwise the special attribute __dict__ is used, which holds a dictionary of all the instance's attributes.
Step27: In many cases, __iter__ is put into objects that will be used in for loops.
Step28: Getting a sequence from an iterator:
Step29: 2.3 The class namespace
Step30: As shown above, a class definition is really just a code block that gets executed, which is useful: the class definition body is not limited to def statements, for example:
Step31: In the code above, a variable is defined in the class scope that all members (instances) can access, used to count the number of class members.
Step32: What about rebinding the members attribute in an instance?
Step33: 2.4 Inheriting from a parent class
Step34: The usefulness of the Filter class is that it can serve as a parent class for other classes, such as SPAMFilter, which filters 'SPAM' out of sequences.
Step35: 2.4.1 Subclassing list, dict and str
Step36: The CounterList class relies heavily on the behaviour of its superclass (list). Any method CounterList does not override (such as append, extend, index) can be used directly. In the two methods that are overridden, super is used to call the corresponding superclass method; __init__ only adds the initialization of the counter attribute, and __getitem__ updates counter.
Step37: As you can see, CounterList works like a list in most respects, but it has a counter attribute (initialized to 0) that is incremented every time a list element is accessed.
Step38: If you want to know the parent class(es) of a given class, you can use its special attribute __bases__:
Step39: Similarly, you can use the isinstance function to check whether an object is an instance of a class:
Step40: If you just want to know which class an object belongs to, use the __class__ attribute or the type function:
Step41: 2.6 Multiple parent classes
Step42: The subclass (TalkingCalculator) does nothing itself; it inherits all its behaviour from its parent classes. This makes it a talking calculator.
Step43: This behaviour is called multiple inheritance and is a very useful tool.
Step44: To see all the values stored in an object, you can use the __dict__ attribute.
Step45: 3 Generators
Step46: Any function containing a yield statement is called a generator. Beyond the name, it behaves quite differently from an ordinary function: instead of returning a value with return, it produces one value at a time. Each time a value is yielded, the function is frozen; it stops at that point and waits to be reactivated, then resumes execution from where it stopped.
Step47: As shown above, trying to iterate over the number 5 raises a TypeError.
Step48: 3.2 Recursive generators
Step49: When flatten is called there are two possibilities (as is usually the case with recursion): the base case and the recursive case. In the base case the function is asked to flatten a single element (for example a number); the for loop raises a TypeError (because you are trying to iterate over a number) and the generator simply yields that element. If a list is to be flattened, special handling is needed: the program must iterate over all the sublists, call flatten on them, and then use another for loop to yield all the elements of the flattened sublists.
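As a side note on Steps 8-10 (a sketch in modern syntax, not part of the original tutorial, and written in Python 3 style whereas the tutorial code is Python 2): the same size property can be spelled with the @property decorator.
class Rectangle:
    def __init__(self):
        self.width = 0
        self.height = 0
    @property
    def size(self):
        # getter: assemble the fictitious attribute from the real ones
        return self.width, self.height
    @size.setter
    def size(self, value):
        # setter: unpack back into the real attributes
        self.width, self.height = value
r = Rectangle()
r.size = (150, 100)
print(r.width)   # 150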
|