Unnamed: 0 (int64, 0 - 15.9k) | cleaned_code (string, 67 - 124k chars, nullable) | cleaned_prompt (string, 168 - 30.3k chars, nullable) |
---|---|---|
700 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division # Use print(...) instead of print ...
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_digits
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix, mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
%matplotlib inline
RANDOM_STATE = 1337
np.random.seed(RANDOM_STATE)
X, y = load_digits(return_X_y=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Initial Data Exploration (10 points)
|
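The prompt above ends at "Initial Data Exploration" without showing that step; a minimal illustrative sketch (not part of the original notebook) of what it might include, reusing the imports and the X, y arrays loaded above:
# Basic shape and class-balance checks on the digits data loaded above.
print(X.shape, y.shape)                  # (1797, 64) feature matrix, (1797,) label vector
print(pd.Series(y).value_counts())       # the ten digit classes are roughly balanced
# Visualize one 8x8 digit to sanity-check the features.
plt.imshow(X[0].reshape(8, 8), cmap='gray')
plt.title('label: {}'.format(y[0]))
plt.show()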
701 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from IPython.display import display
%matplotlib inline
# Load the dataset
files = 'titanic_data.csv'
data_titanic = pd.read_csv(files)
display(data_titanic.head())
data =data_titanic
# Show the dataset
display(data.head())
data.info()
# excluding some columns
del data['Ticket']
del data['Cabin']
del data['Embarked']
del data['Name']
del data['PassengerId']
del data['Fare']
data.describe(include='all')
# Calculate number of missing values
data.isnull().sum()
null_female = data[pd.isnull(data['Age'])]['Sex'] == 'female'
null_male = data[pd.isnull(data['Age'])]['Sex'] == 'male'
print "Total missing age for female:",null_female.sum()
print "Total missing age for male:",null_male.sum()
notnull_age = data[pd.notnull(data['Age'])]
null_age = data[pd.isnull(data['Age'])]
from scipy.stats import ttest_ind
ttest_ind(notnull_age['Survived'], null_age['Survived'])
print "Age median values by Age and Sex:"
#we are grouping by gender and class and taking median of age so we can replace with corrresponding values instead of NaN
print data.groupby(['Sex','Pclass'], as_index=False).median().loc[:, ['Sex','Pclass', 'Age']]
print "Age values for 5 first persons in dataset:"
print data.loc[data['Age'].isnull(),['Age','Sex','Pclass']].head(5)
# apply transformation: Age missing values are filled with regard to Pclass and Sex:
data.loc[:, 'Age'] = data.groupby(['Sex','Pclass']).transform(lambda x: x.fillna(x.median()))
print data.loc[[5,17,19,26,28],['Age','Sex','Pclass']].head(5)
data['Age'] = data['Age'].fillna(data['Age'].mean())
data.describe(include='all')
data_s=data
survival_group = data_s.groupby('Survived')
survival_group.describe()
# Ages below 1 are recorded as fractions (e.g. 0.42), so these rows are infants
data_s[data_s['Age'] < 1]
import matplotlib.pyplot as plt
import seaborn as sns
# Set style for all graphs
#sns.set_style("light")
#sns.set_style("whitegrid")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
def plot(a,i):
    fig = plt.figure()  # Plots in matplotlib reside within a figure object; use plt.figure to create a new figure
    # Create one or more subplots using add_subplot, because you can't plot on a blank figure
    ax = fig.add_subplot(1,1,1)
    # Variable
    ax.hist(data[a], bins=i)  # Here you can play with the number of bins
    plt.title(a + ' distribution')
    plt.xlabel(a)
    plt.ylabel('Passenger Count')
    plt.show()
plot("Age",30)
print "The above distribution of Age seems a little bit deviating from normal distribution"
print
plot("SibSp",8)
print "The above distribution of SibSp seems a right-skewed distribution"
plot("Parch",6)
print "The above distribution of Age seems a right-skewed distribution"
sns.factorplot(x="Sex", y="Age", data=data_s, kind="box", size=7, aspect=.8)\
.set_xticklabels(["Male","Female"])
plt.title('Boxplot of Age grouped by sex')
print "From the below plot we can see there are more elderly men than women and average age for men is higher than women"
sns.factorplot(x="Pclass", y="Age", data=data_s, kind="box", size=7, aspect=.8)\
.set_xticklabels(["1","2","3"])
plt.title('Boxplot of Age grouped by sex')
print "From the below plot we can see the average age is decreasing from calss 1 to class 3"
sns.factorplot( 'Sex' , 'Survived', data = data, kind = 'bar')
plt.title('Histogram of Survival rate grouped by Sex')
print "From the plot we can clearly see the survival rate of female is very high"
## GENDER
survivals = pd.crosstab([ data_s.Sex], data_s.Survived.astype(bool))
survivals.plot(kind='bar', stacked=False)
plt.ylabel("Passenger count")
plt.title('Histogram of Passenger count grouped by Sex and survived')
survival = data_s.groupby('Sex')['Survived']
survival.mean()
#PCLASS
survivals = pd.crosstab([data_s.Pclass], data_s.Survived.astype(bool))
survivals.plot(kind='bar', stacked=True)
plt.ylabel("Passenger count")
plt.title('Histogram of Passenger count grouped by Class')
survival=data.groupby(['Pclass'])
survival.mean()
survivals = pd.crosstab([data_s.Pclass, data_s.Sex], data_s.Survived.astype(bool))
survivals.plot(kind='bar', stacked=True)
survive=data.groupby(['Sex','Pclass'])
plt.ylabel("Passenger count")
plt.title('Histogram of passenger count grouped by sex and Class')
#survive.Survived.sum().plot(kind='barh')
survive.mean()
#Age
sns.factorplot(x="Survived", y="Age", hue='Sex', data=data_s, kind="box", size=7, aspect=.8)\
.set_xticklabels(["Expired","Survived"])
plt.title('Boxplot of Age grouped by sex and Survival')
# survive_A=data.groupby(['Sex','Age'])
#Age
# We are dividing the Age data into 3 buckets of (0-18),(18-40),(40-90)
# and labeling them as 'Childs','Adults','Seniors' respectively
data['group_age'] = pd.cut(data['Age'], bins=[0,18,40,90], labels=['Childs','Adults','Seniors'])
data.head(5)
survive_a=data.groupby(['group_age'])
survival_a = pd.crosstab([data.group_age], data_s.Survived.astype(bool))
survival_a.plot(kind='bar', stacked=True)
plt.title('Bar plot of Passenger count grouped by age categories ')
plt.ylabel("Passenger count")
# sns.factorplot(x="group_age", y="Age", hue='Sex', data=data, kind="box", size=7, aspect=.8)
survive_a.mean()
def group(d,v):
    if (d == 'female') and (v >= 18):
        return 'Woman'
    elif v < 18:
        return 'child'
    elif (d == 'male') and (v >= 18):
        return 'Man'
data['Category'] = data.apply(lambda row:group(row['Sex'], row['Age']), axis=1)
data.head(5)
survival_a = pd.crosstab([data.Category], data_s.Survived.astype(bool))
survival_a.plot(kind='bar', stacked=True)
plt.ylabel("Passenger count")
plt.title('Survival by Age category')
data.groupby(['Category']).mean()["Survived"]
g = sns.factorplot(x="Category", y="Survived", col="Pclass", data=data,
saturation=.5, kind="bar", ci=None, size=5, aspect=.8)
# Fix up the labels
(g.set_axis_labels('', 'Survival Rate')
.set_xticklabels(["Men", "Women","child"])
.set_titles("Class {col_name}")
.set(ylim=(0, 1))
.despine(left=True, bottom=True))
print 'Histogram of Survival rate grouped by Age Category and Class:'
# We are dividing the Age data into 3 buckets of (0-18),(18-40),(40-90)
# and labeling them as 'Childs','Adults','Seniors' respectively
data['group_age'] = pd.cut(data['Age'], bins=[0,18,40,90], labels=['Childs','Adults','Seniors'])
#finding mean Survival rate grouped by 'group_age','Sex'.
df = data.groupby(['group_age','Sex'],as_index=False).mean().loc[:,['group_age','Sex','Survived']]
f, (ax1, ax2,ax3) = plt.subplots(1, 3,figsize=(15,7))
g = sns.barplot(x="group_age", y="Survived", hue="Sex", data=df,ax=ax1)
ax1.set_title('Survival by Age and Sex')
#finding mean Survival rate grouped by 'group_age'.
data2 = data.groupby(['group_age'],as_index=False).mean().loc[:,['group_age','Survived']]
h = sns.barplot(x="group_age",y='Survived', data=data2,ax=ax2)
ax2.set_title('Survival by Age')
#finding mean Survival rate grouped by 'group_age'.
data3 = data.groupby(['group_age'],as_index=False).count().loc[:,['group_age','Survived']]
hh = sns.barplot(x="group_age",y='Survived', data=data3,ax=ax3)
ax3.set_title('Age distribution in Ship')
ax3.set_ylabel('Age Distribution')
for ax in f.axes:
    plt.sca(ax)
    plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
data_C=data.groupby(['Category',"Pclass"]).mean()
data_C.sort("Survived")["Survived"]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Description
Step2: From the above info(), we can see that the columns Age, Cabin and Embarked have missing values.
Step3: Let's decide whether we should remove the rows with missing age or fill the missing values with the mean. I'm first going to split the sample into two subsamples (with and without age) and perform a t-test.
Step4: Hypothesis
Step5: The p-value is less than 0.05, which results in rejecting H0, so there is a significant difference in means. So, I'm going to substitute the missing values with the mean age.
Step6: We can see that all columns have identical length.
Step7: From the above statistics
Step8: These must be newborns, and all of them survived.
Step9: From the above plot we can see that gender played an important role in the survival of each passenger.
Step10: From the above plot we can clearly see how individuals of different classes are distributed across ages. The red line shows the average age for each class.
Step11: A passenger from Class 1 is about 2.5 times more likely to survive than a passenger in Class 3.
Step12: From the above plot we can see how female individuals were given first preference, further broken down by class.
Step13: From the above boxplot and calculated mean
Step14: These are the percentages of survivors for each group_age.
Step15: Women and children were clearly given priority, which is reflected in the survival numbers.
|
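The plots above argue visually that Sex is strongly associated with survival; a chi-square test of independence on the Sex x Survived contingency table is one way to back that up numerically. This is an illustrative addition, not part of the original notebook; it assumes the `data` frame built above and keeps the notebook's Python 2 print style.
from scipy.stats import chi2_contingency
# Contingency table of Sex vs. Survived, followed by a chi-square test of independence.
ct = pd.crosstab(data['Sex'], data['Survived'])
chi2, p, dof, expected = chi2_contingency(ct)
print "chi2 = %.1f, p-value = %.3g, dof = %d" % (chi2, p, dof)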
702 | <ASSISTANT_TASK:>
Python Code:
cursor.execute("SELECT * FROM monuments_all WHERE country='se-bbr'")
all_bbr = pd.io.sql.read_sql('select * from monuments_all WHERE country="se-bbr"', conn)
all_bbr.shape
table_name = "se_bbr" # I've renamed monuments_se-bbr_(se) to 'se_bbr' in local database, change to correct name
se_bbr = pd.io.sql.read_sql('select * from se_bbr', conn)
se_bbr.shape
se_bbr.keys()
se_bbr.ix[10:20, ["namn","funktion"]]
first = se_bbr.groupby("kommun")[["funktion","namn"]].first()
first.head()
first.loc[["Ale","Täby","Åre"],["funktion","namn"]]
se_bbr.namn.str.extract('(?P<name>\[\[[\w\.\|\- ]+\]\])\,? ?(?P<name2>[ \w]+)? ?(?P<name3>\[\[[\w\.\|\- ]+\]\])? ?(?P<buildId>\([\w\.\d \|\:\-;,]+\))',expand=True)
se_bbr.ix[5672]
se_bbr_namn = se_bbr.namn.str.extract('(?P<name>\[\[[\w`\.,\|\- ]+\]\])\,? ?(?P<name2>[ \w]+)? ?(?P<name3>\[\[[\w\.\|\- ]+\]\])? ?(?P<buildId>\([\w\.\d \|\:\-;,]+\))',expand=True)
len(se_bbr_namn[se_bbr_namn["name"] == pd.np.nan])
len(se_bbr_namn[se_bbr_namn["buildId"] != pd.np.nan])
se_bbr[pd.isnull(se_bbr["namn"])]
len(se_bbr_namn.name.value_counts())
len(se_bbr_namn.name2.value_counts())
len(se_bbr_namn.name3.value_counts())
se_bbr_namn.name3.value_counts()
import re  # needed for the re.IGNORECASE flag below
se_bbr[se_bbr.namn.str.contains("sproge", flags=re.IGNORECASE) == True]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: monuments_all_se-bbr_(sv)
Step2: Let's check if we have extracted the two always-occurring fields 'name' and 'buildId' for all objects
Step3: How many objects did we get the other fields 'name2' and 'name3' for?
|
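As an aside (not in the original notebook), the capture groups can be sanity-checked on a single string before running them over the whole column. The pattern below is a simplified two-group version of the one above, and the example string is invented for illustration.
import re
# Extract the wiki-link name and the parenthesized building id from one test string.
pattern = r'(?P<name>\[\[[\w\.\|\- ]+\]\])\,? ?(?P<buildId>\([\w\.\d \|\:\-;,]+\))'
m = re.search(pattern, '[[Kastalen i Lund|Kastalen]] (21300000012345)')
print(m.groupdict() if m else None)  # expect both 'name' and 'buildId' groups to be filled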
703 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_quant_model)
!ls -lh {tflite_models_dir}
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    # Run predictions on every image in the "test" dataset.
    prediction_digits = []
    for test_image in test_images:
        # Pre-processing: add batch dimension and convert to float32 to match with
        # the model's input data format.
        test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
        interpreter.set_tensor(input_index, test_image)
        # Run inference.
        interpreter.invoke()
        # Post-processing: remove batch dimension and find the digit with highest
        # probability.
        output = interpreter.tensor(output_index)
        digit = np.argmax(output()[0])
        prediction_digits.append(digit)
    # Compare prediction results with ground truth labels to calculate accuracy.
    accurate_count = 0
    for index in range(len(prediction_digits)):
        if prediction_digits[index] == test_labels[index]:
            accurate_count += 1
    accuracy = accurate_count * 1.0 / len(prediction_digits)
    return accuracy
print(evaluate_model(interpreter))
print(evaluate_model(interpreter_quant))
import tensorflow_hub as hub
resnet_v2_101 = tf.keras.Sequential([
keras.layers.InputLayer(input_shape=(224, 224, 3)),
hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/4")
])
converter = tf.lite.TFLiteConverter.from_keras_model(resnet_v2_101)
# Convert to TF Lite without quantization
resnet_tflite_file = tflite_models_dir/"resnet_v2_101.tflite"
resnet_tflite_file.write_bytes(converter.convert())
# Convert to TF Lite with quantization
converter.optimizations = [tf.lite.Optimize.DEFAULT]
resnet_quantized_tflite_file = tflite_models_dir/"resnet_v2_101_quantized.tflite"
resnet_quantized_tflite_file.write_bytes(converter.convert())
!ls -lh {tflite_models_dir}/*.tflite
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Post-training dynamic range quantization
Step2: Train a TensorFlow model
Step3: For example, since you trained the model for only a single epoch, it only trains to roughly 96% accuracy.
Step4: Write it out to a tflite file.
Step5: To quantize the model on export, set the optimizations flag to optimize for size.
Step6: Check that the resulting file is about 1/4 the size.
Step7: Run the TFLite models
Step8: Test the model on one image
Step9: Evaluate the models
Step10: Repeat the evaluation on the dynamic range quantized model to obtain the following.
Step11: In this example, the compressed model shows no difference in accuracy.
|
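A small illustrative addition, not part of the original tutorial: the `!ls -lh` comparison above can also be done programmatically with the pathlib objects already defined (`tflite_model_file` and `tflite_model_quant_file`).
# Compare on-disk sizes of the float model and the dynamic-range-quantized model.
float_size = tflite_model_file.stat().st_size
quant_size = tflite_model_quant_file.stat().st_size
print("float model:     %.1f KB" % (float_size / 1024.0))
print("quantized model: %.1f KB" % (quant_size / 1024.0))
print("compression ratio: %.2fx" % (float_size / float(quant_size)))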
704 | <ASSISTANT_TASK:>
Python Code:
import random
import operator as op
import optunity.metrics
import semisup_metrics as ss
import numpy as np
from matplotlib import pyplot as plt
import pickle
import csv
import util
%matplotlib inline
# fraction of positives/negatives that are known
# known_neg_frac == 0 implies PU learning
known_pos_frac = 0.1
known_neg_frac = 0.0
# load results from the paper?
# http://www.sciencedirect.com/science/article/pii/S0925231215001174
use_paper_data = True
dataset = 'covtype'
#dataset = 'sensit'
# generate a simulated data set (requires use_paper_data = False)
# number of instances per class
# the number of unlabeled instances depends on these settings + known_X_frac
# if we use a lot of data, computation times are a little long
# but the resulting estimates become excellent, which corroborates consistency
# num_pos = 20000
# num_neg = 100000
# if you simulate a smaller data set you can see some effects better
# but estimated bounds will be much wider
# because the ECDF confidence interval becomes large
num_pos = 2000
num_neg = 10000
distid = 2 # can be 1, 2 or 3, these correspond to certain curves in ROC space
# generate rankings and label vectors and compute corresponding beta
# beta is based on known_pos_frac, known_neg_frac and the number of pos and negs
# labels is a list of {True, False, None}, where None indicates unlabeled
# true_labels is a list of {True, False}
if use_paper_data:
    labels, true_labels, decision_values, beta = util.load_dataset(dataset,
                                                                    known_pos_frac,
                                                                    known_neg_frac)
else:
    labels, true_labels, decision_values, beta = util.simulate_data(distid, num_pos, num_neg,
                                                                     known_pos_frac,
                                                                     known_neg_frac)
print('total number of instances: \t %d' % len(labels))
print('number of known positives: \t %d' % len(list(filter(lambda x: x == True, labels))))
print('number of known negatives: \t %d' % len(list(filter(lambda x: x == False, labels))))
print('number of unlabeled instances: \t %d' % len(list(filter(lambda x: x == None, labels))))
print('number of latent positives: \t %d' % len(list(filter(lambda x: x[0] == None and x[1] == True, zip(labels, true_labels)))))
print('number of latent negatives: \t %d' % len(list(filter(lambda x: x[0] == None and x[1] == False, zip(labels, true_labels)))))
print('beta: \t %1.4f' % beta)
betahat = beta
print('true value of beta\t%1.4f' % beta)
print('point estimate of beta\t%1.4f' % betahat)
# determine the latent labels (requires manual labeling in practice)
latent_labels = [true_label for label, true_label in zip(labels, true_labels) if label is None]
# we will peek at 300 labels to estimate beta based on a 95% credible interval
num_peeks = 300
credible_interval_length = 0.95
# peek at the true label of a subset of unlabeled instances
unlabeled_ranks = [idx for idx, lab in enumerate(labels) if lab == None]
sample = np.random.choice(unlabeled_ranks, num_peeks, replace=False)
sample_labels = [true_labels[i] for i in sample]
npos = sum(sample_labels)
nneg = sample.size - npos
# compute interval based on the percentile function of the posterior
beta_interval_ppf = ss.estimate_beta_by_labeling(npos, nneg, credible_interval_length, HPD=False)
print('Credible interval via PPF: %1.4f <= beta <= %1.4f.' % (beta_interval_ppf.lower, beta_interval_ppf.upper))
print("Width of credible interval based on PPF: %1.5f.\n" % (beta_interval_ppf.upper - beta_interval_ppf.lower))
# compute interval based on the highest posterior density region (shorter)
beta_interval_hpd = ss.estimate_beta_by_labeling(npos, nneg, credible_interval_length, HPD=True)
print('Credible interval via HPD: %1.4f <= beta <= %1.4f.' % (beta_interval_hpd.lower, beta_interval_hpd.upper))
print("Width of credible interval based on HPD: %1.5f." % (beta_interval_hpd.upper - beta_interval_hpd.lower))
# continue with beta based on HPD, because this is the shortest credible interval
# note that the coverage of both intervals is equal to credible_interval_length
beta_interval = beta_interval_hpd
# sort the labels in descending order of corresponding decision values
sort_labels, sort_dv, sort_true_labels = zip(*sorted(zip(labels, decision_values, true_labels),
key=op.itemgetter(1), reverse=True))
# ranks of the known positives
known_pos_ranks = [idx for idx, lab in enumerate(sort_labels) if lab]
# compute rank ECDF of known positives
known_pos_ecdf = ss.compute_ecdf_curve(known_pos_ranks)
ci_width = 0.95 # width of the confidence band on ECDF to be used
use_bootstrap = True # use bootstrap to compute confidence band
nboot = 2000 # number of bootstrap iterations to use, not used if use_bootstrap = False
if use_bootstrap:
    cdf_bounds = ss.bootstrap_ecdf_bounds(labels, decision_values, nboot=nboot, ci_width=ci_width)
else:
    cdf_bounds = ss.dkw_bounds(labels, decision_values, ci_width=ci_width)
latent_positives = map(lambda x, y: x == True and y == None, true_labels, labels)
sort_lps, _ = zip(*sorted(zip(latent_positives, decision_values),
key=op.itemgetter(1), reverse=True))
latent_pos_ranks = [idx for idx, lab in enumerate(sort_lps) if lab]
latent_pos_ecdf = ss.compute_ecdf_curve(latent_pos_ranks)
# convenience plot functions
def plot_proxy():
    p = plt.Rectangle((0, 0), 0, 0, color='blue', alpha=0.4)
    ax = plt.gca()
    ax.add_patch(p)
    return p

def fix_plot_shape(fig):
    ax = fig.add_subplot(111, aspect='equal')
    axes = fig.gca()
    axes.set_xlim([0,1])
    axes.set_ylim([0,1])
xs = list(range(len(labels)))
plt.figure(1)
plt.fill_between(xs, list(map(cdf_bounds.lower, xs)), list(map(cdf_bounds.upper, xs)),
color='blue', alpha=0.4)
plt.plot(*zip(*known_pos_ecdf), color='black', linestyle='dashed', linewidth=2)
plt.plot(*zip(*latent_pos_ecdf), color='black', linewidth=2)
plot_proxy()
plt.xlabel('rank')
plt.ylabel('TPR')
plt.legend(['known positives', 'latent positives', 'expected region'], loc=4)
plt.title('Rank CDF')
plt.show()
# first, compute contingency tables based on the point estimate betahat
# presorted = True is a computational shortcut
# we can use this because we already sorted by decision values earlier
tables_point = ss.compute_contingency_tables(labels=sort_labels, decision_values=sort_dv,
reference_lb=cdf_bounds.lower,
reference_ub=cdf_bounds.upper,
beta=betahat, presorted=True)
# second, compute contingency tables based on the credible interval
# we start with a convenience function for readability
compute_tables = lambda beta: ss.compute_contingency_tables(labels=sort_labels, decision_values=sort_dv,
reference_lb=cdf_bounds.lower,
reference_ub=cdf_bounds.upper,
beta=beta,
presorted=True)
# tables based on lower bound on beta, only lower bound tables are to be used
tables_interval_lb = compute_tables(beta_interval.lower)
# tables based on upper bound on beta, only upper bound tables are to be used
tables_interval_ub = compute_tables(beta_interval.upper)
# merge into a single variable
# ss._lb_ub is a collections.namedtuple with fields 'lower' and 'upper'
tables_interval = ss._lb_ub(lower=tables_interval_lb.lower, upper=tables_interval_ub.upper)
# compute the true ROC curve (we use Optunity's ROC function)
_, roc_true = optunity.metrics.roc_auc(true_labels, decision_values, return_curve=True)
# compute the ROC curve that would be obtained by treating all unlabeled as negative
_, roc_neg = optunity.metrics.roc_auc(labels, decision_values, return_curve=True)
# we can directly use the contingency tables we already computed anyways
roc_bounds = lambda tables: ss._lb_ub(lower=ss.roc_from_cts(tables.lower),
upper=ss.roc_from_cts(tables.upper))
roc_bounds_point = roc_bounds(tables_point)
roc_bounds_interval = roc_bounds(tables_interval)
# an alternative without all these intermediate steps would be:
# roc_bounds_point = ss.roc_bounds(labels, decision_values, beta=betahat)
# labels and decision values of peeked instances in sample_labels and sample_decision_values
sample_decision_values = [decision_values[i] for i in sample]
# labels and decision values of known instances before peeking
filtered = filter(lambda x: x[0] is not None, zip(labels, decision_values))
apriori_known_labels = list(map(op.itemgetter(0), filtered))
apriori_known_dvs = list(map(op.itemgetter(1), filtered))
# all known labels and decision values after peeking
all_known_labels = apriori_known_labels + sample_labels
all_known_dvs = apriori_known_dvs + sample_decision_values
# data statistics
print('total number of known positives after peeking:\t%d' % sum(all_known_labels))
print('total number of known negatives after peeking:\t%d' % (len(all_known_labels) - sum(all_known_labels)))
# compute the resulting ROC curve
_, roc_peek = optunity.metrics.roc_auc(all_known_labels, all_known_dvs, return_curve=True)
roc_peek
# convenience function for plot legend
def plot_proxy2():
    p = plt.Rectangle((0, 0), 0, 0, color='none',
                      edgecolor='red', hatch='xxx', alpha=0.8)
    ax = plt.gca()
    ax.add_patch(p)
    return p
xs = [float(x) / 100 for x in range(101)]
roc_point_up = ss.zoh(*zip(*roc_bounds_point.upper))
roc_point_lo = ss.zoh(*zip(*roc_bounds_point.lower))
roc_interval_up = ss.zoh(*zip(*roc_bounds_interval.upper))
roc_interval_lo = ss.zoh(*zip(*roc_bounds_interval.lower))
fig = plt.figure(2)
fix_plot_shape(fig)
plt.plot(*zip(*roc_true), color='black', linewidth=2)
plt.fill_between(xs, list(map(roc_point_lo, xs)), list(map(roc_point_up, xs)), color='blue', alpha=0.4)
plt.fill_between(xs, list(map(roc_interval_lo, xs)), list(map(roc_interval_up, xs)),
color='none', edgecolor='red', alpha=0.8, hatch='xxx')
plt.plot(*zip(*roc_neg), color='black', linestyle='dashed')
plt.plot(*zip(*roc_peek), color='red', linestyle='solid', linewidth=2)
plot_proxy()
plot_proxy2()
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend(['true curve', 'beta=0', 'after labeling',
'expected region via betahat', 'expected region via interval'],
loc="upper left", bbox_to_anchor=(1,1))
plt.title('Receiver Operating Characteristic curve')
plt.show()
# we can directly use the contingency tables we already computed anyways
pr_bounds = lambda tables: ss._lb_ub(lower=ss.pr_from_cts(tables.lower),
upper=ss.pr_from_cts(tables.upper))
pr_bounds_point = pr_bounds(tables_point)
pr_bounds_interval = pr_bounds(tables_interval)
# an alternative without all these intermediate steps would be:
# roc_bounds_point = ss.roc_bounds(labels, decision_values, beta=betahat)
# compute the true ROC curve (we use Optunity's ROC function)
_, pr_true = optunity.metrics.pr_auc(true_labels, decision_values, return_curve=True)
# compute the ROC curve that would be obtained by treating all unlabeled as negative
_, pr_neg = optunity.metrics.pr_auc(labels, decision_values, return_curve=True)
# PR curve based on all known labels after labeling 300 unlabeled instances
# we compute this by constructing the PR curve with weighted instances
# the weighting is used to fix the empirical class contributions to our estimate of beta
# compute the weight
npos_prior = len(list(filter(lambda x: x == True, labels)))
neg_weight = float(npos + npos_prior) / npos
print("Normalizing weight for negatives: %1.3f" % neg_weight)
all_known_labels, all_known_dvs
all_known_labels_sorted, _ = zip(*sorted(zip(all_known_labels, all_known_dvs),
key=op.itemgetter(1), reverse=True))
known_after_labeling_cts = ss.compute_labeled_cts(all_known_labels_sorted)
# weigh contingency tables
weighted_cts = map(lambda ct: ss.ContingencyTable(TP=ct.TP, FN=ct.FN,
FP=ct.FP * neg_weight,
TN=ct.TN * neg_weight),
known_after_labeling_cts)
pr_after_labeling = ss.pr_from_cts(weighted_cts)
pr_point_up = ss.zoh(*zip(*pr_bounds_point.upper))
pr_point_lo = ss.zoh(*zip(*pr_bounds_point.lower))
pr_interval_up = ss.zoh(*zip(*pr_bounds_interval.upper))
pr_interval_lo = ss.zoh(*zip(*pr_bounds_interval.lower))
fig = plt.figure(3)
fix_plot_shape(fig)
plt.plot(*zip(*pr_true), color='black', linewidth=2)
plt.plot(*zip(*pr_neg), color='black', linestyle='dashed')
plt.fill_between(xs, list(map(pr_point_lo, xs)), list(map(pr_point_up, xs)), color='blue', alpha=0.4)
plt.fill_between(xs, list(map(pr_interval_lo, xs)), list(map(pr_interval_up, xs)),
color='none', edgecolor='red', alpha=0.8, hatch='xxx')
plt.plot(*zip(*pr_after_labeling), color='red', linestyle='solid', linewidth=2)
plot_proxy()
plot_proxy2()
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(['true curve', 'beta=0', 'after labeling',
'expected region via betahat', 'expected region via interval'],
loc="upper left", bbox_to_anchor=(1,1))
plt.title('Precision-Recall curve')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create or load data set
Step2: Data set characteristics
Step3: Unknown parameters
Step4: Estimate beta
Step5: Alternatively, estimate beta by peeking at the latent labels of a few unlabeled instances. This corresponds to Section 6.3 in the manuscript.
Step6: 1. Compute cumulative rank distribution of known positives
Step7: Next, we determine a confidence interval on the rank CDF of known positives. We can do this in several ways; our code provides a bootstrap approach and a method based on the Dvoretzky–Kiefer–Wolfowitz (DKW) inequality. Feel free to experiment.
Step8: For reference, we will also compute the rank ECDF of latent positives. In practical applications this is impossible, as the latent positives are by definition not known.
Step9: Plot the rank CDFs of known and latent positives. It may occur that the rank CDF of latent positives is not within the confidence interval of known positives, in which case the corresponding bounds on performance will not be strict.
Step10: 2. Compute contingency tables for each rank
Step11: 3. Compute and plot performance estimates based on the contingency tables
Step12: Compute bounds based on the proposed method.
Step13: Finally, what if we would compute bounds on the labeled instances and the ones we peeked at to estimate beta? The indices of the random sample of unlabeled data we peeked at is in sample.
Step14: Plot the resulting curves
Step15: Including the resulting ROC curve based on the peeked data.
|
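Step7 mentions the Dvoretzky-Kiefer-Wolfowitz (DKW) inequality as an alternative to the bootstrap band. As a hedged sketch of the underlying bound (the helper name `dkw_epsilon` is illustrative and not part of `semisup_metrics`): for an ECDF built from n samples, a band of half-width eps = sqrt(ln(2/alpha) / (2n)) around the ECDF covers the true CDF with probability at least 1 - alpha.
import numpy as np

def dkw_epsilon(n, ci_width=0.95):
    # DKW inequality: P(sup_x |F_n(x) - F(x)| > eps) <= 2*exp(-2*n*eps**2),
    # so eps = sqrt(ln(2/alpha) / (2*n)) gives a (1 - alpha) confidence band.
    alpha = 1.0 - ci_width
    return np.sqrt(np.log(2.0 / alpha) / (2.0 * n))

print(dkw_epsilon(len(known_pos_ranks)))  # half-width of a 95% band for the known-positive rank ECDF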
705 | <ASSISTANT_TASK:>
Python Code:
from matplotlib import pyplot as plt
import numpy as np
x = np.linspace(1E-8,np.pi/6,1000)
y = x*np.sin(1./x)
plt.plot(x, y)
plt.plot(x, x)
plt.plot(x, -x)
plt.show()
from mat281_code import black_box
black_box.iplot()
# Computing the mean
x = np.array([1.2, 2.2, 2.6, 3.1, 3.1, 3.2, 3.3, 3.6, 3.6, 4.3])
print(x.mean())
print(x.min(), x.std())
print x.min(), x.std()
# Computing the mean
x = np.array([1.2, 2.2, 2.6, 3.1, 3.1, 3.2, 3.3, 3.6, 3.6, 4.3])
x_mean = x.mean()
print("Promedio angular: {0:.2f} [rad] ó {1:.2f} [deg]".format(x_mean, x_mean*180/np.pi))
plt.figure(figsize=(14,3))
plt.plot(x, 0*x, 'go', alpha=0.75)
plt.plot([x.mean(), x.mean()], [-1,1], '-g', lw=2.0)
plt.gca().set_yticks([])
plt.show()
cos_mean = np.cos(x).mean()
sin_mean = np.sin(x).mean()
trig_mean = np.arctan2(sin_mean, cos_mean)
print("Promedio espacial: {0:.2f} [rad] ó {1:.2f} [deg]".format(trig_mean, trig_mean*180/np.pi))
plt.figure(figsize=(12,12))
aux_th = np.arange(0,1000.)*2*np.pi/1000.
r = (cos_mean**2 + sin_mean**2)**.5
plt.plot(0, 0, 'k.', alpha=0.25)
plt.plot(np.cos(aux_th), np.sin(aux_th), 'k-', alpha=0.25)
plt.plot(np.cos(x), np.sin(x), 'b*', alpha=0.75, ms=16)
plt.plot(np.cos(x_mean), np.sin(x_mean), 'go', alpha=1.0, ms=16, label=r"$(\cos(\overline{\theta}),\sin(\overline{\theta}))$")
plt.plot(cos_mean, sin_mean, 'rs', alpha=1.0, ms=16, label=r"$(\overline{\cos(\theta}),\overline{\sin(\theta}))$")
plt.plot(cos_mean/r, sin_mean/r, 'rs', alpha=0.5, ms=16, label=r"$(\overline{\cos(\theta}),\overline{\sin(\theta}))$")
plt.xlim([-1.1, 1.1])
plt.ylim([-1.1, 1.1])
plt.legend(loc="lower right", fontsize=20, numpoints=1)
plt.xlabel(r"$x=cos(\theta)$ [$L$]")
plt.ylabel(r"$x=sin(\theta)$ [$L$]")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How?
Step2: How?
Step3: Example 1
Step4: Example 1
|
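The cells above spell the computation out inline; as a compact restatement of the same idea (an illustrative sketch, not part of the course notebook), the circular mean of a set of angles is the angle of the average unit vector.
import numpy as np

def circular_mean(theta):
    # Map each angle to a unit vector, average the vectors,
    # and return the angle of the resulting mean vector.
    return np.arctan2(np.mean(np.sin(theta)), np.mean(np.cos(theta)))

print(circular_mean(x))  # matches trig_mean computed above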
706 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import cPickle as pickle
from copy import deepcopy
%matplotlib inline
plt.style.use("fivethirtyeight")
sns.set()
all_graphs = pickle.load(open("train-freq-graphs.pkl",'r'))
all_labels = pickle.load(open("train-freq-labels.pkl",'r'))
def train_test_split(graph_list, label_list, test_fraction=0.20):
    """Randomly splits a set of graphs and labels into training and testing data sets. We need a custom function
    because the dataset isn't a numeric matrix, but a list of NetworkX Graph objects."""
    # np.random.choice without replacement avoids duplicate (and out-of-range) test indices
    rand_ix = np.random.choice(len(graph_list), size=int(len(graph_list) * test_fraction), replace=False)
    print "random indices: %s" % rand_ix
    test_graphs = []
    test_labels = []
    train_graphs = []
    train_labels = []
    # first copy the chosen test values, without deleting anything since that would alter the indices
    for ix in rand_ix:
        test_graphs.append(graph_list[ix])
        test_labels.append(label_list[ix])
    # now copy the indices that are NOT in the test index list
    for ix in range(0, len(graph_list)):
        if ix in rand_ix:
            continue
        train_graphs.append(graph_list[ix])
        train_labels.append(label_list[ix])
    return (train_graphs, train_labels, test_graphs, test_labels)
train_graphs, train_labels, test_graphs, test_labels = train_test_split(all_graphs, all_labels, test_fraction=0.10)
print "train size: %s" % len(train_graphs)
print "test size: %s" % len(test_graphs)
def graphs_to_eigenvalue_matrix(graph_list, num_eigenvalues = None):
    """Given a list of NetworkX graphs, returns a numeric matrix where rows represent graphs,
    and columns represent the reverse sorted eigenvalues of the Laplacian matrix for each graph,
    possibly trimmed to only use the num_eigenvalues largest values. If num_eigenvalues is
    unspecified, all eigenvalues are used."""
    # peek at the first graph and see how many eigenvalues there are
    tg = graph_list[0]
    n = len(nx.spectrum.laplacian_spectrum(tg, weight=None))
    # we either use all of the eigenvalues, or we use the smaller of
    # the requested number or the actual number (if it is smaller than requested)
    if num_eigenvalues is None:
        ev_used = n
    else:
        ev_used = min(n, num_eigenvalues)
    print "(debug) eigenvalues - test graph: %s num_eigenvalues: %s ev_used: %s" % (n, num_eigenvalues, ev_used)
    data_mat = np.zeros((len(graph_list), ev_used))
    #print "data matrix shape: ", data_mat.shape
    for ix in range(0, len(graph_list)):
        spectrum = sorted(nx.spectrum.laplacian_spectrum(graph_list[ix], weight=None), reverse=True)
        data_mat[ix,:] = spectrum[0:ev_used]
    return data_mat
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
train_matrix = graphs_to_eigenvalue_matrix(train_graphs, num_eigenvalues=20)
test_matrix = graphs_to_eigenvalue_matrix(test_graphs, num_eigenvalues=20)
clf = GradientBoostingClassifier(n_estimators = 250)
clf.fit(train_matrix, train_labels)
pred_label = clf.predict(test_matrix)
cm = confusion_matrix(test_labels, pred_label)
cmdf = pd.DataFrame(cm)
cmdf.columns = map(lambda x: 'predicted {}'.format(x), cmdf.columns)
cmdf.index = map(lambda x: 'actual {}'.format(x), cmdf.index)
print cmdf
print classification_report(test_labels, pred_label)
print "Accuracy on test: %0.3f" % accuracy_score(test_labels, pred_label)
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
pipeline = Pipeline([
('clf', GradientBoostingClassifier())
])
params = {
'clf__learning_rate': [5.0,2.0,1.0, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01],
'clf__n_estimators': [10,25,50,100,250,500]
}
grid_search = GridSearchCV(pipeline, params, n_jobs = -1, verbose = 1)
grid_search.fit(train_matrix, train_labels)
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters:")
best_params = grid_search.best_estimator_.get_params()
for param in sorted(params.keys()):
    print("param: %s: %r" % (param, best_params[param]))
pred_label = grid_search.predict(test_matrix)
cm = confusion_matrix(test_labels, pred_label)
cmdf = pd.DataFrame(cm)
cmdf.columns = map(lambda x: 'predicted {}'.format(x), cmdf.columns)
cmdf.index = map(lambda x: 'actual {}'.format(x), cmdf.index)
print cmdf
print classification_report(test_labels, pred_label)
print "Accuracy on test: %0.3f" % accuracy_score(test_labels, pred_label)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The strategy, unlike our first attempt, requires a real train/test split in the dataset because we're going to fit an actual model (although a true LOO cross validation is still of course possible). But we need a train_test_split function which is able to deal with lists of NetworkX objects.
Step4: Feature Engineering
Step5: First Classifier
Step6: Definite improvement over just using the eigenvalue distance, as expected. Since there's a small data set here, just 90 graphs to train on and 9 in the test set at 10% fraction, the results are somewhat sensitive to the random draw of the test set, but I get values anywhere from 85% to 100% accuracy, with 89% being fairly typical.
|
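Because the test set here is tiny (9 graphs), accuracy swings between 85% and 100% depending on the random draw, as noted above. A quick cross-validation sketch gives a less draw-dependent estimate; this is an editorial addition that reuses `train_matrix` and `train_labels` from the cells above and assumes a scikit-learn version providing `sklearn.model_selection` (older releases expose the same function under `sklearn.cross_validation`).
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import GradientBoostingClassifier

# 5-fold CV on the eigenvalue feature matrix; report mean +/- std across folds.
scores = cross_val_score(GradientBoostingClassifier(n_estimators=250),
                         train_matrix, train_labels, cv=5)
print("CV accuracy: %0.3f +/- %0.3f" % (scores.mean(), scores.std()))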
707 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet # IGNORE_COPYRIGHT: cleared by OSS licensing
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 32
IMG_SIZE = (160, 160)
train_dataset = image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
validation_dataset = image_dataset_from_directory(validation_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
for image, _ in train_dataset.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        plt.imshow(augmented_image[0] / 255)
        plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = IMG_SIZE + (3,)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
base_model.trainable = False
# Let's take a look at the base model architecture
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
inputs = tf.keras.Input(shape=(160, 160, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
len(model.trainable_variables)
initial_epochs = 10
loss0, accuracy0 = model.evaluate(validation_dataset)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))
history = model.fit(train_dataset,
epochs=initial_epochs,
validation_data=validation_dataset)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer = tf.keras.optimizers.RMSprop(lr=base_learning_rate/10),
metrics=['accuracy'])
model.summary()
len(model.trainable_variables)
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_dataset,
epochs=total_epochs,
initial_epoch=history.epoch[-1],
validation_data=validation_dataset)
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
loss, accuracy = model.evaluate(test_dataset)
print('Test accuracy :', accuracy)
#Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
predictions = model.predict_on_batch(image_batch).flatten()
# Apply a sigmoid since our model returns logits
predictions = tf.nn.sigmoid(predictions)
predictions = tf.where(predictions < 0.5, 0, 1)
print('Predictions:\n', predictions.numpy())
print('Labels:\n', label_batch)
plt.figure(figsize=(10, 10))
for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[predictions[i]])
    plt.axis("off")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transfer learning and fine-tuning
Step2: Data preprocessing
Step3: Display the first nine images and labels from the training set:
Step4: Since the original dataset does not contain a test set, you need to create one. To do so, use tf.data.experimental.cardinality to determine how many batches of data are in the validation set, then move 20% of them to the test set.
Step5: Configure the dataset for performance
Step6: Use data augmentation
Step7: Note: These layers are only active during training, when you call model.fit. They are inactive when the model is used in inference mode via model.evaluate or model.fit.
Step8: Rescale pixel values
Step9: Note: Alternatively, you could use a Rescaling layer to rescale pixel values from [0, 255] to [-1, 1].
Step10: Note: If you use other tf.keras.applications, be sure to check the API docs to determine whether they expect pixels in the [-1, 1] or [0, 1] range, or use the included preprocess_input function.
Step11: This feature extractor converts each 160x160x3 image into a 5x5x1280 block of features. Let's see what it does to a batch of example images:
Step12: Feature extraction
Step13: Important note about BatchNormalization layers
Step14: Add a classification head
Step15: Apply a tf.keras.layers.Dense layer to convert these features into a single prediction per image. You don't need an activation function here because this prediction will be treated as a logit, or a raw prediction value. Positive numbers predict class 1, negative numbers predict class 0.
Step16: Build a model by chaining together the data augmentation, rescaling, base_model and feature extractor layers using the Keras Functional API. As mentioned earlier, use training=False since our model contains a BatchNormalization layer.
Step17: Compile the model
Step18: The 2.5 million parameters in MobileNet are frozen, but there are 1.2K trainable parameters in the Dense layer. These are split between two tf.Variable objects, the weights and biases.
Step19: Train the model
Step20: Learning curves
Step21: Note: If you are wondering why the validation metrics are clearly better than the training metrics, the main factor is that layers like tf.keras.layers.BatchNormalization and tf.keras.layers.Dropout affect accuracy during training. They are turned off when computing validation loss.
Step22: Compile the model
Step23: Continue training the model
Step24: Let's take a look at the learning curves of the training and validation accuracy/loss when fine-tuning the last few layers of the MobileNet V2 base model and training the classifier on top of them. The validation loss is much higher than the training loss, so there may be some overfitting.
Step25: Evaluation and prediction
Step26: And now you can use this model to predict whether your pet is a cat or a dog.
|
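A natural follow-up not covered in the steps above (an illustrative sketch using standard Keras APIs; the directory name is arbitrary): persist the fine-tuned model so it can be reloaded for inference later.
# Save the fine-tuned model and reload it to verify the round trip.
model.save('fine_tuned_cats_vs_dogs')  # TensorFlow SavedModel format
reloaded = tf.keras.models.load_model('fine_tuned_cats_vs_dogs')
print(reloaded.evaluate(test_dataset))  # [loss, accuracy] should match the model above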
708 | <ASSISTANT_TASK:>
Python Code:
# from assocplots.misc import mock_data_generation
# data_m, data_w = mock_data_generation(M=100000, seed=42)
# data_m['pval'] /= 500000.*np.exp(-(data_m['pos']-10000.)**2/50000.0) * (data_m['chr']=='4') * np.random.rand(len(data_m)) + 1.
# Load standard libraries
import numpy as np
from pandas import DataFrame
from bokeh.plotting import figure, output_notebook, show, gridplot
from bokeh.models import ColumnDataSource, widgets, CustomJS
from bokeh.models.glyphs import Circle, Square
from bokeh.models import HoverTool
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.io import output_file, show, vform, vplot, hplot
from assocplots.misc import mock_data_generation
data_m, data_w = mock_data_generation(M=100000, seed=42)
data_m['pval'] /= 500000.*np.exp(-(data_m['pos']-10000.)**2/50000.0) * (data_m['chr']=='4') * np.random.rand(len(data_m)) + 1.
# # Reading data
# data_m = np.genfromtxt('HIP_MEN_chr_pos_rs_pval.txt', dtype=None, names=['chr', 'pos', 'snp', 'pval'])
# data_w = np.genfromtxt('HIP_WOMEN_chr_pos_rs_pval.txt', dtype=None, names=['chr', 'pos', 'snp', 'pval'])
from assocplots.interactive import *
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
# cut1, cut2, data = data_reduce(data_m, data_w, N=5000)
cut1, cut2, data = data_reduce_fast(data_m, data_w, N=1000)
# You can assign any color sequence:
colors = ['#1b9e77', "#d95f02", '#7570b3', '#e7298a']
# Or choose a matplotlib colormap and extract colors from it:
cmap = plt.get_cmap('viridis')
# Select points of the colormap
colors = [cmap(i) for i in [0.0,0.33,0.67,0.90]]
# Converting RGB color format into HEX
colors = [rgb2hex(colors[i]) for i in range(len(colors))]
p1,p2,p3,p4,pq1 = mann_only_interactive(data, cut1, cut2,
chrs_plot=[str(i) for i in range(1,23)],
ms=6, # size of markers
color_sequence = colors, # any number of colors for M-plot
)
# show(vplot(p1,p2))
# show(hplot(pq1,p4))
# show(p4)
from assocplots.htmloutput import *
write_to_html([p1,p2,pq1,p4], filename='output.html', title='Title')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The actual data that we will use comes from the GIANT consortium
Step2: Next, we will read in the data for two groups.
Step3: Lastly, we run the code to generate an interactive plot and save it as an html file, which can be opened in a browser.
|
709 | <ASSISTANT_TASK:>
Python Code:
assert range_overlap([ (0.0, 1.0) ]) == (0.0, 1.0)
assert range_overlap([ (2.0, 3.0), (2.0, 4.0) ]) == (2.0, 3.0)
assert range_overlap([ (0.0, 1.0), (0.0, 2.0), (-1.0, 1.0) ]) == (0.0, 1.0)
assert range_overlap([ (0.0, 1.0), (5.0, 6.0) ]) == ???
assert range_overlap([ (0.0, 1.0), (1.0, 2.0) ]) == ???
assert range_overlap([ (0.0, 1.0), (5.0, 6.0) ]) == None
assert range_overlap([ (0.0, 1.0), (1.0, 2.0) ]) == None
def range_overlap(ranges):
    '''Return common overlap among a set of [left, right] ranges.'''
    max_left = 0.0
    min_right = 1.0
    for (left, right) in ranges:
        max_left = max(max_left, left)
        min_right = min(min_right, right)
    return (max_left, min_right)

def test_range_overlap():
    assert range_overlap([ (0.0, 1.0), (5.0, 6.0) ]) == None
    assert range_overlap([ (0.0, 1.0), (1.0, 2.0) ]) == None
    assert range_overlap([ (0.0, 1.0) ]) == (0.0, 1.0)
    assert range_overlap([ (2.0, 3.0), (2.0, 4.0) ]) == (2.0, 3.0)
    assert range_overlap([ (0.0, 1.0), (0.0, 2.0), (-1.0, 1.0) ]) == (0.0, 1.0)
    assert range_overlap([]) == None

test_range_overlap()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The error is actually reassuring
Step2: What should range_overlap do in this case
Step3: Do two segments that touch at their endpoints overlap or not? Mathematicians usually say “yes”, but engineers usually say “no”. The best answer is “whatever is most useful in the rest of our program”, but again, any actual implementation of range_overlap is going to do something, and whatever it is ought to be consistent with what it does when there’s no overlap at all.
Step5: Again, we get an error because we haven’t written our function, but we’re now ready to do so
Step6: Take a moment to think about why we calculate the left endpoint of the overlap as the maximum of the input left endpoints, and the overlap right endpoint as the minimum of the input right endpoints. We’d now like to re-run our tests, but they’re scattered across three different cells. To make running them easier, let’s put them all in a function
|
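The range_overlap above still hard-codes max_left = 0.0 and min_right = 1.0, so the new assertions for non-overlapping, touching, and empty inputs fail; the cells above stop at running the tests. One possible completed version (an illustrative sketch, not part of the original lesson text) is:
def range_overlap(ranges):
    '''Return common overlap among a set of [left, right] ranges, or None if there is none.'''
    if not ranges:
        return None
    max_left, min_right = ranges[0]
    for (left, right) in ranges:
        max_left = max(max_left, left)
        min_right = min(min_right, right)
    if max_left >= min_right:
        return None  # no overlap, or ranges that merely touch
    return (max_left, min_right)

test_range_overlap()  # with this version, all six assertions pass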
710 | <ASSISTANT_TASK:>
Python Code:
import smps
import seaborn as sns
import os
import matplotlib
import matplotlib.pyplot as plt
import json
%matplotlib inline
# You can use seaborn to easily control how your plots appear
sns.set('notebook', style='ticks', font_scale=1.5, palette='dark')
smps.set()
print ("smps v{}".format(smps.__version__))
print ("seaborn v{}".format(sns.__version__))
print ("matplotlib v{}".format(matplotlib.__version__))
bos = smps.io.load_sample("boston")
# bos.dn
print (json.dumps(bos.meta, indent=4))
# print out the first 4 bins
bos.bins[0:4]
# print out the midpoints
bos.midpoints
# Display the first few rows of the DataFrame
bos.data.head(3)
bos.stats(weight='number').head()
bos.scan_stats.head()
bos.resample("5min", inplace=True)
bos.data.head(3)
X = bos.dndlogdp.index
Y = bos.midpoints
Z = bos.dndlogdp.T.values
ax = smps.plots.heatmap(X, Y, Z, cmap='viridis', fig_kws=dict(figsize=(14, 6)))
# make the x axis dates look presentable
import matplotlib.dates as dates
ax.xaxis.set_minor_locator(dates.HourLocator(byhour=[0, 6, 12, 18]))
ax.xaxis.set_major_formatter(dates.DateFormatter("%d\n%b\n%Y"))
# Go ahead and change things!
ax.set_title("Cambridge, MA Wintertime SMPS Data", y=1.02, fontsize=20);
ax = smps.plots.histplot(bos.dndlogdp, bos.bins, plot_kws={'linewidth': .01}, fig_kws=dict(figsize=(12,6)))
ax.set_title("Cambridge, MA Wintertime Size Distribution")
ax.set_ylabel("$dN/dlogD_p \; [cm^{-3}]$")
sns.despine()
dates = ["2016-11-23", "2016-11-24", "2016-11-25"]
ax = None
for i, date in enumerate(dates):
    color = sns.color_palette()[i]
    plot_kws = dict(alpha=0.65, color=color, linewidth=0.)
    ax = smps.plots.histplot(bos.dndlogdp[date], bos.bins, ax=ax, plot_kws=plot_kws, fig_kws=dict(figsize=(12, 6)))
# Add us a legend!
ax.legend(dates, loc='best')
ax.set_ylabel("$dN/dlogD_p \; [cm^{-3}]$")
# Remove the spines
sns.despine()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the Data into an SMPS object
Step2: Explore the SMPS Object
Step3: SMPS.bins and SMPS.midpoints
Step4: SMPS.histogram and SMPS.raw
Step5: SMPS.stats
Step6: We can go ahead and resample the data by mean if we would like to! Under the hood, this method splits the raw dataframe into numeric and non-numeric columns before resampling by mean the numeric columns and the non-numerics by 'first'. If inplace=True, then it will save the resampled data and replace the current raw dataframe. Otherwise, it will return a copy of the object.
Step7: Visualization
Step8: smps.plots.histplot(histogram, bins, ax=None, plot_kws=None, fig_kws=None, **kwargs)
Step9: Example 2
|
711 | <ASSISTANT_TASK:>
Python Code:
import NotebookImport
from IPython.display import clear_output
from HIV_Age_Advancement import *
from Setup.DX_Imports import *
import statsmodels.api as sm
import seaborn as sns
sns.set_context("paper", font_scale=1.7, rc={"lines.linewidth": 2.5})
sns.set_style("white")
fig, ax = subplots(figsize=(5,4))
plot_regression(a2, p2, ax=ax)
fig.tight_layout()
fig, ax = subplots(figsize=(5,4))
plot_regression(a2.ix[ti(labs['LLQ PLASMA'] != '>LLQ')], p2, ax=ax)
series_scatter(a2.ix[ti(labs['LLQ PLASMA'] == '>LLQ')], p2, color=colors[0],
ax=ax, ann=None)
fig.tight_layout()
fig, axs = subplots(1,3, figsize=(14,4), sharey=True)
age_at_dx = (clinical['estimated duration hiv (months)'] / 12.)
age_at_dx.name = 'age_at_dx'
series_scatter(age, age_at_dx.ix[duration.index], ax=axs[0])
violin_plot_pandas(duration[duration != 'Control'], age, ax=axs[1])
violin_plot_pandas(duration, age_at_dx, ax=axs[2])
for ax in axs:
    prettify_ax(ax)
fig, axs = subplots(1,3, figsize=(14,4), sharey=True)
age_at_dx = age - (clinical['estimated duration hiv (months)'] / 12.)
age_at_dx.name = 'age_at_dx'
series_scatter(age, age_at_dx.ix[duration.index], ax=axs[0])
violin_plot_pandas(duration[duration != 'Control'], age, ax=axs[1])
violin_plot_pandas(duration, age_at_dx, ax=axs[2])
for ax in axs:
    prettify_ax(ax)
age_advancement = (p2 - a2).ix[duration.index].dropna()
age_advancement.name = 'age_advancement'
reg = linear_regression(age, age_advancement)
age_adj = (age_advancement - age * reg['slope']).dropna()
age_adj = age_adj - reg.intercept
age_adj.name = 'age advancment (adjusted)'
fig, axs = subplots(1,2, figsize=(10,4), sharey=True)
series_scatter(age_advancement, age, ax=axs[0])
series_scatter(age_adj, age, ax=axs[1])
for ax in axs:
prettify_ax(ax)
fig.tight_layout()
residual = (pred_c - age).ix[duration.index]
residual.name = 'residual'
reg = linear_regression(age, residual)
resid_adj = (residual - age * reg['slope']).dropna()
resid_adj = resid_adj - reg.intercept
resid_adj.name = 'residual (adjusted)'
fig, axs = subplots(1,2, figsize=(10,4), sharey=True)
series_scatter(residual, age, ax=axs[0])
series_scatter(resid_adj, age, ax=axs[1])
for ax in axs:
prettify_ax(ax)
fig.tight_layout()
#r = p2 - a2
a,b,c = residual.groupby(duration)
sp.stats.bartlett(a[1].dropna(), c[1].dropna())
sp.stats.bartlett(a[1].dropna(), b[1].dropna(), c[1].dropna())
violin_plot_pandas(duration, p2 - a2)
l2 = (labs.ix[:, labs.dtypes.isin([dtype('int64'), dtype('float64')])]
.dropna(1, how='all'))
l3 = labs.ix[:, ti(labs.apply(lambda s: len(s.unique()), axis=0) < 6)]
spearman_pandas(residual, np.log2(l2['CD4/CD8 ratio']))
pearson_pandas(residual, np.log2(l2['CD4/CD8 ratio']))
spearman_pandas(residual.ix[ti(duration=='HIV Long')],
np.log2(l2['CD4/CD8 ratio']))
spearman_pandas(resid_adj.ix[ti(duration=='HIV Short')],
np.log2(l2['CD4/CD8 ratio']))
spearman_pandas(resid_adj, np.log2(l2['CD4/CD8 ratio']))
series_scatter(residual, np.log2(l2['CD4/CD8 ratio']))
l2 = (labs.ix[:, labs.dtypes.isin([dtype('int64'), dtype('float64')])]
.dropna(1, how='all'))
l3 = labs.ix[:, ti(labs.apply(lambda s: len(s.unique()), axis=0) < 6)]
keepers = labs.index.difference(['RG065','RG175','RG279','RA182','RM285'])
keepers = keepers.intersection(duration.index)
l2 = l2.ix[keepers]
l3 = l3.ix[keepers]
duration.name = 'duration'
violin_plot_pandas(combine(labs['LLQ PLASMA'] == '>LLQ', duration=='HIV Long'),
age, order=['neither','duration','both','LLQ PLASMA'])
violin_plot_pandas(combine(labs['LLQ PLASMA'] == '>LLQ', duration=='HIV Long'),
age_advancement, order=['neither','duration','both','LLQ PLASMA'])
series_scatter(np.log(labs['rnvalue PLASMA'][labs['LLQ PLASMA'] == '>LLQ']),
age_advancement)
screen_feature(age_advancement, pearson_pandas, l2.T, align=False).head()
bins = np.floor(age_advancement / 5.)
bins = bins.clip(-1,2)
spearman_pandas(bins, l2.MCV)
fig, axs = subplots(1,2, figsize=(6,4))
bins = np.floor(age_advancement / 5.)
bins = bins.clip(-1,2).map({-1: '< 0', 0:'0-5', 1:'5+', 2:'5+'})
box_plot_pandas(bins, l2.MCV, order=['< 0','0-5','5+'], ax=axs[0])
box_plot_pandas(bins, l2['age'], order=['< 0','0-5','5+'], ax=axs[1])
for ax in axs:
prettify_ax(ax)
fig.tight_layout()
fig, ax = subplots(figsize=(5,4))
series_scatter(age_advancement, l2.MCV, ax=ax, color=colors[3],
edgecolor='black')
prettify_ax(ax)
fig.tight_layout()
fig.savefig(FIGDIR + 'mcv_age_advancement.png', dpi=300)
screen_feature(age_advancement, spearman_pandas, cell_counts.T, align=False)
fig, ax = subplots(1,1, figsize=(4,3))
rr = cell_counts.NK
k = pred_c.index
hiv = duration != 'Control'
sns.regplot(*match_series(residual.ix[k], rr.ix[ti(hiv==0)]),
            ax=ax, label='Control')
sns.regplot(*match_series(residual.ix[k], rr.ix[ti(hiv>0)]),
            ax=ax, label='HIV+')
prettify_ax(ax)
age_adj.name = 'age_advancement'
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
age.name = 'bio_age'
duration_t = clinical['estimated duration hiv (months)'] / 12.
duration.name = 'duration'
monocytes = labs['Monocyte %']
monocytes.name = 'monocytes'
df = process_factors([age_advancement, duration, age, age_at_dx,
l2.MCV, l2.MCH, cell_counts.NK, cell_counts.CD4T,
monocytes], standardize=True)
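# Fit an R linear model via rpy2: age advancement regressed on biological age, MCV and NK
# fraction; the print below shows the tail of summary() - the coefficient table and fit statistics.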
fmla = robjects.Formula('age_advancement ~ bio_age + MCV + NK')
m = robjects.r.lm(fmla, df)
s = robjects.r.summary(m)
print '\n\n'.join(str(s).split('\n\n')[-3:])
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
age.name = 'chron_age'
pred_c.name = 'bio_age'
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
df = process_factors([residual, hiv, age, cell_counts.NK, cell_counts.CD4T,
cell_counts.CD8T, cell_counts.Bcell, cell_counts.Mono,
cell_counts.Gran], standardize=False)
fmla = robjects.Formula('residual ~ chron_age + HIV + NK + CD4T + CD8T + '
'Bcell + Mono + Gran')
m = robjects.r.lm(fmla, df)
s = robjects.r.summary(m)
print '\n\n'.join(str(s).split('\n\n')[-3:])
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
df = process_factors([residual, hiv, pred_c, age, cell_counts.NK,
cell_counts.CD4T, cell_counts.CD8T,
cell_counts.Bcell, cell_counts.Mono,
cell_counts.Gran], standardize=False)
fmla = robjects.Formula('residual ~ bio_age + HIV + NK')
m = robjects.r.lm(fmla, df)
s = robjects.r.summary(m)
print '\n\n'.join(str(s).split('\n\n')[-3:])
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
age.name = 'chron_age'
pred_c.name = 'bio_age'
df = process_factors([residual, hiv, pred_c, age, cell_counts.NK,
cell_counts.CD4T, cell_counts.CD8T,
cell_counts.Bcell, cell_counts.Mono,
cell_counts.Gran])
fmla = robjects.Formula('bio_age ~ chron_age + NK + CD4T + CD8T + '
'Bcell + Mono + Gran')
m = robjects.r.lm(fmla, df)
s = robjects.r.summary(m)
print '\n\n'.join(str(s).split('\n\n')[-3:])
1.4299 / 2.3176
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
age.name = 'chron_age'
pred_c.name = 'bio_age'
df = process_factors([residual, hiv, pred_c, age, cell_counts.NK,
cell_counts.CD4T, cell_counts.CD8T,
cell_counts.Bcell, cell_counts.Mono,
cell_counts.Gran])
fmla = robjects.Formula('residual ~ chron_age + NK + CD4T + CD8T + '
'Bcell + Mono + Gran')
m = robjects.r.lm(fmla, df)
s = robjects.r.summary(m)
print '\n\n'.join(str(s).split('\n\n')[-3:])
rmse = lambda v: (v ** 2).mean() ** .5
v = robjects.r.residuals(m)
r2 = pd.Series(pandas2ri.ri2py(v), index=list(v.names[0]))
r2.name = 'residual'
hiv = (duration != 'Control').astype(float)
hiv.name = 'HIV'
df = process_factors([r2, hiv, pred_c, cell_counts.NK, cell_counts.CD4T,
cell_counts.CD8T, cell_counts.Bcell, cell_counts.Mono,
cell_counts.Gran])
fmla = robjects.Formula('residual ~ HIV')
m1 = robjects.r.lm(fmla, df)
s = robjects.r.summary(m1)
print '\n\n'.join(str(s).split('\n\n')[-3:])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Looking at Predicted Time of Onset
Step2: Interestingly, a lot of the patients off the diagonal in the recently diagnosed group have detectable HIV RNA in the blood plasma.
Step3: Further inspection of age
Step4: Adjust out the age effect in age advancement
Step5: Look at Confounders
Step6: Cell composition from mixture model estimates
Step7: While we see a significant effect of NK cell concentration with increasing age advancement, this does not seem to be specific to HIV+ patients.
Step8: Multivariate modeling of confounders
Step9: Modeling residuals of aging model with HIV and cell composition
Step10: Looking at residuals of model fit with cell composition
|
712 | <ASSISTANT_TASK:>
Python Code:
import torch
x = torch.empty(5, 3)
print(x)
type(x)
x = torch.rand(5, 3)
print(x)
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
x = torch.tensor([5.5, 3])
print(x)
x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes
print(x)
x = torch.randn_like(x, dtype=torch.float) # override dtype!
print(x) # result has the same size
print(x.size())
y = torch.rand(5, 3)
print(x + y)
print(torch.add(x, y))
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
# adds x to y
y.add_(x)
print(y)
print(x[:, 1])
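# .view() returns a tensor that shares the same underlying data but has a new shape;
# passing -1 for one dimension lets PyTorch infer it from the remaining sizes.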
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
x = torch.randn(1)
print(x)
print(x.item())
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
a.add_(1)
print(a)
print(b)
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
device = torch.device("cuda") # a CUDA device object
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
x = x.to(device) # or just use strings ``.to("cuda")``
z = x + y
print(z)
print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together!
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div class="alert alert-info"><h4>Note</h4><p>An uninitialized matrix is declared,
Step2: Construct a randomly initialized matrix
Step3: Construct a matrix filled zeros and of dtype long
Step4: Construct a tensor directly from data
Step5: or create a tensor based on an existing tensor. These methods
Step6: Get its size
Step7: <div class="alert alert-info"><h4>Note</h4><p>``torch.Size`` is in fact a tuple, so it supports all tuple operations.</p></div>
Step8: Addition
Step9: Addition
Step10: Addition
Step11: <div class="alert alert-info"><h4>Note</h4><p>Any operation that mutates a tensor in-place is post-fixed with an ``_``.
Step12: Resizing
Step13: If you have a one element tensor, use .item() to get the value as a
Step14: Read later
Step15: See how the numpy array changed in value.
Step16: Converting NumPy Array to Torch Tensor
Step17: All the Tensors on the CPU except a CharTensor support converting to
|
713 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML # Allows us to embed HTML into our notebook.
HTML('<iframe width="800" height="400" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=a%20%3D%20%5B1,%203,%205%5D%0Ab%20%3D%20a%0Aprint%28%22a%20%3D%20%7B0%7D%20and%20has%20id%20%7B1%7D%22.format%28a,%20id%28a%29%29%29%0Aprint%28%22b%20%3D%20%7B0%7D%20and%20has%20id%20%7B1%7D%22.format%28b,%20id%28b%29%29%29%0Aprint%28%22Is%20b%20a%3F%20%7B0%7D%22.format%28b%20is%20a%29%29%0A%0Aa.append%287%29%0Aprint%28%22a%20%3D%20%7B%7D%22.format%28a%29%29%0Aprint%28%22b%20%3D%20%7B%7D%22.format%28b%29%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
HTML('<iframe width="1000" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=a%20%3D%20%5B2,%203,%204%5D%0Ac1%20%3D%202.0**2.0%0Ac2%20%3D%20%5Bi**2.0%20for%20i%20in%20a%5D%0Aprint%28c2%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
HTML('<iframe width="1000" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=s%20%3D%20\'The%20lost%20world...\'%0Alen_of_s%20%3D%20len%28s%29%0Amy_len%20%3D%20len%0Amy_len_of_s%20%3D%20my_len%28s%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
HTML('<iframe width="1000" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=def%20check_oddness%28x%29%3A%0A%20%20%20%20if%20x%252%20%3D%3D%200%3A%0A%20%20%20%20%20%20%20%20return%20x%20/%202.0%0A%20%20%20%20else%3A%0A%20%20%20%20%20%20%20%20return%20%28x%20-%201.0%29%20/%202.0%0A%0Aa%20%3D%206.0%0An1%20%3D%20check_oddness%28a%29%0A%0Ab%20%3D%2015.0%0An2%20%3D%20check_oddness%28b%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
def f(x):
print(id(x))
d={'a':17.0, 'b':35.0}
print(id(d))
f(d)
import numpy as np
c = 5000.0
def do_integral(function):
c = 13.0
# Some algorithm for carrying out an integration
print(c)
x = np.linspace(-1.0, 1.0, 100)
y = x * x
do_integral(y)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So what is going on? Well, Python variables are reference variables. You could say "the variable a (b) is assigned to a list" rather than "the list is assigned to the variable a (b)" (see the short sketch after this list).
Step2: Functions and Environments
Step3: Defining your own environment
Step4: Model of Evaluation
Step5: A few more comments
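A tiny sketch of the point in Step1, mirroring the PythonTutor example embedded above — two names bound to one list object, so a mutation through either name is visible through both:
a = [1, 3, 5]
b = a                     # b is bound to the same list object, not a copy
print(b is a)             # True: identical objects (same id)
a.append(7)
print(b)                  # [1, 3, 5, 7] - the change shows up through b as well
print(id(a) == id(b))     # True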
|
714 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nims-kma', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
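# A filled-in property cell just passes one of the listed valid choices (or free text) to
# DOC.set_value(). Hypothetical example only - not an official NIMS-KMA entry:
#     DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
#     DOC.set_value("AGCM")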
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
715 | <ASSISTANT_TASK:>
Python Code:
import pypsa, os
import numpy as np
network = pypsa.examples.scigrid_de(from_master=True)
for line_name in ["316", "527", "602"]:
network.lines.loc[line_name, "s_nom"] = 1200
now = network.snapshots[0]
branch_outages = network.lines.index[:15]
network.sclopf(now, branch_outages=branch_outages, solver_name="cbc")
network.generators_t.p_set = network.generators_t.p_set.reindex(
columns=network.generators.index
)
network.generators_t.p_set.loc[now] = network.generators_t.p.loc[now]
network.storage_units_t.p_set = network.storage_units_t.p_set.reindex(
columns=network.storage_units.index
)
network.storage_units_t.p_set.loc[now] = network.storage_units_t.p.loc[now]
p0_test = network.lpf_contingency(now, branch_outages=branch_outages)
p0_test
max_loading = (
abs(p0_test.divide(network.passive_branches().s_nom, axis=0)).describe().loc["max"]
)
max_loading
np.allclose(max_loading, np.ones((len(max_loading))))
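# Optional check (sketch, not in the original notebook): flag any outage case whose worst
# per-unit branch loading exceeds its limit by more than 1% -- with the line extensions
# above this should come back empty.
overloaded = max_loading[max_loading > 1.01]
print(overloaded)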
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are some infeasibilities without line extensions.
Step2: Performing security-constrained linear OPF
Step3: For the PF, set the P to the optimised P.
Step4: Check no lines are overloaded with the linear contingency analysis
Step5: Check loading as per unit of s_nom in each contingency
|
716 | <ASSISTANT_TASK:>
Python Code:
Image("res4.gif")
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
import pandas as pd
import seaborn as sns
sns.set()
%matplotlib inline
## setup the model
# these are the values and precision of each
Datasheets = {'R1':(6.0, 0.01),
'R2':(8.0, 0.01),
'R3':(4.0, 0.01),
'R4':(12.0, 0.05),
'V1':(6.0, 0.01),} # 1% on the 6V power supply
with pm.Model() as model:
BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
# http://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Normal
# in Bayes world these are considered prior distributions, they are based on previous information
# that is gained in some other manner, from the datasheet in this case.
R1 = BoundedNormal('R1', mu=Datasheets['R1'][0], sd=Datasheets['R1'][0]*Datasheets['R1'][1])
R2 = BoundedNormal('R2', mu=Datasheets['R2'][0], sd=Datasheets['R2'][0]*Datasheets['R2'][1])
R3 = BoundedNormal('R3', mu=Datasheets['R3'][0], sd=Datasheets['R3'][0]*Datasheets['R3'][1])
R4 = BoundedNormal('R4', mu=Datasheets['R4'][0], sd=Datasheets['R4'][0]*Datasheets['R4'][1])
# don't bound the voltage as negative is possible
V1 = pm.Normal('V1', mu=Datasheets['V1'][0], sd=Datasheets['V1'][0]*Datasheets['V1'][1])
# Math should all be done on paper first to get the full answer, but we will do steps here because one can.
# all at once would be much faster.
# just add them, we will not get info on R2_3 at the output unless we wrap them in pm.Deterministic
R2_3 = R2+R3
# R2_3 = pm.Deterministic('R2_3', R2+R3)
# now get the resistance answer, and we want details
R_eff = pm.Deterministic('R_eff', 1/(1/R2_3 + 1/R4))
# total current is then just I=V/R
I_t = pm.Deterministic('I_t', V1/R_eff)
# and I_1 and I_2
I_1 = pm.Deterministic('I_1', I_t*R2_3/(R2_3+R4)) # current divider for the parallel pair (R2+R3) || R4
I_2 = pm.Deterministic('I_2', I_t-I_1)
# makes it all a bit cleaner to start in a good place
start = pm.find_MAP()
# run a fair number of samples, I have a 8 core machine so run 6
trace = pm.sample(5000, start=start, njobs=6)
I_t_perc = np.percentile(trace['I_t'], (2.5, 97.5))
I_t_mean = trace['I_t'].mean()
print('I_t = {:.4} +/- {:.4}'.format(I_t_mean, I_t_perc[1]-I_t_mean))
print('I_t = {:.4} +/- {:.4}%'.format(I_t_mean, (I_t_perc[1]-I_t_mean)/I_t_mean*100))
pm.summary(trace, varnames=('R_eff', 'I_t', 'I_1', 'I_2'))
pm.traceplot(trace, combined=True, varnames=('R_eff', 'I_t'));
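# Cross-check against the deterministic nominal values, a quick sketch with no uncertainty:
# R2+R3 = 12 ohm in parallel with R4 = 12 ohm gives R_eff = 6 ohm, so I_t = 6 V / 6 ohm = 1 A;
# the posterior means reported above should sit close to these numbers.
R_eff_nominal = 1.0/(1.0/(8.0+4.0) + 1.0/12.0)
I_t_nominal = 6.0/R_eff_nominal
print("nominal R_eff = {} ohm, nominal I_t = {} A".format(R_eff_nominal, I_t_nominal))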
# setup the model
# these are the values and precision of each
Datasheets = {'R1':(6.0, 0.01),
'R2':(8.0, 0.01),
'R3':(4.0, 0.01),
'R4':(12.0, 0.05),
'V1':(6.0, 0.05),} # 5% on the 6V power supply
measuremnts_R1 = [5.987, 5.987] # we measured it twice
with pm.Model() as model:
# http://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Normal
# in Bayes world these are considered prior distributions, they are based on previous information
# that is gained in some other manner, from the datasheet in this case.
# R1 = pm.Normal('R1', mu=Datasheets['R1'][0], sd=Datasheets['R1'][0]*Datasheets['R1'][1])
R2 = pm.Normal('R2', mu=Datasheets['R2'][0], sd=Datasheets['R2'][0]*Datasheets['R2'][1])
R3 = pm.Normal('R3', mu=Datasheets['R3'][0], sd=Datasheets['R3'][0]*Datasheets['R3'][1])
R4 = pm.Normal('R4', mu=Datasheets['R4'][0], sd=Datasheets['R4'][0]*Datasheets['R4'][1])
# don't bound the voltage as negative is possible
V1 = pm.Normal('V1', mu=Datasheets['V1'][0], sd=Datasheets['V1'][0]*Datasheets['V1'][1])
# so on R1 we took some measurements and so have to build it a bit differently
# use the datasheet for prior
R1_mean = pm.Normal('R1_mean', mu=Datasheets['R1'][0], sd=Datasheets['R1'][0]*Datasheets['R1'][1])
R1 = pm.Normal('R1', mu=R1_mean, sd=Datasheets['R1'][0]*Datasheets['R1'][1], observed=measuremnts_R1)
# Math should all be done on paper first to get the full answer, but we will do steps here because one can.
# all at once would be much faster.
# just add them, we will not get info on R2_3 at the output unless we wrap them in pm.Deterministic
R2_3 = R2+R3
# R2_3 = pm.Deterministic('R2_3', R2+R3)
# now get the resistance answer, and we want details
R_eff = pm.Deterministic('R_eff', 1/(1/R2_3 + 1/R4))
# total current is then just I=V/R
I_t = pm.Deterministic('I_t', V1/R_eff)
# and I_1 and I_2
I_1 = pm.Deterministic('I_1', I_t*R2_3/(R2_3+R4)) # current divider for the parallel pair (R2+R3) || R4
I_2 = pm.Deterministic('I_2', I_t-I_1)
# makes it all a bit cleaner to start in a good place
start = pm.find_MAP()
# run a fair number of samples, I have a 8 core machine so run 6
trace = pm.sample(5000, start=start, njobs=6)
pm.summary(trace, varnames=('R_eff', 'I_t', 'I_1', 'I_2'))
pm.traceplot(trace, combined=True, varnames=('R_eff', 'I_t'));
I_t_perc = np.percentile(trace['I_t'], (2.5, 97.5))
I_t_mean = trace['I_t'].mean()
print('I_t = {:.4} +/- {:.4}'.format(I_t_mean, I_t_perc[1]-I_t_mean))
print('I_t = {:.4} +/- {:.4}%'.format(I_t_mean, (I_t_perc[1]-I_t_mean)/I_t_mean*100))
# setup the model
# these are the values and precision of each
Datasheets = {'R1':(6.0, 0.01),
'R2':(8.0, 0.01),
'R3':(4.0, 0.01),
'R4':(12.0, 0.01), # better resistor
'V1':(6.0, 0.05),} # 5% on the 6V power supply
with pm.Model() as model:
BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
# http://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Normal
# in Bayes world these are considered prior distributions, they are based on previous information
# that is gained in some other manner, from the datasheet in this case.
R1 = BoundedNormal('R1', mu=Datasheets['R1'][0], sd=Datasheets['R1'][0]*Datasheets['R1'][1])
R2 = BoundedNormal('R2', mu=Datasheets['R2'][0], sd=Datasheets['R2'][0]*Datasheets['R2'][1])
R3 = BoundedNormal('R3', mu=Datasheets['R3'][0], sd=Datasheets['R3'][0]*Datasheets['R3'][1])
R4 = BoundedNormal('R4', mu=Datasheets['R4'][0], sd=Datasheets['R4'][0]*Datasheets['R4'][1])
# don't bound the voltage as negative is possible
V1 = pm.Normal('V1', mu=Datasheets['V1'][0], sd=Datasheets['V1'][0]*Datasheets['V1'][1])
# Math should all be done on paper first to get the full answer, but we will do steps here because one can.
# all at once would be much faster.
# just add them, we will not get info on R2_3 at the output unless we wrap them in pm.Deterministic
R2_3 = R2+R3
# R2_3 = pm.Deterministic('R2_3', R2+R3)
# now get the resistance answer, and we want details
R_eff = pm.Deterministic('R_eff', 1/(1/R2_3 + 1/R4))
# total current is then just I=V/R
I_t = pm.Deterministic('I_t', V1/R_eff)
# and I_1 and I_2
I_1 = pm.Deterministic('I_1', I_t*R2_3/(R2_3+R4)) # current divider for the parallel pair (R2+R3) || R4
I_2 = pm.Deterministic('I_2', I_t-I_1)
# makes it all a bit cleaner to start in a good place
start = pm.find_MAP()
# run a fair number of samples, I have a 8 core machine so run 6
trace = pm.sample(5000, start=start, njobs=6)
I_t_perc = np.percentile(trace['I_t'], (2.5, 97.5))
I_t_mean = trace['I_t'].mean()
print('I_t = {:.4} +/- {:.4}'.format(I_t_mean, I_t_perc[1]-I_t_mean))
print('I_t = {:.4} +/- {:.4}%'.format(I_t_mean, (I_t_perc[1]-I_t_mean)/I_t_mean*100))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this simple circuit the questions are
Step2: Components
Step3: Explore and explain the results
Step4: We also get good visual diagnostics in terms of traceplots. The right side shows the draws, which should always look like hash; the left side is the kernel density estimate of the output distribution. For this problem it should look pretty darn Normal.
Step5: Now let's say that we made some measurements of R1 and we can use those to constrain things
Step6: Results
Step7: How about we buy a better R4?
|
717 | <ASSISTANT_TASK:>
Python Code:
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df,name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name,x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df,name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Encode a numeric column as zscores
def encode_numeric_zscore(df,name,mean=None,sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name]-mean)/sd
# Convert all missing values in the specified column to the median
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df,target):
result = []
for x in df.columns:
if x != target:
result.append(x)
# find out the type of the target column. Is it really this hard? :(
target_type = df[target].dtypes
target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
# Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
if target_type in (np.int64, np.int32):
# Classification
return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.int32)
else:
# Regression
return df.as_matrix(result).astype(np.float32),df.as_matrix([target]).astype(np.float32)
# Nicely formatted time string
def hms_string(sec_elapsed):
h = int(sec_elapsed / (60 * 60))
m = int((sec_elapsed % (60 * 60)) / 60)
s = sec_elapsed % 60
return "{}:{:>02}:{:>05.2f}".format(h, m, s)
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(names))
plt.xticks(tick_marks, names, rotation=45)
plt.yticks(tick_marks, names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Plot an ROC. pred - the predictions, y - the expected output.
def plot_roc(pred,y):
fpr, tpr, _ = roc_curve(y, pred) # use the y argument rather than the module-level y_test
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc="lower right")
plt.show()
# Plot a lift curve. pred - the predictions, y - the expected output.
def chart_regression(pred,y):
t = pd.DataFrame({'pred' : pred.flatten(), 'y' : y.flatten()}) # use the y argument rather than the module-level y_test
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
import os
import pandas as pd
from sklearn.cross_validation import train_test_split
import tensorflow.contrib.learn as skflow
import numpy as np
from sklearn import metrics
path = "./data/"
filename = os.path.join(path,"wcbreast_wdbc.csv")
df = pd.read_csv(filename,na_values=['NA','?'])
# Encode feature vector
df.drop('id',axis=1,inplace=True)
encode_numeric_zscore(df,'mean_radius')
encode_text_index(df,'mean_texture')
encode_text_index(df,'mean_perimeter')
encode_text_index(df,'mean_area')
encode_text_index(df,'mean_smoothness')
encode_text_index(df,'mean_compactness')
encode_text_index(df,'mean_concavity')
encode_text_index(df,'mean_concave_points')
encode_text_index(df,'mean_symmetry')
encode_text_index(df,'mean_fractal_dimension')
encode_text_index(df,'se_radius')
encode_text_index(df,'se_texture')
encode_text_index(df,'se_perimeter')
encode_text_index(df,'se_area')
encode_text_index(df,'se_smoothness')
encode_text_index(df,'se_compactness')
encode_text_index(df,'se_concavity')
encode_text_index(df,'se_concave_points')
encode_text_index(df,'se_symmetry')
encode_text_index(df,'se_fractal_dimension')
encode_text_index(df,'worst_radius')
encode_text_index(df,'worst_texture')
encode_text_index(df,'worst_perimeter')
encode_text_index(df,'worst_area')
encode_text_index(df,'worst_smoothness')
encode_text_index(df,'worst_compactness')
encode_text_index(df,'worst_concavity')
encode_text_index(df,'worst_concave_points')
encode_text_index(df,'worst_symmetry')
encode_text_index(df,'worst_fractal_dimension')
diagnosis = encode_text_index(df,'diagnosis')
num_classes = len(diagnosis)
# Create x & y for training
# Create the x-side (feature vectors) of the training
x, y = to_xy(df,'diagnosis')
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Create a deep neural network with 3 hidden layers of 10, 20, 10
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,
steps=10000)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200, print_steps=50, n_classes=num_classes)
# Fit/train neural network
classifier.fit(x_train, y_train, early_stop)
# Measure accuracy
score = metrics.accuracy_score(y, classifier.predict(x))
print("Final accuracy: {}".format(score))
import numpy as np
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
pred = classifier.predict(x_test)
# Compute confusion matrix
cm = confusion_matrix(y_test, pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm, diagnosis)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, diagnosis, title='Normalized confusion matrix')
plt.show()
pred = classifier.predict_proba(x_test)
pred = pred[:,1] # Only positive cases
# print(pred[:,1])
plot_roc(pred,y_test)
import os
import pandas as pd
from sklearn.cross_validation import train_test_split
import tensorflow.contrib.learn as skflow
import numpy as np
path = "./data/"
filename = os.path.join(path,"iris.csv")
df = pd.read_csv(filename,na_values=['NA','?'])
# Encode feature vector
encode_numeric_zscore(df,'petal_w')
encode_numeric_zscore(df,'petal_l')
encode_numeric_zscore(df,'sepal_w')
encode_numeric_zscore(df,'sepal_l')
species = encode_text_index(df,"species")
num_classes = len(species)
# Create x & y for training
# Create the x-side (feature vectors) of the training
x, y = to_xy(df,'species')
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=45)
# as much as I would like to use 42, it gives a perfect result, and a boring confusion matrix!
# Create a deep neural network with 3 hidden layers of 10, 20, 10
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes,
steps=10000)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200, print_steps=50, n_classes=num_classes)
# Fit/train neural network
classifier.fit(x_train, y_train, early_stop)
import numpy as np
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
pred = classifier.predict(x_test)
# Compute confusion matrix
cm = confusion_matrix(y_test, pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm, species)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, species, title='Normalized confusion matrix')
plt.show()
import tensorflow.contrib.learn as skflow
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
path = "./data/"
filename_read = os.path.join(path,"auto-mpg.csv")
df = pd.read_csv(filename_read,na_values=['NA','?'])
# create feature vector
missing_median(df, 'horsepower')
df.drop('name',1,inplace=True)
encode_numeric_zscore(df, 'horsepower')
encode_numeric_zscore(df, 'weight')
encode_numeric_zscore(df, 'cylinders')
encode_numeric_zscore(df, 'displacement')
encode_numeric_zscore(df, 'acceleration')
encode_text_dummy(df, 'origin')
# Encode to a 2D matrix for training
x,y = to_xy(df,['mpg'])
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Create a deep neural network with 3 hidden layers of 50, 25, 10
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200, print_steps=50)
# Fit/train neural network
regressor.fit(x_train, y_train, early_stop)
pred = regressor.predict(x_test)
chart_regression(pred,y_test)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Toolkit
Step2: Binary Classification
Step3: Confusion Matrix
Step4: The above two confusion matrices show the same network. The bottom (normalized) is the type you will normally see. Notice the two labels. The label "B" means benign (no cancer) and the label "M" means malignant (cancer). The left-right (x) axis shows the predictions, and the top-bottom (y) axis shows the expected outcomes. A perfect model (that never makes an error) has a dark blue diagonal that runs from top-left to bottom-right.
Step5: Classification
Step6: See the strong diagonal? Iris is easy. See the light blue near the bottom? Sometimes virginica is confused for versicolor.
|
718 | <ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
PROJECT = 'munn-sandbox'
BUCKET = 'munn-sandbox'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['TFVERSION'] = '2.1'
import shutil
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow_hub import KerasLayer
from tensorflow.keras.layers import Dense, Input, Lambda
from tensorflow.keras.models import Model
print(tf.__version__)
%matplotlib inline
DATASET_NAME = "titles_full.csv"
COLUMNS = ['title', 'source']
titles_df = pd.read_csv(DATASET_NAME, header=None, names=COLUMNS)
titles_df.head()
CLASSES = {
'github': 0,
'nytimes': 1,
'techcrunch': 2
}
N_CLASSES = len(CLASSES)
def encode_labels(sources):
classes = [CLASSES[source] for source in sources]
one_hots = to_categorical(classes, num_classes=N_CLASSES)
return one_hots
encode_labels(titles_df.source[:4])
N_TRAIN = int(len(titles_df) * 0.80)
titles_train, sources_train = (
titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN])
titles_valid, sources_valid = (
titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:])
X_train, Y_train = titles_train.values, encode_labels(sources_train)
X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid)
X_train[:3]
SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1"
swivel_module = KerasLayer(SWIVEL, output_shape=[20], input_shape=[], dtype=tf.string, trainable=True)
def build_model(hub_module, model_name):
inputs = Input(shape=[], dtype=tf.string, name="text")
module = hub_module(inputs)
h1 = Dense(16, activation='relu', name="h1")(module)
outputs = Dense(N_CLASSES, activation='softmax', name='outputs')(h1)
model = Model(inputs=inputs, outputs=[outputs], name=model_name)
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
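# Because the hub layer is passed in as an argument, swapping embeddings only requires building
# a different KerasLayer (sketch; assumes the tf2-preview/nnlm-en-dim50 TF Hub module, 50-d output):
nnlm_module = KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
output_shape=[50], input_shape=[], dtype=tf.string, trainable=True)
nnlm_model = build_model(nnlm_module, model_name='txtcls_nnlm')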
def train_and_evaluate(train_data, val_data, model, batch_size=5000):
tf.random.set_seed(33)
X_train, Y_train = train_data
history = model.fit(
X_train, Y_train,
epochs=100,
batch_size=batch_size,
validation_data=val_data,
callbacks=[EarlyStopping()],
)
return history
txtcls_model = build_model(swivel_module, model_name='txtcls_swivel')
txtcls_model.summary()
# set up train and validation data
train_data = (X_train, Y_train)
val_data = (X_valid, Y_valid)
txtcls_history = train_and_evaluate(train_data, val_data, txtcls_model)
history = txtcls_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
txtcls_model.predict(x=["YouTube introduces Video Chapters to make it easier to navigate longer videos"])
tf.saved_model.save(txtcls_model, './txtcls_swivel/')
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir ./txtcls_swivel/
@tf.function(input_signature=[tf.TensorSpec([None], dtype=tf.string)])
def source_name(text):
labels = tf.constant(['github', 'nytimes', 'techcrunch'], dtype=tf.string)
probs = txtcls_model(text, training=False)
indices = tf.argmax(probs, axis=1)
pred_source = tf.gather(params=labels, indices=indices)
pred_confidence = tf.reduce_max(probs, axis=1)
return {'source': pred_source,
'confidence': pred_confidence}
shutil.rmtree('./txtcls_swivel', ignore_errors=True)
txtcls_model.save('./txtcls_swivel', signatures={'serving_default': source_name})
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir ./txtcls_swivel/
title1 = "House Passes Sweeping Policing Bill Targeting Racial Bias and Use of Force"
title2 = "YouTube introduces Video Chapters to make it easier to navigate longer videos"
title3 = "As facebook turns 10 zuckerberg wants to change how tech industry works"
restored = tf.keras.models.load_model('./txtcls_swivel')
infer = restored.signatures['serving_default']
outputs = infer(text=tf.constant([title1, title2, title3]))
print(outputs['source'].numpy())
print(outputs['confidence'].numpy())
%%bash
MODEL_NAME="txtcls"
MODEL_VERSION="swivel"
MODEL_LOCATION="./txtcls_swivel/"
gcloud ai-platform models create ${MODEL_NAME}
gcloud ai-platform versions create ${MODEL_VERSION} \
--model ${MODEL_NAME} \
--origin ${MODEL_LOCATION} \
--staging-bucket gs://${BUCKET} \
--runtime-version=2.1
%load_ext google.cloud.bigquery
%%bigquery --project $PROJECT
SELECT * FROM `txtcls_eval.swivel`
%%writefile input.json
{"text": "YouTube introduces Video Chapters to make it easier to navigate longer videos"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "A Filmmaker Put Away for Tax Fraud Takes Us Inside a British Prison"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "A native Mac app wrapper for WhatsApp Web"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "Astronauts Dock With Space Station After Historic SpaceX Launch"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "House Passes Sweeping Policing Bill Targeting Racial Bias and Use of Force"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "Scrollability"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%writefile input.json
{"text": "iOS 14 lets deaf users set alerts for important sounds, among other clever accessibility perks"}
!gcloud ai-platform predict \
--model txtcls \
--json-instances input.json \
--version swivel
%%bigquery --project $PROJECT
SELECT * FROM `txtcls_eval.swivel`
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "techcrunch"}]}'
WHERE
raw_data = '{"instances": [{"text": "YouTube introduces Video Chapters to make it easier to navigate longer videos"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "nytimes"}]}'
WHERE
raw_data = '{"instances": [{"text": "A Filmmaker Put Away for Tax Fraud Takes Us Inside a British Prison"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "github"}]}'
WHERE
raw_data = '{"instances": [{"text": "A native Mac app wrapper for WhatsApp Web"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "nytimes"}]}'
WHERE
raw_data = '{"instances": [{"text": "Astronauts Dock With Space Station After Historic SpaceX Launch"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "nytimes"}]}'
WHERE
raw_data = '{"instances": [{"text": "House Passes Sweeping Policing Bill Targeting Racial Bias and Use of Force"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "github"}]}'
WHERE
raw_data = '{"instances": [{"text": "Scrollability"}]}';
%%bigquery --project $PROJECT
UPDATE `txtcls_eval.swivel`
SET
groundtruth = '{"predictions": [{"source": "techcrunch"}]}'
WHERE
raw_data = '{"instances": [{"text": "iOS 14 lets deaf users set alerts for important sounds, among other clever accessibility perks"}]}';
%%bigquery --project $PROJECT
SELECT * FROM `txtcls_eval.swivel`
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import classification_report
%%bigquery --project $PROJECT
SELECT
model,
model_version,
time,
REGEXP_EXTRACT(raw_data, r'.*"text": "(.*)"') AS text,
REGEXP_EXTRACT(raw_prediction, r'.*"source": "(.*?)"') AS prediction,
REGEXP_EXTRACT(raw_prediction, r'.*"confidence": (0.\d{2}).*') AS confidence,
REGEXP_EXTRACT(groundtruth, r'.*"source": "(.*?)"') AS groundtruth,
FROM
`txtcls_eval.swivel`
query = '''
SELECT
model,
model_version,
time,
REGEXP_EXTRACT(raw_data, r'.*"text": "(.*)"') AS text,
REGEXP_EXTRACT(raw_prediction, r'.*"source": "(.*?)"') AS prediction,
REGEXP_EXTRACT(raw_prediction, r'.*"confidence": (0.\d{2}).*') AS confidence,
REGEXP_EXTRACT(groundtruth, r'.*"source": "(.*?)"') AS groundtruth,
FROM
`txtcls_eval.swivel`
'''
client = bigquery.Client()
df_results = client.query(query).to_dataframe()
df_results.head(20)
prediction = list(df_results.prediction)
groundtruth = list(df_results.groundtruth)
precision, recall, fscore, support = score(groundtruth, prediction)
from tabulate import tabulate
sources = list(CLASSES.keys())
results = list(zip(sources, precision, recall, fscore, support))
print(tabulate(results, headers = ['source', 'precision', 'recall', 'fscore', 'support'],
tablefmt='orgtbl'))
print(classification_report(y_true=groundtruth, y_pred=prediction))
cm = confusion_matrix(groundtruth, prediction, labels=sources)
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax, cmap="Blues")
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Confusion Matrix')
ax.xaxis.set_ticklabels(sources)
ax.yaxis.set_ticklabels(sources)
plt.savefig("./txtcls_cm.png")
now = pd.Timestamp.now(tz='UTC')
one_week_ago = now - pd.DateOffset(weeks=1)
one_month_ago = now - pd.DateOffset(months=1)
df_prev_week = df_results[df_results.time > one_week_ago]
df_prev_month = df_results[df_results.time > one_month_ago]
df_prev_month
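# Example (sketch): simple accuracy over the recent time windows, assuming the groundtruth
# column has been filled in as above.
acc_week = (df_prev_week.prediction == df_prev_week.groundtruth).mean()
acc_month = (df_prev_month.prediction == df_prev_month.groundtruth).mean()
print("accuracy last week: {:.2f}, last month: {:.2f}".format(acc_week, acc_month))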
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train and deploy the model
Step2: We one-hot encode the label...
Step3: ...and create a train/test split.
Step4: Swivel Model
Step5: The build_model function is written so that the TF Hub module can easily be exchanged with another module.
Step6: Train and evaluate the model
Step7: For training we'll call train_and_evaluate on txtcls_model.
Step8: Calling prediction on the model head produces output from the final dense layer. This final layer is used to compute categorical cross-entropy when training.
Step9: We can save the model artifacts in the local directory called ./txtcls_swivel.
Step10: ...and examine the model's serving default signature. As expected, the model takes as input a text string (e.g. an article title) and returns a 3-dimensional vector of floats (i.e. the softmax output layer).
Step11: To simplify the returned predictions, we'll modify the model signature so that the model outputs the predicted article source (either nytimes, techcrunch, or github) rather than the final softmax layer. We'll also return the 'confidence' of the model's prediction. This will be the softmax value corresponding to the predicted article source.
Step12: Now, we'll re-save the new Swivel model that has this updated model signature by referencing the source_name function for the model's serving_default.
Step13: Examine the model signature to confirm the changes
Step14: Now when we call predictions using the updated serving input function, the model will return the predicted article source as a readable string, and the model's confidence for that prediction.
Step15: Deploy the model for online serving
Step16: Set up the Evaluation job on CAIP
Step17: Now, every time this model version receives an online prediction request, this information will be captured and stored in the BQ table. Note, this happens every time because we set the sampling proportion to 100%.
Step18: Summarizing the results from our model
Step19: Provide the ground truth for the raw prediction input
Step20: We can confirm that the ground truth has been properly added to the table.
Step21: Compute evaluation metrics
Step22: Using regex we can extract the model predictions, to have an easier to read format
Step23: Or a full classification report from the sklearn library
Step24: Can also examine a confusion matrix
Step25: Examine eval metrics by model version or timestamp
|
719 | <ASSISTANT_TASK:>
Python Code:
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
# we'll skip the "face" and "buttonpress" conditions, to save memory:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7, event_id=event_dict,
preload=True)
evoked = epochs['auditory/left'].average()
del raw # reduce memory usage
evoked.plot()
print(evoked.data[:2, :3]) # first 2 channels, first 3 timepoints
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
print(evoked_eeg.ch_names)
new_order = ['EEG 002', 'MEG 2521', 'EEG 003']
evoked_subset = evoked.copy().reorder_channels(new_order)
print(evoked_subset.ch_names)
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, verbose=False)
print(evokeds_list)
print(type(evokeds_list))
for evok in evokeds_list:
print(evok.comment)
right_vis = mne.read_evokeds(sample_data_evk_file, condition='Right visual')
print(right_vis)
print(type(right_vis))
evokeds_list[0].plot(picks='eeg')
evokeds_list[0].apply_baseline((None, 0))
evokeds_list[0].plot(picks='eeg')
left_right_aud = epochs['auditory'].average()
print(left_right_aud)
left_aud = epochs['auditory/left'].average()
right_aud = epochs['auditory/right'].average()
print([evok.nave for evok in (left_aud, right_aud)])
left_right_aud = mne.combine_evoked([left_aud, right_aud], weights='nave')
assert left_right_aud.nave == left_aud.nave + right_aud.nave
for ix, trial in enumerate(epochs[:3].iter_evoked()):
channel, latency, value = trial.get_peak(ch_type='eeg',
return_amplitude=True)
latency = int(round(latency * 1e3)) # convert to milliseconds
value = int(round(value * 1e6)) # convert to µV
print('Trial {}: peak of {} µV at {} ms in channel {}'
.format(ix, value, latency, channel))
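# A related quick computation (sketch): the global field power of the EEG-only evoked,
# i.e. the standard deviation across channels at each time sample.
gfp = evoked_eeg.data.std(axis=0)
print(gfp.shape) # one value per time point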
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating Evoked objects from Epochs
Step2: Basic visualization of Evoked objects
Step3: Like the plot() methods for
Step4: To select based on time in seconds, the
Step5: Similarities among the core data structures
Step6: Notice that
Step7: If you want to load only some of the conditions present in a .fif file,
Step8: Above, when we created an
Step9: This can be remedied by either passing a baseline parameter to
Step10: Notice that
Step11: This approach will weight each epoch equally and create a single
Step12: However, this may not always be the case; if for statistical reasons it is
Step13: Keeping track of nave is important for inverse imaging, because it is
|
720 | <ASSISTANT_TASK:>
Python Code:
!pip3 install tensorflow_hub
%%bash
pip install --upgrade tensorflow
# Import helpful libraries and setup our project, bucket, and region
import os
import tensorflow as tf
import tensorflow_hub as hub
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.13"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/hybrid_recommendation/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do preprocessing notebook
gsutil -m cp -R gs://cloud-training-demos/courses/machine_learning/deepdive/10_recommendation/hybrid_recommendation gs://${BUCKET}
fi
from tensorflow.python.lib.io import file_io
# Get number of content ids from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/content_id_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_content_ids = int([x for x in ifp][0])
print("number_of_content_ids = {}".format(number_of_content_ids))
# Get number of categories from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/category_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_categories = int([x for x in ifp][0])
print("number_of_categories = {}".format(number_of_categories))
# Get number of authors from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/author_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_authors = int([x for x in ifp][0])
print("number_of_authors = {}".format(number_of_authors))
# Get mean months since epoch from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/months_since_epoch_mean.txt*".format(BUCKET))[0], mode = 'r') as ifp:
mean_months_since_epoch = float([x for x in ifp][0])
print("mean_months_since_epoch = {}".format(mean_months_since_epoch))
# Determine CSV and label columns
NON_FACTOR_COLUMNS = "next_content_id,visitor_id,content_id,category,title,author,months_since_epoch".split(',')
FACTOR_COLUMNS = ["user_factor_{}".format(i) for i in range(10)] + ["item_factor_{}".format(i) for i in range(10)]
CSV_COLUMNS = NON_FACTOR_COLUMNS + FACTOR_COLUMNS
LABEL_COLUMN = "next_content_id"
# Set default values for each CSV column
NON_FACTOR_DEFAULTS = [["Unknown"],["Unknown"],["Unknown"],["Unknown"],["Unknown"],["Unknown"],[mean_months_since_epoch]]
FACTOR_DEFAULTS = [[0.0] for i in range(10)] + [[0.0] for i in range(10)] # user and item
DEFAULTS = NON_FACTOR_DEFAULTS + FACTOR_DEFAULTS
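# Quick sanity check (sketch): the column list and the default list must line up one-to-one
# for tf.decode_csv (used below) to parse the preprocessed files correctly.
assert len(CSV_COLUMNS) == len(DEFAULTS), (len(CSV_COLUMNS), len(DEFAULTS))
print("{} CSV columns, label column = {}".format(len(CSV_COLUMNS), LABEL_COLUMN))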
# Create input function for train and eval
def read_dataset(filename, mode, batch_size = 512):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(records = value_column, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename = filename)
# Create dataset from file list
dataset = tf.data.TextLineDataset(filenames = file_list).map(map_func = decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(count = num_epochs).batch(batch_size = batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Create feature columns to be used in model
def create_feature_columns(args):
# Create content_id feature column
content_id_column = tf.feature_column.categorical_column_with_hash_bucket(
key = "content_id",
hash_bucket_size = number_of_content_ids)
# Embed content id into a lower dimensional representation
embedded_content_column = tf.feature_column.embedding_column(
categorical_column = content_id_column,
dimension = args["content_id_embedding_dimensions"])
# Create category feature column
categorical_category_column = tf.feature_column.categorical_column_with_vocabulary_file(
key = "category",
vocabulary_file = tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/category_vocab.txt*".format(args["bucket"]))[0],
num_oov_buckets = 1)
# Convert categorical category column into indicator column so that it can be used in a DNN
indicator_category_column = tf.feature_column.indicator_column(categorical_column = categorical_category_column)
# Create title feature column using TF Hub
embedded_title_column = hub.text_embedding_column(
key = "title",
module_spec = "https://tfhub.dev/google/nnlm-de-dim50-with-normalization/1",
trainable = False)
# Create author feature column
author_column = tf.feature_column.categorical_column_with_hash_bucket(
key = "author",
hash_bucket_size = number_of_authors + 1)
# Embed author into a lower dimensional representation
embedded_author_column = tf.feature_column.embedding_column(
categorical_column = author_column,
dimension = args["author_embedding_dimensions"])
# Create months since epoch boundaries list for our binning
months_since_epoch_boundaries = list(range(400, 700, 20))
# Create months_since_epoch feature column using raw data
months_since_epoch_column = tf.feature_column.numeric_column(
key = "months_since_epoch")
# Create bucketized months_since_epoch feature column using our boundaries
months_since_epoch_bucketized = tf.feature_column.bucketized_column(
source_column = months_since_epoch_column,
boundaries = months_since_epoch_boundaries)
# Cross our categorical category column and bucketized months since epoch column
crossed_months_since_category_column = tf.feature_column.crossed_column(
keys = [categorical_category_column, months_since_epoch_bucketized],
hash_bucket_size = len(months_since_epoch_boundaries) * (number_of_categories + 1))
# Convert crossed categorical category and bucketized months since epoch column into indicator column so that it can be used in a DNN
indicator_crossed_months_since_category_column = tf.feature_column.indicator_column(
categorical_column = crossed_months_since_category_column)
# Create user and item factor feature columns from our trained WALS model
user_factors = [tf.feature_column.numeric_column(key = "user_factor_" + str(i)) for i in range(10)]
item_factors = [tf.feature_column.numeric_column(key = "item_factor_" + str(i)) for i in range(10)]
# Create list of feature columns
feature_columns = [embedded_content_column,
embedded_author_column,
indicator_category_column,
embedded_title_column,
indicator_crossed_months_since_category_column] + user_factors + item_factors
return feature_columns
# Create custom model function for our custom estimator
def model_fn(features, labels, mode, params):
# One possible completion of the lab's TODOs (standard custom-Estimator pattern):
# Create neural network input layer using our feature columns defined above
net = tf.feature_column.input_layer(features = features, feature_columns = params["feature_columns"])
# Create hidden layers by looping through hidden unit list
for units in params["hidden_units"]:
net = tf.layers.dense(inputs = net, units = units, activation = tf.nn.relu)
# Compute logits (1 per class) using the output of our last hidden layer
logits = tf.layers.dense(inputs = net, units = params["n_classes"], activation = None)
# Find the predicted class indices based on the highest logit (which will result in the highest probability)
predicted_classes = tf.argmax(input = logits, axis = 1)
# Read in the content id vocabulary so we can tie the predicted class indices to their respective content ids
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/content_id_vocab.txt*".format(BUCKET))[0], mode = "r") as ifp:
content_id_names = tf.constant(value = [x.rstrip() for x in ifp])
# Gather predicted class names based predicted class indices
predicted_class_names = tf.gather(params = content_id_names, indices = predicted_classes)
# If the mode is prediction
if mode == tf.estimator.ModeKeys.PREDICT:
# Create predictions dict
predictions_dict = {
"class_ids": tf.expand_dims(input = predicted_classes, axis = -1),
"class_names" : tf.expand_dims(input = predicted_class_names, axis = -1),
"probabilities": tf.nn.softmax(logits = logits),
"logits": logits
}
# Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
return tf.estimator.EstimatorSpec( # return early since we're done with what we need for prediction mode
mode = mode,
predictions = predictions_dict,
loss = None,
train_op = None,
eval_metric_ops = None,
export_outputs = export_outputs)
# Continue on with training and evaluation modes
# Create lookup table using our content id vocabulary
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file = tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/content_id_vocab.txt*".format(BUCKET))[0])
# Look up labels from vocabulary table
labels = table.lookup(keys = labels)
# TODO: Compute loss using the correct type of softmax cross entropy since this is classification and our labels (content id indices) and probabilities are mutually exclusive
loss = tf.losses.sparse_softmax_cross_entropy(labels = labels, logits = logits) # one possible completion of the TODO above
# If the mode is evaluation
if mode == tf.estimator.ModeKeys.EVAL:
# Compute evaluation metrics of total accuracy and the accuracy of the top k classes
accuracy = tf.metrics.accuracy(labels = labels, predictions = predicted_classes, name = "acc_op")
top_k_accuracy = tf.metrics.mean(values = tf.nn.in_top_k(predictions = logits, targets = labels, k = params["top_k"]))
map_at_k = tf.metrics.average_precision_at_k(labels = labels, predictions = predicted_classes, k = params["top_k"])
# Put eval metrics into a dictionary
eval_metric_ops = {
"accuracy": accuracy,
"top_k_accuracy": top_k_accuracy,
"map_at_k": map_at_k}
# Create scalar summaries to see in TensorBoard
tf.summary.scalar(name = "accuracy", tensor = accuracy[1])
tf.summary.scalar(name = "top_k_accuracy", tensor = top_k_accuracy[1])
tf.summary.scalar(name = "map_at_k", tensor = map_at_k[1])
return tf.estimator.EstimatorSpec( # return early since we're done with what we need for evaluation mode
mode = mode,
predictions = None,
loss = loss,
train_op = None,
eval_metric_ops = eval_metric_ops,
export_outputs = None)
# Continue on with training mode
# If the mode is training
assert mode == tf.estimator.ModeKeys.TRAIN
# Create a custom optimizer
optimizer = tf.train.AdagradOptimizer(learning_rate = params["learning_rate"])
# Create train op
train_op = optimizer.minimize(loss = loss, global_step = tf.train.get_global_step())
return tf.estimator.EstimatorSpec( # final return since we're done with what we need for training mode
mode = mode,
predictions = None,
loss = loss,
train_op = train_op,
eval_metric_ops = None,
export_outputs = None)
# Create serving input function
def serving_input_fn():
feature_placeholders = {
colname : tf.placeholder(dtype = tf.string, shape = [None]) \
for colname in NON_FACTOR_COLUMNS[1:-1]
}
feature_placeholders["months_since_epoch"] = tf.placeholder(dtype = tf.float32, shape = [None])
for colname in FACTOR_COLUMNS:
feature_placeholders[colname] = tf.placeholder(dtype = tf.float32, shape = [None])
features = {
key: tf.expand_dims(tensor, -1) \
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
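# Note (sketch): with this serving input function, an online prediction instance supplies one value
# per placeholder, e.g. {"visitor_id": "...", "content_id": "...", "category": "...", "title": "...",
# "author": "...", "months_since_epoch": 520.0, "user_factor_0": 0.0, ..., "item_factor_9": 0.0}
# (the months_since_epoch/factor values shown here are hypothetical placeholders, not real data).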
# Create train and evaluate loop to combine all of the pieces together.
tf.logging.set_verbosity(tf.logging.INFO)
def train_and_evaluate(args):
estimator = tf.estimator.Estimator(
model_fn = model_fn,
model_dir = args["output_dir"],
params = {
"feature_columns": create_feature_columns(args),
"hidden_units": args["hidden_units"],
"n_classes": number_of_content_ids,
"learning_rate": args["learning_rate"],
"top_k": args["top_k"],
"bucket": args["bucket"]
}
)
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset(filename = args["train_data_paths"], mode = tf.estimator.ModeKeys.TRAIN, batch_size = args["batch_size"]),
max_steps = args["train_steps"])
exporter = tf.estimator.LatestExporter(name = "exporter", serving_input_receiver_fn = serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset(filename = args["eval_data_paths"], mode = tf.estimator.ModeKeys.EVAL, batch_size = args["batch_size"]),
steps = None,
start_delay_secs = args["start_delay_secs"],
throttle_secs = args["throttle_secs"],
exporters = exporter)
tf.estimator.train_and_evaluate(estimator = estimator, train_spec = train_spec, eval_spec = eval_spec)
# Call train and evaluate loop
import shutil
outdir = "hybrid_recommendation_trained"
shutil.rmtree(path = outdir, ignore_errors = True) # start fresh each time
arguments = {
"bucket": BUCKET,
"train_data_paths": "gs://{}/hybrid_recommendation/preproc/features/train.csv*".format(BUCKET),
"eval_data_paths": "gs://{}/hybrid_recommendation/preproc/features/eval.csv*".format(BUCKET),
"output_dir": outdir,
"batch_size": 128,
"learning_rate": 0.1,
"hidden_units": [256, 128, 64],
"content_id_embedding_dimensions": 10,
"author_embedding_dimensions": 10,
"top_k": 10,
"train_steps": 1000,
"start_delay_secs": 30,
"throttle_secs": 30
}
train_and_evaluate(arguments)
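# After this run, the LatestExporter defined above writes a SavedModel under
# hybrid_recommendation_trained/export/exporter/<timestamp>/, which can be inspected
# (sketch) with: !saved_model_cli show --dir <that path> --all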
%%writefile requirements.txt
tensorflow_hub
%%bash
echo "bucket=${BUCKET}"
OUTDIR=hybrid_recommendation_trained # local output dir; referenced as ${OUTDIR} below
rm -rf ${OUTDIR}
export PYTHONPATH=${PYTHONPATH}:${PWD}/hybrid_recommendations_module
python -m trainer.task \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=60
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/small_trained_model
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=30
%%writefile hyperparam.yaml
trainingInput:
hyperparameters:
goal: MAXIMIZE
maxTrials: 5
maxParallelTrials: 1
hyperparameterMetricTag: accuracy
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 64
scaleType: UNIT_LINEAR_SCALE
- parameterName: learning_rate
type: DOUBLE
minValue: 0.01
maxValue: 0.1
scaleType: UNIT_LINEAR_SCALE
- parameterName: hidden_units
type: CATEGORICAL
categoricalValues: ["1024 512 256", "1024 512 128", "1024 256 128", "512 256 128", "1024 512 64", "1024 256 64", "512 256 64", "1024 128 64", "512 128 64", "256 128 64", "1024 512 32", "1024 256 32", "512 256 32", "1024 128 32", "512 128 32", "256 128 32", "1024 64 32", "512 64 32", "256 64 32", "128 64 32"]
- parameterName: content_id_embedding_dimensions
type: INTEGER
minValue: 5
maxValue: 250
scaleType: UNIT_LOG_SCALE
- parameterName: author_embedding_dimensions
type: INTEGER
minValue: 5
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/hypertuning
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
--config=hyperparam.yaml \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=30
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/big_trained_model
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=10000 \
--start_delay_secs=30 \
--throttle_secs=30
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now reset the notebook's session kernel! Since we're no longer using Cloud Dataflow, we'll be using the python3 kernel from here on out so don't forget to change the kernel if it's still python2.
Step2: <h2> Create hybrid recommendation system model using TensorFlow </h2>
Step3: Create input function for training and evaluation to read from our preprocessed CSV files.
Step4: Next, we will create our feature columns using our read in features.
Step5: Now we'll create our model function
Step6: Now create a serving input function
Step7: Now that all of the pieces are assembled let's create and run our train and evaluate loop
Step8: Run train_and_evaluate!
Step9: Run on module locally
Step10: Run on Google Cloud AI Platform
Step11: Let's add some hyperparameter tuning!
Step12: Now that we know the best hyperparameters, run a big training job!
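To make Step 3 concrete, here is a rough sketch of the kind of CSV input function the trainer package could define for these preprocessed files. The column names, defaults and TF 1.x-style calls below are assumptions for illustration only, not the actual contents of hybrid_recommendations_module:
import tensorflow as tf

CSV_COLUMNS = ['visitor_id', 'content_id', 'category', 'author',
               'months_since_epoch', 'next_content_id']  # assumed schema
LABEL_COLUMN = 'next_content_id'
DEFAULTS = [['Unknown'], ['Unknown'], ['Unknown'], ['Unknown'], [0.0], ['Unknown']]

def read_dataset(filename_pattern, mode, batch_size=128):
    def decode_csv(row):
        columns = tf.decode_csv(row, record_defaults=DEFAULTS)
        features = dict(zip(CSV_COLUMNS, columns))
        label = features.pop(LABEL_COLUMN)
        return features, label
    files = tf.data.Dataset.list_files(filename_pattern)
    dataset = files.flat_map(tf.data.TextLineDataset).map(decode_csv)
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.shuffle(buffer_size=10 * batch_size).repeat()
    return dataset.batch(batch_size)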
|
721 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import itertools
from qutip import *
from numpy import *
n_Pi = 13 # maximum pulse area, in units of pi
Om_list = np.linspace(0.001, n_Pi, 80) # driving strengths
wd_list_e = np.array([-1, 0, 1]) # laser offsets in meV
wd_list = wd_list_e*1.5 # in angular frequency
tlist = np.linspace(0, 50, 40) # tmax ~ 2x FWHM
# normalized Gaussian pulse shape, ~10ps long in energy
t0 = 17 / (2 * np.sqrt(2 * np.log(2)))
#pulse_shape = np.exp(-(tlist - 24) ** 2 / (2 * t0 ** 2))
pulse_shape = '0.0867 * exp(-(t - 24) ** 2 / (2 * {0} ** 2))'.format(t0)
# initial state
psi0 = fock(2, 1) # ground state
# system's atomic lowering operator
sm = sigmam()
# Hamiltonian components
H_S = -sm.dag() * sm # self-energy, varies with drive frequency
H_I = sm + sm.dag()
# we ignore spontaneous emission since the pulse is much faster than
# the decay time
c_ops = []
# operator that couples the quantum dot to acoustic phonons
a_op = sm.dag()*sm
# This spectrum is a displaced gaussian multiplied by w^3, which
# models coupling to LA phonons. The electron and hole
# interactions contribute constructively.
# fitting parameters ae/ah
ah = 1.9e-9 # m
ae = 3.5e-9 # m
# GaAs material parameters
De = 7
Dh = -3.5
v = 5110 # m/s
rho_m = 5370 # kg/m^3
hbar = 1.05457173e-34 # Js
T = 4.2 # Kelvin, temperature
# results in ~3THz angular frequency width, w in THz
# zero T spectrum, for w>0, defined as a function of angular frequency w (THz)
def J(w):
    return 1.6*1e-13*w**3/(4*np.pi**2*rho_m*hbar*v**5) * \
        (De*np.exp(-(w*1e12*ae/(2*v))**2) -
         Dh*np.exp(-(w*1e12*ah/(2*v))**2))**2
# for temperature dependence, the 'negative' frequency
# components correspond to absorption vs emission
# w > 0:
def JT_p(w):
    return J(w)*(1 + np.exp(-w*0.6582119/(T*0.086173)) /
                 (1-np.exp(-w*0.6582119/(T*0.086173))))
# w < 0:
def JT_m(w):
    return -J(w)*np.exp(w*0.6582119/(T*0.086173)) / \
        (1-np.exp(w*0.6582119/(T*0.086173)))
# the Bloch-Redfield solver requires the spectra to be
# formatted as a string
spectra_cb =' 1.6*1e-13*w**3/(4*pi**2*5370*1.05457173e-34*5110**5) * ' + \
'(7*exp(-(w*1e12*3.5e-9/(2*5110))**2) +' + \
'3.5*exp(-(w*1e12*1.9e-9 /(2*5110))**2))**2 *' + \
'((1 + exp(-w*0.6582119/(4.2*0.086173)) /' + \
'(1+1e-9-exp(-w*0.6582119/(4.2*0.086173))))*(w>=0)' + \
'-exp(w*0.6582119/(4.2*0.086173)) /' + \
'(1+1e-9-exp(w*0.6582119/(4.2*0.086173)))*(w<0))'
spec_list = np.linspace(-5, 10, 200)
plt.figure(figsize=(8, 5))
plt.plot(spec_list, [eval(spectra_cb.replace('w', str(_))) for _ in spec_list])
plt.xlim(-5, 10)
plt.xlabel('$\omega$ [THz]')
plt.ylabel('$J(\omega)$ [THz]')
plt.title('Quantum-dot-phonon interaction spectrum');
# we will calculate the dot population expectation value
e_ops = [sm.dag()*sm]
# define callback for parallelization
def brme_step(args):
wd = args[0]
Om = args[1]
H = [wd * H_S, [Om * H_I, pulse_shape]]
# calculate the population after the pulse interaction has
# finished using the Bloch-Redfield time-dependent solver
return brmesolve(H, psi0, tlist, [[a_op, spectra_cb]],
e_ops,options=Options(rhs_reuse=True)).expect[0][-1]
# use QuTiP's builtin parallelized for loop, parfor
results = parfor(brme_step, itertools.product(wd_list, Om_list))
# unwrap the results into a 2d array
inv_mat_X = np.array(results).reshape((len(wd_list), len(Om_list)))
plt.figure(figsize=(8, 5))
plt.plot(Om_list, inv_mat_X[0])
plt.plot(Om_list, inv_mat_X[1])
plt.plot(Om_list, inv_mat_X[2])
plt.legend(['laser detuning, -1 meV',
'laser detuning, 0 meV',
'laser detuning, +1 meV'], loc=4)
plt.xlim(0, 13)
plt.xlabel('Pulse area [$\pi$]')
plt.ylabel('Excited state population')
plt.title('Effects of phonon dephasing for different pulse detunings');
from qutip.ipynbtools import version_table
version_table()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Setup the operators, Hamiltonian, and initial state
Step4: Below, we define the terms specific to the Bloch-Redfield solver's system-environmental coupling. The quantum dot couples to acoustic phonons in its solid-state environment through a dispersive electron-phonon interaction of the form
Step5: Visualize the dot-phonon interaction spectrum
Step6: Calculate the pulse-system interaction dynamics
Step7: Visualize the quantum dot's initialization fidelity
Step8: Versions
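As a quick sanity check on the absorption/emission discussion above, the temperature-dependent phonon spectrum should satisfy detailed balance, i.e. JT(-w) = exp(-hbar*w/kT) * JT(w). A small sketch, assuming the J/JT_p/JT_m helpers from the code above are in scope:
w0 = 2.0  # angular frequency in THz
boltzmann_factor = np.exp(-w0 * 0.6582119 / (T * 0.086173))
print(JT_m(-w0), boltzmann_factor * JT_p(w0))  # the two values should agree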
|
722 | <ASSISTANT_TASK:>
Python Code:
import vcsn
from IPython.display import Latex
def diffs(r, ss):
eqs = []
for s in ss:
eqs.append(r'\frac{{\partial}}{{\partial {0}}} {1}& = {2}'
.format(s,
r.format('latex'),
r.derivation(s).format('latex')))
return Latex(r'''\begin{{aligned}}
{0}
\end{{aligned}}'''.format(r'\\'.join(eqs)))
b = vcsn.context('lal_char(ab), b')
r = b.expression('[ab]{3}')
r.derivation('a')
diffs(r, ['a', 'aa', 'aaa', 'aaaa'])
q = vcsn.context('lal_char(abc), q')
r = q.expression('(<1/6>a*+<1/3>b*)*')
diffs(r, ['a', 'aa', 'ab', 'b', 'ba', 'bb'])
r.derived_term()
r = q.expression('[ab](<2>[ab])', 'associative')
r
r.derivation('a')
r.derivation('a', True)
r.derivation('a').split()
r.derived_term()
r.derived_term('breaking_derivation')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classical expressions
Step2: Or, using the diffs function we defined above
Step3: Weighted Expressions
Step4: And this is tightly connected with the construction of the derived-term automaton.
Step5: Breaking derivation
Step6: Again, this is tightly connected with both flavors of the derived-term automaton.
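A small follow-up sketch (assuming the same `b` context defined in the code above, and that vcsn values print as plain text): deriving [ab]{3} by successively longer words should peel off one [ab] factor at a time, mirroring the derived-term construction.
rr = b.expression('[ab]{3}')
for w in ['a', 'aa', 'aaa', 'aaaa']:
    # roughly: [ab]{2}, then [ab], then \e, then the null polynomial
    print(w, '->', rr.derivation(w))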
|
723 | <ASSISTANT_TASK:>
Python Code:
PROJECT_ID = "YOUR PROJECT ID"
BUCKET_NAME = "gs://YOUR BUCKET NAME"
REGION = "YOUR REGION"
SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT"
content_name = "tf-keras-txt-cls-dist-single-worker-gpus-local-mode-cont"
BASE_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-5:latest"
SCRIPT_PATH = "trainer/task.py"
OUTPUT_IMAGE_NAME = "gcr.io/{}/{}:latest".format(PROJECT_ID, content_name)
ARGS = "--epochs 5 --batch-size 16 --local-mode"
! gcloud ai custom-jobs local-run \
--executor-image-uri=$BASE_IMAGE_URI \
--script=$SCRIPT_PATH \
--output-image-uri=$OUTPUT_IMAGE_NAME \
-- \
$ARGS
custom_container_image_uri = OUTPUT_IMAGE_NAME
! docker push $custom_container_image_uri
! gcloud container images list --repository "gcr.io"/$PROJECT_ID
! pip install -r requirements.txt
from google.cloud import aiplatform
aiplatform.init(
project=PROJECT_ID,
staging_bucket=BUCKET_NAME,
location=REGION,
)
tensorboard = aiplatform.Tensorboard.create(
display_name=content_name,
)
display_name = content_name
gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}"
machine_type = "n1-standard-8"
accelerator_count = 4
accelerator_type = "NVIDIA_TESLA_P100"
args = [
"--epochs",
"100",
"--batch-size",
"128",
"--num-gpus",
f"{accelerator_count}",
]
custom_container_training_job = aiplatform.CustomContainerTrainingJob(
display_name=display_name,
container_uri=custom_container_image_uri,
)
custom_container_training_job.run(
args=args,
base_output_dir=gcs_output_uri_prefix,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
tensorboard=tensorboard.resource_name,
service_account=SERVICE_ACCOUNT,
)
print(f"Custom Training Job Name: {custom_container_training_job.resource_name}")
print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
! gsutil ls $gcs_output_uri_prefix
! gsutil rm -rf $gcs_output_uri_prefix
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Local Training with Vertex Local Mode and Auto Packaging
Step2: Vertex Training using Vertex SDK and Vertex Local Mode Container
Step3: Initialize Vertex SDK
Step4: Create a Vertex Tensorboard Instance
Step5: Option
Step6: Training Output Artifact
Step7: Clean Up Artifact
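For context, a hypothetical sketch of how a trainer/task.py entry point might consume the flags passed via `args` above (the real task.py in this sample defines its own parser; the names here simply mirror the flags used in this notebook):
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=16)
    parser.add_argument('--num-gpus', dest='num_gpus', type=int, default=0)
    parser.add_argument('--local-mode', dest='local_mode', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    print('epochs=%d batch_size=%d num_gpus=%d' %
          (args.epochs, args.batch_size, args.num_gpus))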
|
724 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
csv = np.genfromtxt('thorium_test_2019-02-19_D3S.csv', delimiter= ",").T
summed = np.sum(csv[:-1], axis=1) # gets rid of last value
plt.plot(summed)
plt.yscale('log')
plt.show()
Pb_shift = 250
Pb_range = 250
Pb_sample = summed[Pb_shift: Pb_shift + Pb_range]
plt.plot(Pb_sample)
plt.show()
Pb_mu = np.mean(Pb_sample)
Pb_sig = np.std(Pb_sample)
def func(x, a, m, s, c):
return a * np.exp(-(x - m)**2 / (2 * s**2)) + c
Pb_xdata = np.arange(Pb_range)
trydata = func(Pb_sample, np.max(Pb_sample), Pb_mu, Pb_sig, np.max(Pb_sample) + 50)
p0 = [4000,140,75,1500]
Pb_popt, Pb_pcov = curve_fit(func, Pb_xdata, Pb_sample, p0)
print(Pb_popt)
plt.plot(Pb_xdata, Pb_sample)
plt.plot(Pb_xdata, func(Pb_xdata, *Pb_popt))
plt.plot(int(Pb_popt[1]), func(Pb_xdata, *Pb_popt)[int(Pb_popt[1])], 'ro')
Pb_channel = Pb_shift + int(Pb_popt[1])
plt.show()
plt.plot(summed)
plt.plot(Pb_channel, summed[Pb_channel], 'r.')
plt.yscale('log')
print(Pb_channel)
plt.show()
Ac_shift = 830
Ac_range = 230
Ac_sample = summed[Ac_shift: Ac_shift + Ac_range]
plt.plot(Ac_sample)
plt.show()
Ac_mu = np.mean(Ac_sample)
Ac_sig = np.std(Ac_sample)
Ac_xdata = np.arange(Ac_range)
Ac_trydata = func(Ac_sample, np.max(Ac_sample), Ac_mu, Ac_sig, np.max(Ac_sample) + 50)
Ac_p0 = [700,100,25,300]
Ac_popt, Ac_pcov = curve_fit(func, Ac_xdata, Ac_sample, Ac_p0)
print(Ac_popt)
plt.plot(Ac_xdata, Ac_sample)
plt.plot(Ac_xdata, func(Ac_xdata, *Ac_popt))
plt.plot(int(Ac_popt[1]), func(Ac_xdata, *Ac_popt)[int(Ac_popt[1])], 'ro')
Ac_channel = Ac_shift + int(Ac_popt[1])
plt.show()
plt.plot(summed)
plt.plot(Ac_channel, summed[Ac_channel], 'r.')
plt.yscale('log')
print(Ac_channel)
plt.show()
Ti_shift = 2200
Ti_range = 500
Ti_sample = summed[Ti_shift: Ti_shift + Ti_range]
plt.plot(Ti_sample)
plt.show()
Ti_mu = np.mean(Ti_sample)
Ti_sig = np.std(Ti_sample)
Ti_xdata = np.arange(Ti_range)
Ti_trydata = func(Ti_sample, np.max(Ti_sample), Ti_mu, Ti_sig, np.max(Ti_sample) + 50)
Ti_p0 = [80,200,20,50]
Ti_popt, Ti_pcov = curve_fit(func, Ti_xdata, Ti_sample, Ti_p0)
print(Ti_popt)
plt.plot(Ti_xdata, Ti_sample)
plt.plot(Ti_xdata, func(Ti_xdata, *Ti_popt))
plt.plot(int(Ti_popt[1]), func(Ti_xdata, *Ti_popt)[int(Ti_popt[1])], 'ro')
maxish = max(Ti_sample[int(Ti_popt[1]) - 5: int(Ti_popt[1]) + 5])
x_maxish = np.argmax(Ti_sample[int(Ti_popt[1]) - 5: int(Ti_popt[1]) + 5]) + int(Ti_popt[1]) - 5
plt.plot(x_maxish, maxish, 'r^')
Ti_channel = Ti_shift + x_maxish
plt.show()
plt.plot(summed)
plt.plot(Ti_channel, summed[Ti_channel], 'r.')
plt.yscale('log')
print(Ti_channel)
plt.show()
z_shift = 3525
z_range = 300
z_sample = summed[z_shift: z_shift + z_range]
plt.plot(z_sample)
plt.show()
z_mu = np.mean(z_sample)
z_sig = np.std(z_sample)
z_xdata = np.arange(z_range)
z_trydata = func(z_sample, np.max(z_sample), z_mu, z_sig, np.max(z_sample) + 50)
z_p0 = [900,180,20,15]
z_popt, z_pcov = curve_fit(func, z_xdata, z_sample, z_p0)
print(z_popt)
plt.plot(z_xdata, z_sample)
plt.plot(z_xdata, func(z_xdata, *z_popt))
plt.plot(int(z_popt[1]), func(z_xdata, *z_popt)[int(z_popt[1])], 'ro')
z_maxish = max(z_sample[int(z_popt[1]) - 5: int(z_popt[1]) + 5])
x_z_maxish = np.argmax(z_sample[int(z_popt[1]) - 5: int(z_popt[1]) + 5]) + int(z_popt[1]) - 5
plt.plot(x_z_maxish, z_maxish, 'r^')
z_channel = z_shift + x_z_maxish
plt.show()
plt.plot(summed)
plt.plot(z_channel, summed[z_channel], 'r.')
plt.yscale('log')
print(z_channel)
plt.show()
plt.plot(summed)
plt.plot(Pb_channel, summed[Pb_channel], 'r.')
plt.plot(Ac_channel, summed[Ac_channel], 'r.')
plt.plot(Ti_channel, summed[Ti_channel], 'r.')
plt.plot(z_channel, summed[z_channel], 'r.')
plt.yscale('log')
print(Pb_channel, Ac_channel, Ti_channel, z_channel)
print(summed[Pb_channel], summed[Ac_channel], summed[Ti_channel], summed[z_channel])
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we find the peak at Pb-212 ~ 238 keV
Step2: This is good enough for now, but we can fix it later if needed
Step3: expected
Step4: Expected
Step5: This one's a lot noisier than the others.
Step6: I didn't like how the channel dot wasn't on top of the peaks so I just changed the x position to be within 10 indices of the mean index.
Step7: Okay but what about the peak at ~3700?
Step8: Same here: I didn't like how the fit doesn't touch the top of the peaks, but in practice it makes only an aesthetic difference since the offset is so small.
Step9: Ok now with all the values
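One extra number that falls out of these fits for free is each peak's width: for a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma. A short sketch reusing the popt arrays from the fits above:
fwhm = lambda popt: 2 * np.sqrt(2 * np.log(2)) * abs(popt[2])
for name, popt in [('Pb', Pb_popt), ('Ac', Ac_popt), ('Ti', Ti_popt), ('z', z_popt)]:
    print(name, 'peak FWHM (in channels):', fwhm(popt))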
|
725 | <ASSISTANT_TASK:>
Python Code:
import sys # system module
import pandas as pd # data package
import matplotlib as mpl # graphics package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
# check versions (overkill, but why not?)
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Matplotlib version: ', mpl.__version__)
print('Today: ', dt.date.today())
# This is an IPython command. It puts plots here in the notebook, rather than a separate window.
%matplotlib inline
# US GDP and consumption
gdp = [13271.1, 13773.5, 14234.2, 14613.8, 14873.7, 14830.4, 14418.7,
14783.8, 15020.6, 15369.2, 15710.3]
pce = [8867.6, 9208.2, 9531.8, 9821.7, 10041.6, 10007.2, 9847.0, 10036.3,
10263.5, 10449.7, 10699.7]
year = list(range(2003,2014)) # use range for years 2003-2013
# create dataframe from dictionary
us = pd.DataFrame({'gdp': gdp, 'pce': pce}, index=year)
print(us.head(3))
# GDP per capita (World Bank data, 2013, thousands of USD)
code = ['USA', 'FRA', 'JPN', 'CHN', 'IND', 'BRA', 'MEX']
country = ['United States', 'France', 'Japan', 'China', 'India',
'Brazil', 'Mexico']
gdppc = [53.1, 36.9, 36.3, 11.9, 5.4, 15.0, 16.5]
wbdf = pd.DataFrame({'gdppc': gdppc, 'country': country}, index=code)
wbdf
# Fama-French
import pandas.io.data as web
# read annual data from website and rename variables
ff = web.DataReader('F-F_Research_Data_factors', 'famafrench')[1]
ff.columns = ['xsm', 'smb', 'hml', 'rf']
ff['rm'] = ff['xsm'] + ff['rf']
ff = ff[['rm', 'rf']] # extract rm and rf (return on market, riskfree rate, percent)
ff.head(5)
# This is an IPython command: it puts plots here in the notebook, rather than a separate window.
%matplotlib inline
# try this with US GDP
us.plot()
# do GDP alone
us['gdp'].plot()
# bar chart
us.plot(kind='bar')
us.plot
# scatter plot
# we need to be explicit about the x and y variables: x = 'gdp', y = 'pce'
us.plot.scatter('gdp', 'pce')
# now try a few things with the Fama-French data
ff.plot()
ff.plot()
ff.plot(kind='hist', bins=20, subplots=True)
# "smoothed" histogram
ff.plot(kind='kde', subplots=True, sharex=True) # smoothed histogram ("kernel density estimate")
# import pyplot module of Matplotlib
import matplotlib.pyplot as plt
plt.plot(us.index, us['gdp'])
# we can do two lines together
plt.plot(us.index, us['gdp'])
plt.plot(us.index, us['pce'])
# or a bar chart
plt.bar(us.index, us['gdp'], align='center')
# we can also add things to plots
plt.plot(us.index, us['gdp'])
plt.plot(us.index, us['pce'])
plt.title('US GDP', fontsize=14, loc='left') # add title
plt.ylabel('Billions of 2009 USD') # y axis label
plt.xlim(2002.5, 2013.5) # shrink x axis limits
plt.tick_params(labelcolor='red') # change tick labels to red
plt.legend(['GDP', 'Consumption']) # more descriptive variable names
# create fig and ax objects
fig, ax = plt.subplots()
# let's try that again, this time with content
# create objects
fig, axe = plt.subplots()
# add things by applying methods to ax
us.plot(ax=axe)
# Fama-French example
fig, ax = plt.subplots()
ff.plot(ax=ax,
kind='line', # line plot
color=['blue', 'magenta'], # line color
title='Fama-French market and riskfree returns')
fig, ax = plt.subplots()
us.plot(ax=ax)
ax.set_title('US GDP and Consumption', fontsize=14, loc='left')
ax.set_ylabel('Billions of 2013 USD')
ax.legend(['Real GDP', 'Consumption'], loc=0) # more descriptive variable names
ax.set_xlim(2002.5, 2013.5) # expand x axis limits
ax.tick_params(labelcolor='red') # change tick labels to red
ax.set_ylim(0)
# this creates a 2-dimensional ax
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)
print('Object ax has dimension', len(ax))
# now add some content
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
us['gdp'].plot(ax=ax[0], color='green') # first plot
us['pce'].plot(ax=ax[1], color='red') # second plot
# data input
import pandas as pd
url = 'http://dx.doi.org/10.1787/888932937035'
pisa = pd.read_excel(url,
skiprows=18, # skip the first 18 rows
skipfooter=7, # skip the last 7
parse_cols=[0,1,9,13], # select columns
index_col=0, # set index = first column
header=[0,1] # set variable names
)
pisa = pisa.dropna() # drop blank lines
pisa.columns = ['Math', 'Reading', 'Science'] # simplify variable names
# bar chart of math scores
fig, ax = plt.subplots()
pisa['Math'].plot(kind='barh', ax=ax)
# make the plot taller
fig, ax = plt.subplots(figsize=(4, 13)) # note figsize
pisa['Math'].plot(kind='barh', ax=ax)
ax.set_title('PISA Math Score', loc='left')
fig, ax = plt.subplots()
pisa['Math'].plot(kind='barh', ax=ax, figsize=(4,13))
ax.set_title('PISA Math Score', loc='left')
ax.get_children()[36].set_color('r')
# load packages (redundancy is ok)
import pandas as pd # data management tools
from pandas.io import wb # World Bank api
import matplotlib.pyplot as plt # plotting tools
# variable list (GDP, GDP per capita, life expectancy)
var = ['NY.GDP.PCAP.PP.KD', 'NY.GDP.MKTP.PP.KD', 'SP.DYN.LE00.IN']
# country list (ISO codes)
iso = ['USA', 'FRA', 'JPN', 'CHN', 'IND', 'BRA', 'MEX']
year = 2013
# get data from World Bank
df = wb.download(indicator=var, country=iso, start=year, end=year)
# massage data
df = df.reset_index(level='year', drop=True)
df.columns = ['gdppc', 'gdp', 'life'] # rename variables
df['pop'] = df['gdp']/df['gdppc'] # population
df['gdp'] = df['gdp']/10**12 # convert to trillions
df['gdppc'] = df['gdppc']/10**3 # convert to thousands
df['order'] = [5, 3, 1, 4, 2, 6, 0] # reorder countries
df = df.sort_values(by='order', ascending=False)
df
# GDP bar chart
fig, ax = plt.subplots()
df['gdp'].plot(ax=ax, kind='barh', alpha=0.5)
ax.set_title('GDP', loc='left', fontsize=14)
ax.set_xlabel('Trillions of US Dollars')
ax.set_ylabel('')
# ditto for GDP per capita (per person)
fig, ax = plt.subplots()
df['gdppc'].plot(ax=ax, kind='barh', color='m', alpha=0.50) # 'm' == 'magenta'
ax.set_title('GDP Per Capita', loc='left', fontsize=14)
ax.set_xlabel('Thousands of US Dollars')
ax.set_ylabel('')
# ditto for GDP per capita (per person)
fig, ax = plt.subplots()
df['gdppc'].plot(ax=ax, kind='barh', color='b', alpha=0.5)
ax.set_title('GDP Per Capita', loc='left', fontsize=14)
ax.set_xlabel('Thousands of US Dollars')
ax.set_ylabel('')
# Tufte-like axes
ax.spines['left'].set_position(('outward', 7))
ax.spines['bottom'].set_position(('outward', 7))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# scatterplot of life expectancy vs gdp per capita
fig, ax = plt.subplots()
ax.scatter(df['gdppc'], df['life'], # x,y variables
s=df['pop']/10**6, # size of bubbles
alpha=0.5)
ax.set_title('Life expectancy vs. GDP per capita', loc='left', fontsize=14)
ax.set_xlabel('GDP Per Capita')
ax.set_ylabel('Life Expectancy')
ax.text(58, 66, 'Bubble size represents population', horizontalalignment='right')
# We'll look at this chart under a variety of styles.
# Let's make a function so we don't have to repeat the
# code to create
def gdp_bar():
fig, ax = plt.subplots()
df['gdp'].plot(ax=ax, kind='barh', alpha=0.5)
ax.set_title('Real GDP', loc='left', fontsize=14)
ax.set_xlabel('Trillions of US Dollars')
ax.set_ylabel('')
gdp_bar()
plt.style.available
plt.style.use('fivethirtyeight')
gdp_bar()
plt.style.use('ggplot')
gdp_bar()
plt.xkcd()
gdp_bar()
mpl.rcParams.update(mpl.rcParamsDefault)
%matplotlib inline
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comment. When you run the code cell above, its output appears below it.
Step2: Create dataframes to play with
Step3: Comment. In the previous cell, we used the print() function to produce output. Here we just put the name of the dataframe. The latter displays the dataframe -- and formats it nicely -- if it's the last statement in the cell.
Step4: Comment. The warning in pink tells us that the Pandas DataReader will be spun off into a separate package in the near future.
Step5: Digression
Step6: Exercise. Show that we get the output from us.plot.bar().
Step7: Exercise. Enter us.plot(kind='bar') and us.plot.bar() in separate cells. Show that they produce the same bar chart.
Step8: Exercise. What do each of the arguments do in the code below?
Step9: Exercise. Let's see if we can dress up the histogram a little. Try adding, one at a time, the arguments title='Fama-French returns', grid=True, and legend=False. What does the documentation say about them? What do they do?
Step10: Exercise. What is the x variable here? The y variable?
Step11: Exercise. Experiment with
Step12: Comment. All of these statements must be in the same cell for this to work.
Step13: Exercise. What do we have here? What type are fig and ax?
Step14: Comment. Both of these statements must be in the same cell.
Step15: Exercise. Let's see if we can teach ourselves the rest
Step16: (Your results may differ, but we really enjoyed that.)
Step17: Examples
Step18: Comment. Yikes! That's horrible! What can we do about it?
Step19: Comment. What if we wanted to make the US bar red? This is ridiculously complicated, but we used our Google fu and found a solution. Remember
Step20: Exercise. Create the same graph for the Reading score.
Step21: And just because it's fun, here's an example of Tufte-like axes from Matplotlib examples. If you want to do this yourself, copy the last six line and prepare yourself to sink some time into it.
Step22: Exercise (challenging). Make the ticks point out.
Step23: Exercise. Make the bubble a little larger.
Step24: Exercise. Create the same graph with this statement at the top
Step25: Comment. Ignore the seaborn styles, that's a package we don't have yet.
Step26: Comment. For aficionados, the always tasteful xkcd style.
Step27: Comment. We reset the style with these two lines
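One possible answer to the ticks exercise, reusing the World Bank dataframe and the fig/ax interface from above (tick direction is controlled with tick_params; this is just one way to do it):
fig, ax = plt.subplots()
df['gdppc'].plot(ax=ax, kind='barh', color='b', alpha=0.5)
ax.set_title('GDP Per Capita', loc='left', fontsize=14)
ax.tick_params(axis='both', direction='out', length=5)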
|
726 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
print(tf.__version__)
!pip install git+https://github.com/tensorflow/docs
import tensorflow_docs as tfdocs
import tensorflow_docs.modeling
import tensorflow_docs.plots
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import pathlib
import shutil
import tempfile
logdir = pathlib.Path(tempfile.mkdtemp())/"tensorboard_logs"
shutil.rmtree(logdir, ignore_errors=True)
gz = tf.keras.utils.get_file('HIGGS.csv.gz', 'http://mlphysics.ics.uci.edu/data/higgs/HIGGS.csv.gz')
FEATURES = 28
ds = tf.data.experimental.CsvDataset(gz,[float(),]*(FEATURES+1), compression_type="GZIP")
def pack_row(*row):
label = row[0]
features = tf.stack(row[1:],1)
return features, label
packed_ds = ds.batch(10000).map(pack_row).unbatch()
for features,label in packed_ds.batch(1000).take(1):
print(features[0])
plt.hist(features.numpy().flatten(), bins = 101)
N_VALIDATION = int(1e3)
N_TRAIN = int(1e4)
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 500
STEPS_PER_EPOCH = N_TRAIN//BATCH_SIZE
validate_ds = packed_ds.take(N_VALIDATION).cache()
train_ds = packed_ds.skip(N_VALIDATION).take(N_TRAIN).cache()
train_ds
validate_ds = validate_ds.batch(BATCH_SIZE)
train_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
0.001,
decay_steps=STEPS_PER_EPOCH*1000,
decay_rate=1,
staircase=False)
def get_optimizer():
return tf.keras.optimizers.Adam(lr_schedule)
step = np.linspace(0,100000)
lr = lr_schedule(step)
plt.figure(figsize = (8,6))
plt.plot(step/STEPS_PER_EPOCH, lr)
plt.ylim([0,max(plt.ylim())])
plt.xlabel('Epoch')
_ = plt.ylabel('Learning Rate')
def get_callbacks(name):
return [
tfdocs.modeling.EpochDots(),
tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),
tf.keras.callbacks.TensorBoard(logdir/name),
]
def compile_and_fit(model, name, optimizer=None, max_epochs=10000):
if optimizer is None:
optimizer = get_optimizer()
model.compile(optimizer=optimizer,
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[
tf.keras.losses.BinaryCrossentropy(
from_logits=True, name='binary_crossentropy'),
'accuracy'])
model.summary()
history = model.fit(
train_ds,
steps_per_epoch = STEPS_PER_EPOCH,
epochs=max_epochs,
validation_data=validate_ds,
callbacks=get_callbacks(name),
verbose=0)
return history
tiny_model = tf.keras.Sequential([
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(1)
])
size_histories = {}
size_histories['Tiny'] = compile_and_fit(tiny_model, 'sizes/Tiny')
plotter = tfdocs.plots.HistoryPlotter(metric = 'binary_crossentropy', smoothing_std=10)
plotter.plot(size_histories)
plt.ylim([0.5, 0.7])
small_model = tf.keras.Sequential([
# `input_shape` is only required here so that `.summary` works.
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(16, activation='elu'),
layers.Dense(1)
])
size_histories['Small'] = compile_and_fit(small_model, 'sizes/Small')
medium_model = tf.keras.Sequential([
layers.Dense(64, activation='elu', input_shape=(FEATURES,)),
layers.Dense(64, activation='elu'),
layers.Dense(64, activation='elu'),
layers.Dense(1)
])
size_histories['Medium'] = compile_and_fit(medium_model, "sizes/Medium")
large_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(1)
])
size_histories['large'] = compile_and_fit(large_model, "sizes/large")
plotter.plot(size_histories)
a = plt.xscale('log')
plt.xlim([5, max(plt.xlim())])
plt.ylim([0.5, 0.7])
plt.xlabel("Epochs [Log Scale]")
#docs_infra: no_execute
# Load the TensorBoard notebook extension
%load_ext tensorboard
# Open an embedded TensorBoard viewer
%tensorboard --logdir {logdir}/sizes
display.IFrame(
src="https://tensorboard.dev/experiment/vW7jmmF9TmKmy3rbheMQpw/#scalars&_smoothingWeight=0.97",
width="100%", height="800px")
shutil.rmtree(logdir/'regularizers/Tiny', ignore_errors=True)
shutil.copytree(logdir/'sizes/Tiny', logdir/'regularizers/Tiny')
regularizer_histories = {}
regularizer_histories['Tiny'] = size_histories['Tiny']
l2_model = tf.keras.Sequential([
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001),
input_shape=(FEATURES,)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(1)
])
regularizer_histories['l2'] = compile_and_fit(l2_model, "regularizers/l2")
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
result = l2_model(features)
regularization_loss=tf.add_n(l2_model.losses)
dropout_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['dropout'] = compile_and_fit(dropout_model, "regularizers/dropout")
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
combined_model = tf.keras.Sequential([
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['combined'] = compile_and_fit(combined_model, "regularizers/combined")
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
display.IFrame(
src="https://tensorboard.dev/experiment/fGInKDo8TXes1z7HQku9mw/#scalars&_smoothingWeight=0.97",
width = "100%",
height="800px")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overfit and underfit
Step2: The Higgs dataset
Step3: The tf.data.experimental.CsvDataset class can be used to read csv records directly from a gzip file with no intermediate decompression step.
Step4: That csv reader class returns a list of scalars for each record. The following function repacks that list of scalars into a (feature_vector, label) pair.
Step5: TensorFlow is most efficient when operating on large batches of data.
Step6: Inspect some of the records from this new packed_ds.
Step7: To keep this tutorial relatively short, use just the first 1,000 samples for validation, and the next 10,000 for training
Step8: The Dataset.skip and Dataset.take methods make this easy.
Step9: These datasets return individual examples. Use the Dataset.batch method to create batches of an appropriate size for training. Before batching, also remember to use Dataset.shuffle and Dataset.repeat on the training set.
Step10: Demonstrate overfitting
Step11: The code above sets a tf.keras.optimizers.schedules.InverseTimeDecay to hyperbolically decrease the learning rate to 1/2 of the base rate at 1,000 epochs, 1/3 at 2,000 epochs, and so on.
Step12: Each model in this tutorial will use the same training configuration. So set these up in a reusable way, starting with the list of callbacks.
Step13: Similarly each model will use the same Model.compile and Model.fit settings
Step14: Tiny model
Step15: Now check how the model did
Step16: Small model
Step17: Medium model
Step18: And train the model using the same data
Step19: Large model
Step20: And, again, train the model using the same data
Step21: Plot the training and validation losses
Step22: Note
Step23: You can view the results of a previous run of this notebook on TensorBoard.dev.
Step24: If you want to share TensorBoard results you can upload the logs to TensorBoard.dev by copying the following into a code-cell.
Step25: Add weight regularization
Step26: l2(0.001) means that every coefficient in the weight matrix of the layer will add 0.001 * weight_coefficient_value**2 to the total loss of the network.
Step27: As demonstrated in the diagram above, the "L2" regularized model is now much more competitive with the "Tiny" model. This "L2" model is also much more resistant to overfitting than the "Large" model it was based on despite having the same number of parameters.
Step28: This implementation works by adding the weight penalties to the model's loss, and then applying a standard optimization procedure after that.
Step29: It's clear from this plot that both of these regularization approaches improve the behavior of the "Large" model. But this still doesn't beat even the "Tiny" baseline.
Step30: This model with the "Combined" regularization is obviously the best one so far.
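To make the capacity comparison concrete, the parameter counts of the four models can be printed directly. A small sketch using the models built in the code above:
for name, model in [('Tiny', tiny_model), ('Small', small_model),
                    ('Medium', medium_model), ('Large', large_model)]:
    print(f'{name:>6}: {model.count_params():,} parameters')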
|
727 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# import all shogun classes
from shogun import *
import random
import numpy as np
import matplotlib.pyplot as plt
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from math import exp
# plot likelihood for three different noise lebels $\sigma$ (which is not yet squared)
sigmas=np.array([0.5,1,2])
# likelihood instance
lik=GaussianLikelihood()
# A set of labels to consider
lab=RegressionLabels(np.linspace(-4.0,4.0, 200))
# A single 1D Gaussian response function, repeated once for each label
# this avoids doing a loop in python which would be slow
F=np.zeros(lab.get_num_labels())
# plot likelihood for all observations noise levels
plt.figure(figsize=(12, 4))
for sigma in sigmas:
# set observation noise, this is squared internally
lik.set_sigma(sigma)
# compute log-likelihood for all labels
log_liks=lik.get_log_probability_f(lab, F)
# plot likelihood functions, exponentiate since they were computed in log-domain
plt.plot(lab.get_labels(), list(map(exp,log_liks)))
plt.ylabel("$p(y_i|f_i)$")
plt.xlabel("$y_i$")
plt.title("Regression Likelihoods for different observation noise levels")
_=plt.legend(["sigma=$%.1f$" % sigma for sigma in sigmas])
def generate_regression_toy_data(n=50, n_test=100, x_range=15, x_range_test=20, noise_var=0.4):
# training and test sine wave, test one has more points
X_train = np.random.rand(n)*x_range
X_test = np.linspace(0,x_range_test, 500)
# add noise to training observations
y_test = np.sin(X_test)
y_train = np.sin(X_train)+np.random.randn(n)*noise_var
return X_train, y_train, X_test, y_test
X_train, y_train, X_test, y_test = generate_regression_toy_data()
plt.figure(figsize=(16,4))
plt.plot(X_train, y_train, 'ro')
plt.plot(X_test, y_test)
plt.legend(["Noisy observations", "True model"])
plt.title("One-Dimensional Toy Regression Data")
plt.xlabel("$\mathbf{x}$")
_=plt.ylabel("$\mathbf{y}$")
# bring data into shogun representation (features are 2d-arrays, organised as column vectors)
feats_train=features(X_train.reshape(1,len(X_train)))
feats_test=features(X_test.reshape(1,len(X_test)))
labels_train=RegressionLabels(y_train)
# compute covariances for different kernel parameters
taus=np.asarray([.1,4.,32.])
Cs=np.zeros(((len(X_train), len(X_train), len(taus))))
for i in range(len(taus)):
# compute unscaled kernel matrix (first parameter is maximum size in memory and not very important)
kernel=GaussianKernel(10, taus[i])
kernel.init(feats_train, feats_train)
Cs[:,:,i]=kernel.get_kernel_matrix()
# plot
plt.figure(figsize=(16,5))
for i in range(len(taus)):
plt.subplot(1,len(taus),i+1)
plt.imshow(Cs[:,:,i], interpolation="nearest")
plt.xlabel("Covariate index")
plt.ylabel("Covariate index")
_=plt.title("tau=%.1f" % taus[i])
plt.figure(figsize=(16,5))
plt.suptitle("Random Samples from GP prior")
for i in range(len(taus)):
plt.subplot(1,len(taus),i+1)
# sample a bunch of latent functions from the Gaussian Process
# note these vectors are stored row-wise
F=Statistics.sample_from_gaussian(np.zeros(len(X_train)), Cs[:,:,i], 3)
for j in range(len(F)):
# sort points to connect the dots with lines
sorted_idx=X_train.argsort()
plt.plot(X_train[sorted_idx], F[j,sorted_idx], '-', markersize=6)
plt.xlabel("$\mathbf{x}_i$")
plt.ylabel("$f(\mathbf{x}_i)$")
_=plt.title("tau=%.1f" % taus[i])
plt.figure(figsize=(16,5))
plt.suptitle("Random Samples from GP posterior")
for i in range(len(taus)):
plt.subplot(1,len(taus),i+1)
# create inference method instance with very small observation noise to make
inf=ExactInferenceMethod(GaussianKernel(10, taus[i]), feats_train, ZeroMean(), labels_train, GaussianLikelihood())
C_post=inf.get_posterior_covariance()
m_post=inf.get_posterior_mean()
# sample a bunch of latent functions from the Gaussian Process
# note these vectors are stored row-wise
F=Statistics.sample_from_gaussian(m_post, C_post, 5)
for j in range(len(F)):
# sort points to connect the dots with lines
sorted_idx=sorted(range(len(X_train)),key=lambda x:X_train[x])
plt.plot(X_train[sorted_idx], F[j,sorted_idx], '-', markersize=6)
plt.plot(X_train, y_train, 'r*')
plt.xlabel("$\mathbf{x}_i$")
plt.ylabel("$f(\mathbf{x}_i)$")
_=plt.title("tau=%.1f" % taus[i])
# helper function that plots predictive distribution and data
def plot_predictive_regression(X_train, y_train, X_test, y_test, means, variances):
# evaluate predictive distribution in this range of y-values and preallocate predictive distribution
y_values=np.linspace(-3,3)
D=np.zeros((len(y_values), len(X_test)))
# evaluate normal distribution at every prediction point (column)
for j in range(np.shape(D)[1]):
# create gaussian distributio instance, expects mean vector and covariance matrix, reshape
gauss=GaussianDistribution(np.array(means[j]).reshape(1,), np.array(variances[j]).reshape(1,1))
# evaluate predictive distribution for test point, method expects matrix
D[:,j]=np.exp(gauss.log_pdf_multiple(y_values.reshape(1,len(y_values))))
plt.pcolor(X_test,y_values,D)
plt.colorbar()
plt.contour(X_test,y_values,D)
plt.plot(X_test,y_test, 'b', linewidth=3)
plt.plot(X_test,means, 'm--', linewidth=3)
plt.plot(X_train, y_train, 'ro')
plt.legend(["Truth", "Prediction", "Data"])
plt.figure(figsize=(18,10))
plt.suptitle("GP inference for different kernel widths")
for i in range(len(taus)):
plt.subplot(len(taus),1,i+1)
# create GP instance using inference method and train
# use Shogun objects from above
inf.put('kernel', GaussianKernel(10,taus[i]))
gp=GaussianProcessRegression(inf)
gp.train()
# predict labels for all test data (note that this produces the same as the below mean vector)
means = gp.apply(feats_test)
# extract means and variance of predictive distribution for all test points
means = gp.get_mean_vector(feats_test)
variances = gp.get_variance_vector(feats_test)
# note: y_predicted == means
# plot predictive distribution and training data
plot_predictive_regression(X_train, y_train, X_test, y_test, means, variances)
_=plt.title("tau=%.1f" % taus[i])
# re-create inference method and GP instance to start from scratch, use other Shogun structures from above
inf = ExactInferenceMethod(GaussianKernel(10, taus[i]), feats_train, ZeroMean(), labels_train, GaussianLikelihood())
gp = GaussianProcessRegression(inf)
# evaluate our inference method for its derivatives
grad = GradientEvaluation(gp, feats_train, labels_train, GradientCriterion(), False)
grad.put('differentiable_function', inf)
# handles all of the above structures in memory
grad_search = GradientModelSelection(grad)
# search for best parameters and store them
best_combination = grad_search.select_model()
# apply best parameters to GP, train
best_combination.apply_to_machine(gp)
# we have to "cast" objects to the specific kernel interface we used (soon to be easier)
best_width=GaussianKernel.obtain_from_generic(inf.get_kernel()).get_width()
best_scale=inf.get_scale()
best_sigma=GaussianLikelihood.obtain_from_generic(inf.get_model()).get_sigma()
print("Selected tau (kernel bandwidth):", best_width)
print("Selected gamma (kernel scaling):", best_scale)
print("Selected sigma (observation noise):", best_sigma)
# train gp
gp.train()
# extract means and variance of predictive distribution for all test points
means = gp.get_mean_vector(feats_test)
variances = gp.get_variance_vector(feats_test)
# plot predictive distribution
plt.figure(figsize=(18,5))
plot_predictive_regression(X_train, y_train, X_test, y_test, means, variances)
_=plt.title("Maximum Likelihood II based inference")
# two classification likelihoods in Shogun
logit=LogitLikelihood()
probit=ProbitLikelihood()
# A couple of Gaussian response functions, 1-dimensional here
F=np.linspace(-5.0,5.0)
# Single observation label with +1
lab=BinaryLabels(np.array([1.0]))
# compute log-likelihood for all values in F
log_liks_logit=np.zeros(len(F))
log_liks_probit=np.zeros(len(F))
for i in range(len(F)):
# Shogun expects a 1D array for f, not a single number
f=np.array(F[i]).reshape(1,)
log_liks_logit[i]=logit.get_log_probability_f(lab, f)
log_liks_probit[i]=probit.get_log_probability_f(lab, f)
# in fact, loops are slow and Shogun offers a method to compute the likelihood for many f. Much faster!
log_liks_logit=logit.get_log_probability_fmatrix(lab, F.reshape(1,len(F)))
log_liks_probit=probit.get_log_probability_fmatrix(lab, F.reshape(1,len(F)))
# plot the sigmoid functions, note that Shogun computes it in log-domain, so we have to exponentiate
plt.figure(figsize=(12, 4))
plt.plot(F, np.exp(log_liks_logit))
plt.plot(F, np.exp(log_liks_probit))
plt.ylabel("$p(y_i|f_i)$")
plt.xlabel("$f_i$")
plt.title("Classification Likelihoods")
_=plt.legend(["Logit", "Probit"])
def generate_classification_toy_data(n_train=100, mean_a=np.asarray([0, 0]), std_dev_a=1.0, mean_b=3, std_dev_b=0.5):
# positive examples are distributed normally
X1 = (np.random.randn(n_train, 2)*std_dev_a+mean_a).T
# negative examples have a "ring"-like form
r = np.random.randn(n_train)*std_dev_b+mean_b
angle = np.random.randn(n_train)*2*np.pi
X2 = np.array([r*np.cos(angle)+mean_a[0], r*np.sin(angle)+mean_a[1]])
# stack positive and negative examples in a single array
X_train = np.hstack((X1,X2))
# label positive examples with +1, negative with -1
y_train = np.zeros(n_train*2)
y_train[:n_train] = 1
y_train[n_train:] = -1
return X_train, y_train
def plot_binary_data(X_train, y_train):
plt.plot(X_train[0, np.argwhere(y_train == 1)], X_train[1, np.argwhere(y_train == 1)], 'ro')
plt.plot(X_train[0, np.argwhere(y_train == -1)], X_train[1, np.argwhere(y_train == -1)], 'bo')
X_train, y_train=generate_classification_toy_data()
plot_binary_data(X_train, y_train)
_=plt.title("2D Toy classification problem")
# for building combinations of arrays
from itertools import product
# convert training data into Shogun representation
train_features = features(X_train)
train_labels = BinaryLabels(y_train)
# generate all pairs in 2d range of testing data (full space), discretisation resultion is n_test
n_test=50
x1 = np.linspace(X_train[0,:].min()-1, X_train[0,:].max()+1, n_test)
x2 = np.linspace(X_train[1,:].min()-1, X_train[1,:].max()+1, n_test)
X_test = np.asarray(list(product(x1, x2))).T
# convert testing features into Shogun representation
test_features = features(X_test)
# create Gaussian kernel with width = 2.0
kernel = GaussianKernel(10, 2)
# create zero mean function
zero_mean = ZeroMean()
# you can easily switch between probit and logit likelihood models
# by uncommenting/commenting the following lines:
# create probit likelihood model
# lik = ProbitLikelihood()
# create logit likelihood model
lik = LogitLikelihood()
# you can easily switch between Laplace and EP approximation by
# uncommenting/commenting the following lines:
# specify Laplace approximation inference method
#inf = LaplacianInferenceMethod(kernel, train_features, zero_mean, train_labels, lik)
# specify EP approximation inference method
inf = EPInferenceMethod(kernel, train_features, zero_mean, train_labels, lik)
# EP might not converge, we here allow that without errors
inf.set_fail_on_non_convergence(False)
# create and train GP classifier, which uses Laplace approximation
gp = GaussianProcessClassification(inf)
gp.train()
test_labels=gp.apply(test_features)
# plot data and decision boundary
plot_binary_data(X_train, y_train)
plt.pcolor(x1, x2, test_labels.get_labels().reshape(n_test, n_test))
_=plt.title('Decision boundary')
# obtain probabilities for
p_test = gp.get_probabilities(test_features)
# create figure
plt.title('Training data, predictive probability and decision boundary')
# plot training data
plot_binary_data(X_train, y_train)
# plot decision boundary
plt.contour(x1, x2, np.reshape(p_test, (n_test, n_test)), levels=[0.5], colors=('black'))
# plot probabilities
plt.pcolor(x1, x2, p_test.reshape(n_test, n_test))
_=plt.colorbar()
# generate some non-negative kernel widths
widths=2**np.linspace(-5,6,20)
# compute marginal likelihood under Laplace apprixmation for every width
# use Shogun objects from above
marginal_likelihoods=np.zeros(len(widths))
for i in range(len(widths)):
# note that GP training is automatically done/updated if a parameter is changed. No need to call train again
kernel.set_width(widths[i])
marginal_likelihoods[i]=-inf.get_negative_log_marginal_likelihood()
# plot marginal likelihoods as a function of kernel width
plt.plot(np.log2(widths), marginal_likelihoods)
plt.title("Log Marginal likelihood for different kernels")
plt.xlabel("Kernel Width in log-scale")
_=plt.ylabel("Log-Marginal Likelihood")
print("Width with largest marginal likelihood:", widths[marginal_likelihoods.argmax()])
# again, use Shogun objects from above, but a few extremal widths
widths_subset=np.array([widths[0], widths[marginal_likelihoods.argmax()], widths[len(widths)-1]])
plt.figure(figsize=(18, 5))
for i in range(len(widths_subset)):
plt.subplot(1,len(widths_subset),i+1)
kernel.set_width(widths_subset[i])
# obtain and plot predictive distribution
p_test = gp.get_probabilities(test_features)
title_str="Width=%.2f, " % widths_subset[i]
if i == 0:
title_str+="too complex, overfitting"
elif i == 1:
title_str+="just right"
else:
title_str+="too smooth, underfitting"
plt.title(title_str)
plot_binary_data(X_train, y_train)
plt.contour(x1, x2, np.reshape(p_test, (n_test, n_test)), levels=[0.5], colors=('black'))
plt.pcolor(x1, x2, p_test.reshape(n_test, n_test))
_=plt.colorbar()
# re-create inference method and GP instance to start from scratch, use other Shogun structures from above
inf = EPInferenceMethod(kernel, train_features, zero_mean, train_labels, lik)
# EP might not converge, we here allow that without errors
inf.set_fail_on_non_convergence(False)
gp = GaussianProcessClassification(inf)
# evaluate our inference method for its derivatives
grad = GradientEvaluation(gp, train_features, train_labels, GradientCriterion(), False)
grad.put('differentiable_function', inf)
# handles all of the above structures in memory
grad_search = GradientModelSelection(grad)
# search for best parameters and store them
best_combination = grad_search.select_model()
# apply best parameters to GP
best_combination.apply_to_machine(gp)
# we have to "cast" objects to the specific kernel interface we used (soon to be easier)
best_width=GaussianKernel.obtain_from_generic(inf.get_kernel()).get_width()
best_scale=inf.get_scale()
print("Selected kernel bandwidth:", best_width)
print("Selected kernel scale:", best_scale)
# train gp
gp.train()
# visualise predictive distribution
p_test = gp.get_probabilities(test_features)
plot_binary_data(X_train, y_train)
plt.contour(x1, x2, np.reshape(p_test, (n_test, n_test)), levels=[0.5], colors=('black'))
plt.pcolor(x1, x2, p_test.reshape(n_test, n_test))
_=plt.colorbar()
# parameter space, increase resolution if you want finer plots, takes long though
resolution=5
widths=2**np.linspace(-4,10,resolution)
scales=2**np.linspace(-5,10,resolution)
# re-create inference method and GP instance to start from scratch, use other Shogun structures from above
inf = EPInferenceMethod(kernel, train_features, zero_mean, train_labels, lik)
# EP might not converge, we here allow that without errors
inf.set_fail_on_non_convergence(False)
gp = GaussianProcessClassification(inf)
inf.set_tolerance(1e-3)
# compute marginal likelihood for every parameter combination
# use Shogun objects from above
marginal_likelihoods=np.zeros((len(widths), len(scales)))
for i in range(len(widths)):
for j in range(len(scales)):
kernel.set_width(widths[i])
inf.set_scale(scales[j])
marginal_likelihoods[i,j]=-inf.get_negative_log_marginal_likelihood()
# contour plot of marginal likelihood as a function of kernel width and scale
plt.contour(np.log2(widths), np.log2(scales), marginal_likelihoods)
plt.colorbar()
plt.xlabel("Kernel width (log-scale)")
plt.ylabel("Kernel scale (log-scale)")
_=plt.title("Log Marginal Likelihood")
# plot our found best parameters
_=plt.plot([np.log2(best_width)], [np.log2(best_scale)], 'r*', markersize=20)
# for measuring runtime
import time
# simple regression data
X_train, y_train, X_test, y_test = generate_regression_toy_data(n=1000)
# bring data into shogun representation (features are 2d-arrays, organised as column vectors)
feats_train=features(X_train.reshape(1,len(X_train)))
feats_test=features(X_test.reshape(1,len(X_test)))
labels_train=RegressionLabels(y_train)
# inducing features (here: a random grid over the input space, try out others)
n_inducing=10
#X_inducing=linspace(X_train.min(), X_train.max(), n_inducing)
X_inducing=np.random.rand(int(X_train.min())+n_inducing)*X_train.max()
feats_inducing=features(X_inducing.reshape(1,len(X_inducing)))
# create FITC inference method and GP instance
inf = FITCInferenceMethod(GaussianKernel(10, best_width), feats_train, ZeroMean(), labels_train, \
GaussianLikelihood(best_sigma), feats_inducing)
gp = GaussianProcessRegression(inf)
start=time.time()
gp.train()
means = gp.get_mean_vector(feats_test)
variances = gp.get_variance_vector(feats_test)
print("FITC inference took %.2f seconds" % (time.time()-start))
# exact GP
start=time.time()
inf_exact = ExactInferenceMethod(GaussianKernel(10, best_width), feats_train, ZeroMean(), labels_train, \
GaussianLikelihood(best_sigma))
inf_exact.set_scale(best_scale)
gp_exact = GaussianProcessRegression(inf_exact)
gp_exact.train()
means_exact = gp_exact.get_mean_vector(feats_test)
variances_exact = gp_exact.get_variance_vector(feats_test)
print "Exact inference took %.2f seconds" % (time.time()-start)
# comparison plot FITC and exact inference, plot 95% confidence of both predictive distributions
plt.figure(figsize=(18,5))
plt.plot(X_test, y_test, color="black", linewidth=3)
plt.plot(X_test, means, 'r--', linewidth=3)
plt.plot(X_test, means_exact, 'b--', linewidth=3)
plt.plot(X_train, y_train, 'ro')
plt.plot(X_inducing, np.zeros(len(X_inducing)), 'g*', markersize=15)
# tube plot of 95% confidence
error=1.96*np.sqrt(variances)
plt.plot(X_test,means-error, color='red', alpha=0.3, linewidth=3)
plt.fill_between(X_test,means-error,means+error,color='red', alpha=0.3)
error_exact=1.96*np.sqrt(variances_exact)
plt.plot(X_test,means_exact-error_exact, color='blue', alpha=0.3, linewidth=3)
plt.fill_between(X_test,means_exact-error_exact,means_exact+error_exact,color='blue', alpha=0.3)
# plot upper confidence lines later due to legend
plt.plot(X_test,means+error, color='red', alpha=0.3, linewidth=3)
plt.plot(X_test,means_exact+error_exact, color='blue', alpha=0.3, linewidth=3)
plt.legend(["True", "FITC prediction", "Exact prediction", "Data", "Inducing points", "95% FITC", "95% Exact"])
_=plt.title("Comparison FITC and Exact Regression")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Formal Background (Skip if you just want code examples)
Step2: Apart from its appealing form, this curve has the nice property of giving rise to analytical solutions to the required integrals. Recall these are given by
Step3: First, we compute the kernel matrix $\mathbf{C}_\boldsymbol{\theta}$ using the <a href="http
Step4: This matrix, as any kernel or covariance matrix, is positive semi-definite and symmetric. It can be viewed as a similarity matrix. Here, elements on the diagonal (corresponding to $\mathbf{x}=\mathbf{x}'$) have largest similarity. For increasing kernel bandwidth $\tau$, more and more elements are similar. This matrix fully specifies a distribution over functions $f(\mathbf{x})
Step5: Note how the functions are exactly evaluated at the training covariates $\mathbf{x}_i$, which are randomly distributed on the x-axis. Even though these points do not visualise the full functions (we can only evaluate them at a finite number of points, but we connected the points with lines to make it more clear), this reveals that larger values of the kernel bandwidth $\tau$ lead to smoother latent Gaussian functions.
Step6: Note how the above function samples are constrained to go through our training data labels (up to observation noise), as much as their smoothness allows them. In fact, these are already samples from the predictive distribution, which gives a probability for a label $\mathbf{y}^*$ for any covariate $\mathbf{x}^*$. These distributions are Gaussian (!), nice to look at and extremely useful to understand the GP's underlying model. Let's plot them. We finally use the Shogun class <a href="http
Step7: The question now is
Step8: Now we can output the best parameters and plot the predictive distribution for those.
Step9: Now the predictive distribution is very close to the true data generating process.
Step10: Note how the logit function maps any input value to $[0,1]$ in a continuous way. The other plot above is for another classification likelihood implemented in Shogun: the Gaussian CDF function
Step11: We will now pass this data into Shogun representation, and use the standard Gaussian kernel (or squared exponential covariance function (<a href="http
Step12: This is already quite nice. The nice thing about Gaussian Processes now is that they are Bayesian, which means that they have a full predictive distribution, i.e., we can plot the probability for a point belonging to a class. These can be obtained via the interface of <a href="http
Step13: If you are interested in the marginal likelihood $p(\mathbf{y}|\boldsymbol{\theta})$, for example for the sake of comparing different model parameters $\boldsymbol{\theta}$ (more in model-selection later), it is very easy to compute it via the interface of <a href="http
Step14: This plot clearly shows that there is one kernel width (aka hyper-parameter element $\theta$) for which the marginal likelihood is maximised. If one were interested in the single best parameter, the above concept can be used to learn the best hyper-parameters of the GP. In fact, this is possible in a very efficient way since we have a lot of information about the geometry of the marginal likelihood function, for example its gradient
Step15: In the above plots, it is quite clear that the maximum of the marginal likelihood corresponds to the best single setting of the parameters. To give some more intuition
Step16: This now gives us a trained Gaussian Process with the best hyper-parameters. In the above setting, this is the s <a href="http
Step17: Note how nicely this predictive distribution matches the data generating distribution. Also note that the best kernel bandwidth is different from the one we saw in the above plot. This is caused by the different kernel scaling that was also learned automatically. The kernel scaling, roughly speaking, corresponds to the sharpness of the changes in the surface of the predictive likelihood. Since we have two hyper-parameters, we can plot the surface of the marginal likelihood as a function of both of them. This is sometimes interesting, for example when this surface has multiple maxima (corresponding to multiple "best" parameter settings), and thus might be useful for analysis. It is expensive however.
Step18: Our found maximum nicely matches the result of the "grid-search". The take home message for this is
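A minimal NumPy sketch of the squared-exponential kernel matrix and of drawing latent functions from the GP prior discussed in Steps 3-5 (the function names and the exact kernel parameterisation are illustrative assumptions here, not Shogun's API):
import numpy as np
def sq_exp_kernel(x1, x2, tau=2.0):
    # C[i, j] = exp(-(x_i - x_j)**2 / tau), a 1-D squared-exponential kernel with bandwidth tau
    d2 = (x1[:, None] - x2[None, :]) ** 2
    return np.exp(-d2 / tau)
x = np.linspace(-5, 5, 100)
C = sq_exp_kernel(x, x, tau=2.0)
# draw three latent functions f ~ N(0, C); the small jitter keeps the covariance numerically positive definite
f_samples = np.random.multivariate_normal(np.zeros(len(x)), C + 1e-8 * np.eye(len(x)), size=3)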
|
728 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import os.path as op
import cPickle as pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.close('all')
datadir = op.join(os.getcwd(), 'data')
#Oxygen consumption data
with open(op.join(datadir, 'o2data.pkl'), 'rb') as INPUT:
dfo2 = pickle.load(INPUT)
#Mitochondrial Network Topology data
with open(op.join(datadir, 'munged_dataframe.pkl'), 'rb') as INPUT:
df = pickle.load(INPUT)
#Mitochondrial tubule heterogeneity data
with open(op.join(datadir, 'lagedges.pkl'), 'rb') as INPT:
dflags = pickle.load(INPT)
df['lags_1'] = dflags
#Cell size data
with open(op.join(datadir, 'cellVolume.pkl'), 'rb') as INPT:
dfsize = pickle.load(INPT)
dfsize.index = dfsize.cell
plt.rcParams['font.family'] = 'DejaVu Sans'
plt.rcParams['savefig.dpi'] = 100
sns.set(style='whitegrid', context='paper',
rc={'patch.linewidth':1,
'patch.edgecolor':(.25,.25,.25),
'lines.linewidth': 1})
dfvol = pd.DataFrame({'Vol': dfsize.loc[:, 'Vol'],
'mitolen': df.loc[:, 'mito_totlen'],
'media': df.loc[:, 'media']})
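# Approximate mitochondrial volume as a cylinder: pi * r**2 * length (r = 0.15, presumably in microns)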
dfvol['mitovol'] = np.pi * (.15)**2 * dfvol.mitolen
dfvol['Vol Ratio'] = dfvol.mitovol / dfvol.Vol
# Topology (by cell)
dfcellcon = df.loc[:, ['mito_cell_avedyr',
'cell_coefvar_r',
'mito_beta_geo',
'mito_beta_top',
'mito_phi',
'mito_pk3',
'mito_avgdeg',
'mito_edgenum']]
dfcellcon = pd.concat([dfcellcon,
dfvol.loc[:, 'Vol Ratio'],
df.loc[:, 'media']], axis=1)
dfcellcon = dfcellcon[dfcellcon.mito_cell_avedyr <= 2000] # exclude hi YPE's
dfcellcon[r'$\Delta \Psi$ Unscaled'] = dfcellcon["mito_cell_avedyr"]
dfcellcon['Number of Edges'] = dfcellcon.mito_edgenum
dfcellcon['Average Degree'] = dfcellcon.mito_avgdeg
dfcellcon['O2 per mito vol'] = ''
dfcellcon['OCR per cell'] = ''
f, (ax1, ax0) = plt.subplots(2, 1,
figsize=(4.5, 3.5),
sharex=True)
sns.violinplot('media',
'Vol Ratio',
data=dfcellcon,
hue='media',
ax=ax0)
ax0.set_xlabel('')
ax0.set_ylabel('Volume Ratio')
ax0.get_legend().set_visible(False)
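# 2.5th, 97.5th and 50th percentiles of OCR per mitochondrial volume, used below as asymmetric error bars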
A = dfo2.groupby('type').quantile(0.025).values.flatten()
B = dfo2.groupby('type').quantile(0.975).values.flatten()
C = dfo2.groupby('type').quantile(0.5).values.flatten()
g = sns.barplot(x='type',
y='OCRmito',
estimator=np.median,
ci=None,
ecolor=[.25, .25, .25],
data=dfo2,
yerr=[C-A, B-C],
ax=ax1)
ax1.set_ylabel(r'OCR per mito vol /$\mu m^{3}$')
ax1.set_xlabel('')
f, ([ax0, ax1], [ax3, ax2]) = plt.subplots(2, 2,
figsize=(5.5, 4.25),
sharex=True)
ax0.yaxis.set_visible(False)
ax0.grid(False)
g = sns.barplot(x='type',
y='OCRmito',
estimator=np.median,
ci=None,
ecolor=[.25, .25, .25],
data=dfo2,
yerr=[C-A, B-C],
ax=ax1)
ax1.set_ylabel(r'OCR per mito vol /$\mu m^{3}$')
ax1.set_xlabel('')
ax1.set_yticks(np.arange(0, .015, .005))
sns.boxplot('media',
'mito_cell_avedyr',
data=dfcellcon,
notch=True,
#hue='media',
ax=ax2)
ax2.set_ylabel(r'$\Delta \Psi$ ')
ax2.set_ylim(0, 2000)
ax2.set_xlabel('')
#ax2.get_legend().set_visible(False)
sns.boxplot('media',
'Vol Ratio',
data=dfcellcon,
#hue='media',
notch=True,
#estimator=np.median,
ax=ax3)
ax3.set_ylabel('Vol Ratio')
ax3.set_ylim(0)
ax3.set_xlabel('')
#ax3.get_legend().set_visible(False)
sns.despine(bottom=True)
plt.tight_layout(w_pad=3)
plt.savefig('panel.svg')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set defaults for plotting
Step2: Dataframe and bins initialization
Step3: Plotting O2 respiration rate and mitochondrial volume ratio as a function of carbon sources shows that cells in fermentation conditions (YPD) have lower mitochondrial respiration rate and content. For cells grown in respiratory conditions (YPE, YPL and YPR) the relationship between mitochondrial content and respiration levels is mixed.
Step4: Cells in fermentation conditions (YPD) have lower mitochondrial ΔΨ, O2 and mitochondrial content compared to those in respiration. For cells grown in respiratory conditions (YPE, YPL and YPR) the relationship between mitochondrial content, respiration and ΔΨ is mixed.
|
729 | <ASSISTANT_TASK:>
Python Code:
from lib.ozapfdis.git_tc import log_numstat
GIT_REPO_DIR = "../../dropover_git/"
git_log = log_numstat(GIT_REPO_DIR)[['sha', 'file', 'author']]
git_log.head()
prod_code = git_log.copy()
prod_code = prod_code[prod_code.file.str.endswith(".java")]
prod_code = prod_code[prod_code.file.str.startswith("backend/src/main")]
prod_code = prod_code[~prod_code.file.str.endswith("package-info.java")]
prod_code.head()
prod_code['hit'] = 1
prod_code.head()
commit_matrix = prod_code.reset_index().pivot_table(
index='file',
columns='sha',
values='hit',
fill_value=0)
commit_matrix.iloc[0:5,50:55]
from sklearn.metrics.pairwise import cosine_distances
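# Cosine distance between the per-file commit vectors: 0 means the files always change together, 1 means never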
dissimilarity_matrix = cosine_distances(commit_matrix)
dissimilarity_matrix[:5,:5]
import pandas as pd
dissimilarity_df = pd.DataFrame(
dissimilarity_matrix,
index=commit_matrix.index,
columns=commit_matrix.index)
dissimilarity_df.iloc[:5,:2]
%matplotlib inline
import seaborn as sns
sns.heatmap(
dissimilarity_df,
xticklabels=False,
yticklabels=False
);
modules = dissimilarity_df.copy()
modules.index = modules.index.str.split("/").str[6]
modules.index.name = 'module'
modules.columns = modules.index
modules.iloc[25:30,25:30]
import matplotlib.pyplot as plt
plt.figure(figsize=[10,9])
sns.heatmap(modules.iloc[:180,:180]);
from sklearn.manifold import MDS
# uses a fixed seed for random_state for reproducibility
model = MDS(dissimilarity='precomputed', random_state=0)
dissimilarity_2d = model.fit_transform(dissimilarity_df)
dissimilarity_2d[:5]
plt.figure(figsize=(8,8))
x = dissimilarity_2d[:,0]
y = dissimilarity_2d[:,1]
plt.scatter(x, y);
dissimilarity_2d_df = pd.DataFrame(
dissimilarity_2d,
index=commit_matrix.index,
columns=["x", "y"])
dissimilarity_2d_df.head()
prod_code.groupby(['file', 'author'])['hit'].count().groupby(['file', 'author']).max()
dissimilarity_2d_df['module'] = dissimilarity_2d_df.index.str.split("/").str[6]
plot_data = pd.DataFrame(index=dissimilarity_2d_df['module'])
plot_data['value'] = tuple(zip(dissimilarity_2d_df['x'], dissimilarity_2d_df['y']))
plot_data['label'] = dissimilarity_2d_df.index
plot_data['data'] = plot_data[['label', 'value']].to_dict('records')
plot_dict = plot_data.groupby(plot_data.index).data.apply(list)
plot_dict
import pygal
xy_chart = pygal.XY(stroke=False)
[xy_chart.add(entry[0], entry[1]) for entry in plot_dict.items()]
# uncomment to create the interactive chart
# xy_chart.render_in_browser()
xy_chart
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In our case, we only want to check the modularization of our software for Java production code. So we just leave the files that are belonging to the main source code. What to keep here exactly is very specific to your own project. With Jupyter and pandas, we can make our decisions for this transparent and thus retraceable.
Step2: Analysis
Step3: Now, we can transform the data as we need it
Step4: As already mentioned in a previous blog post, we are now able to look at our problem from a mathematician's perspective. What we have here now with the commit_matrix is a collection of n-dimensional vectors. Each vector represents a filename and the components/dimensions of such a vector are the commits with either the value 0 or 1.
Step5: To be able to better understand the result, we add the file names from the commit_matrix as index and column index to the dissimilarity_matrix.
Step6: Now, we see the result in a better representation
Step7: Because of the alphabetically ordered filenames and the "feature-first" architecture of the software under investigation, we get the first glimpse of how changes within modules are occurring together and which are not.
Step8: Then, we can create another heatmap that shows the name of the modules on both axes for further evaluation. We also just take a look at a subset of the data for representational reasons.
Step9: Discussion
Step10: The result is a 2D matrix that we can plot with matplotlib to get a first glimpse of the distribution of the calculated distances.
Step11: With the plot above, we see that the 2D transformation somehow worked. But we can't see
Step12: Author
Step13: OK, here comes the ugly part
Step14: With this nice little data structure, we can fill pygal's XY chart and create an interactive chart.
|
730 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
with open('reviews.txt', 'r') as f:
reviews = f.read()
with open('labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for each in reviews:
reviews_ints.append([vocab_to_int[word] for word in each.split()])
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
# Filter out the review with 0 length and keep the labels aligned with the remaining reviews
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) > 0]
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
labels = np.array([labels[ii] for ii in non_zero_idx])
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, row in enumerate(reviews_ints):
features[i, -len(row):] = np.array(row)[:seq_len]
features[:10,:100]
split_frac = 0.8
split_idx = int(len(features)*0.8)
train_x, val_x = features[:split_idx], features[split_idx:]
train_y, val_y = labels[:split_idx], labels[split_idx:]
test_idx = int(len(val_x)*0.5)
val_x, test_x = val_x[:test_idx], val_x[test_idx:]
val_y, test_y = val_y[:test_idx], val_y[test_idx:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
lstm_size = 256
lstm_layers = 3
batch_size = 500
learning_rate = 0.001
n_words = len(vocab)
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
def build_cell(lstm_size, keep_prob):
    # Your basic LSTM cell
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # Add dropout to the cell
    return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

with graph.as_default():
    # Stack up multiple LSTM layers, for deep learning
    # (each layer needs its own cell instance, required by TF >= 1.1)
    cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(lstm_size, keep_prob) for _ in range(lstm_layers)])
    # Getting an initial state of all zeros
    initial_state = cell.zero_state(batch_size, tf.float32)
with graph.as_default():
outputs, final_state = tf.nn.dynamic_rnn(cell, embed,
initial_state=initial_state)
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
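# Batch generator: only full batches are yielded, a trailing partial batch is dropped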
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
# test_acc = []
# with tf.Session(graph=graph) as sess:
#     saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
# test_state = sess.run(cell.zero_state(batch_size, tf.float32))
# for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
# feed = {inputs_: x,
# labels_: y[:, None],
# keep_prob: 1,
# initial_state: test_state}
# batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
# test_acc.append(batch_acc)
# print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Encoding the words
Step3: Encoding the labels
Step4: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.
Step5: Exercise
Step6: Training, Validation, Test
Step7: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like
Step8: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Step9: Embedding
Step10: LSTM cell
Step11: RNN forward pass
Step12: Output
Step13: Validation accuracy
Step14: Batching
Step15: Training
Step16: Testing
|
731 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pprint as pp
hdfs = pd.HDFStore("../../data/raw/henrik/TestMessungen_NEU.hdf")
hdfs.keys
df1 = hdfs.get('/x1/t1/trx_1_2')
df1.head(5)
# Little function to retrieve sender-receiver tuple from df columns
import re
def extract_snd_rcv(df):
regex = r"trx_[1-4]_[1-4]_ifft_[0-9]*"
snd_rcv = {x[4:7] for x in df.columns if re.search(regex, x)}
return [(x[0],x[-1]) for x in snd_rcv]
def get_column_counts(snd_rcv, df):
col_counts = {}
for snd,rcv in snd_rcv:
col_counts['trx_{}_{}_ifft'.format(snd, rcv)] = len([i for i, word in enumerate(list(df.columns)) if word.startswith('trx_{}_{}_ifft'.format(snd, rcv))])
return col_counts
df1_snd_rcv = extract_snd_rcv(df1)
cc = get_column_counts(df1_snd_rcv, df1)
pp.pprint(cc)
print("Sum of measure columns: %i" % sum(cc.values()))
print("# of other columns: %i" % (len(df1.columns) - sum(cc.values())))
[col for col in df1.columns if 'ifft' not in col]
print(df1['target'].unique())
print("# Unique values in target: %i" % len(df1['target'].unique()))
df2 = hdfs.get('/x1/t1/trx_1_4')
df2.head()
import re
df2_snd_rcv = extract_snd_rcv(df2)
cc = get_column_counts(df2_snd_rcv, df2)
pp.pprint(cc)
print("Sum of measure columns: %i" % sum(cc.values()))
print("# of other columns: %i" % (len(df2.columns) - sum(cc.values())))
[col for col in df2.columns if 'ifft' not in col]
print(df2['target'].unique())
print("# Unique values in target: %i" % len(df2['target'].unique()))
plt.figure(figsize=(20, 15))
ax = sns.heatmap(df1.loc[:,'trx_1_2_ifft_0':'trx_1_2_ifft_1999'].values, cmap='nipy_spectral_r')
plt.figure(figsize=(20, 15))
ax = sns.heatmap(df2.loc[:,'trx_2_4_ifft_0':'trx_2_4_ifft_1999'].values, cmap='YlGnBu')
# Iterating over hdfs data and creating interim data presentation stored in data/interim/henrik/testmessungen_interim.hdf
# Interim data representation contains aditional binary class (binary_target - encoding 0=empty and 1=not empty)
# and multi class target (multi_target - encoding 0-9 for each possible class)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
interim_path = '../../data/interim/henrik/01_testmessungen.hdf'
def binary_mapper(df):
def map_binary(target):
if target.startswith('Empty'):
return 0
else:
return 1
df['binary_target'] = pd.Series(map(map_binary, df['target']))
def multiclass_mapper(df):
le.fit(df['target'])
df['multi_target'] = le.transform(df['target'])
for key in hdfs.keys():
df = hdfs.get(key)
binary_mapper(df)
multiclass_mapper(df)
df.to_hdf(interim_path, key)
hdfs.close()
from evaluation import *
from filters import *
from utility import *
from features import *
hdfs = pd.HDFStore('../../data/interim/henrik/01_testmessungen.hdf')
# generate datasets
tst = ['1','2','3']
tst_ds = []
for t in tst:
df_tst = hdfs.get('/x1/t'+t+'/trx_3_1')
lst = df_tst.columns[df_tst.columns.str.contains('_ifft_')]
#df_tst_cl,_ = distortion_filter(df_tst_cl)
groups = get_trx_groups(df_tst)
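    # Per-group radio features: standard deviation, mean and peak-to-peak over the ifft bins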
df_std = rf_grouped(df_tst, groups=groups, fn=rf_std_single, label='target')
df_mean = rf_grouped(df_tst, groups=groups, fn=rf_mean_single)
df_p2p = rf_grouped(df_tst, groups=groups, fn=rf_ptp_single) # added p2p feature
df_all = pd.concat( [df_std, df_mean, df_p2p], axis=1 ) # added p2p feature
df_all = cf_std_window(df_all, window=4, label='target')
df_tst_sum = generate_class_label_presence(df_all, state_variable='target')
# remove index column
df_tst_sum = df_tst_sum[df_tst_sum.columns.values[~df_tst_sum.columns.str.contains('index')].tolist()]
print('Columns in Dataset:',t)
print(df_tst_sum.columns)
tst_ds.append(df_tst_sum.copy())
# holdout validation
print(hold_out_val(tst_ds, target='target', include_self=False, cl='rf', verbose=False, random_state=1))
hdfs.close()
# Load hdfs data
hdfs = pd.HDFStore("../../data/raw/henrik/TestMessungen_NEU.hdf")
# Check available keys in hdf5 store
hdfs.keys
# Step-0
# Mapping groundtruth to 0-empty and 1-not empty and prepare for further preprocessing by
# removing additional timestamp columns and index column
# Storing cleaned dataframes (no index, removed _ts columns, mapped multi classes to 0-empty, 1-not empty)
# to new hdfstore to `data/interim/henrik/02_testmessungen.hdf`
hdf_path = "../../data/interim/henrik/02_tesmessungen.hdf"
dfs = []
for key in hdfs.keys():
df = hdfs.get(key)
#df['target'] = df['target'].map(lambda x: 0 if x.startswith("Empty") else 1)
# drop all time stamp columns who endswith _ts
cols = [c for c in df.columns if not c.lower().endswith("ts")]
df = df[cols]
df = df.drop('index', axis=1)
df.to_hdf(hdf_path, key)
hdfs.close()
hdfs = pd.HDFStore(hdf_path)
df = hdfs.get("/x1/t1/trx_1_2")
df.head()
# Step-1 repeating the previous taks 4 to get a comparable base result with the now dropped _ts and index column to improve from
# generate datasets
from evaluation import *
from filters import *
from utility import *
from features import *
tst = ['1','2','3']
tst_ds = []
for t in tst:
df_tst = hdfs.get('/x1/t'+t+'/trx_3_1')
lst = df_tst.columns[df_tst.columns.str.contains('_ifft_')]
#df_tst_cl,_ = distortion_filter(df_tst_cl)
df_tst,_ = distortion_filter(df_tst)
groups = get_trx_groups(df_tst)
df_std = rf_grouped(df_tst, groups=groups, fn=rf_std_single, label='target')
df_mean = rf_grouped(df_tst, groups=groups, fn=rf_mean_single)
df_p2p = rf_grouped(df_tst, groups=groups, fn=rf_ptp_single) # added p2p feature
df_kurt = rf_grouped(df_tst, groups=groups, fn=rf_kurtosis_single)
df_all = pd.concat( [df_std, df_mean, df_p2p, df_kurt], axis=1 ) # added p2p feature
df_all = cf_std_window(df_all, window=4, label='target')
df_all = cf_diff(df_all, label='target')
df_tst_sum = generate_class_label_presence(df_all, state_variable='target')
# remove index column
# df_tst_sum = df_tst_sum[df_tst_sum.columns.values[~df_tst_sum.columns.str.contains('index')].tolist()]
print('Columns in Dataset:',t)
print(df_tst_sum.columns)
tst_ds.append(df_tst_sum.copy())
print(hold_out_val(tst_ds, target='target', include_self=False, cl='dt', verbose=False, random_state=1))
# Evaluating different supervised learning methods provided in eval.py
# added a NN evaluator but there are some problems regarding usage and hidden layers
# For the moment only kurtosis and cf_diff are added to the dataset as well as the distortion filter
# Feature selection is needed right now!
for elem in ['rf', 'dt', 'nb' ,'nn','knn']:
print(hold_out_val(tst_ds, target='target', include_self=False, cl=elem, verbose=False, random_state=1))
%run -i './online.py'
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 2
Step2: Task 3
Step3: Adjust the ground-truth labels
Step4: Task 4
Step5: Task 5
Step6: Task 6
|
732 | <ASSISTANT_TASK:>
Python Code:
# Author: Joan Massich <mailsik@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import read_proj
from mne.io import read_raw_fif
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = read_raw_fif(fname)
empty_room_proj = raw.info['projs']
# Display the projections stored in `info['projs']` from the raw object
raw.plot_projs_topomap()
fig, axes = plt.subplots(1, len(empty_room_proj))
for proj, ax in zip(empty_room_proj, axes):
proj.plot_topomap(axes=ax)
assert isinstance(empty_room_proj, list)
mne.viz.plot_projs_topomap(empty_room_proj)
# read the projections
ecg_projs = read_proj(ecg_fname)
# add them to raw and plot everything
raw.add_proj(ecg_projs)
raw.plot_projs_topomap()
fig, axes = plt.subplots(1, len(ecg_projs))
for proj, ax in zip(ecg_projs, axes):
if proj['desc'].startswith('ECG-eeg'):
proj.plot_topomap(axes=ax, info=raw.info)
else:
proj.plot_topomap(axes=ax)
possible_layouts = [mne.find_layout(raw.info, ch_type=ch_type)
for ch_type in ('grad', 'mag', 'eeg')]
mne.viz.plot_projs_topomap(ecg_projs, layout=possible_layouts)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the FIF file and display the projections present in the file. Here the
Step2: Display the projections one by one
Step3: Use the function in mne.viz to display a list of projections
Step4: As shown in the tutorial on how to
Step5: Displaying the projections from a raw object requires no extra information
Step6: The correct layout or a list of layouts from where to choose can also be
|
733 | <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data.shape)
measurements = [
{'city': 'Dubai', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Francisco', 'temperature': 18.},
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec
vec.fit_transform(measurements).toarray()
vec.get_feature_names()
import os
import pandas as pd
titanic = pd.read_csv(os.path.join('datasets', 'titanic3.csv'))
print(titanic.columns)
titanic.head()
labels = titanic.survived.values
features = titanic[['pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'embarked']]
features.head()
pd.get_dummies(features).head()
features_dummies = pd.get_dummies(features, columns=['pclass', 'sex', 'embarked'])
features_dummies.head(n=16)
data = features_dummies.values
import numpy as np
np.isnan(data).any()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
train_data, test_data, train_labels, test_labels = train_test_split(
data, labels, random_state=0)
imp = Imputer()
imp.fit(train_data)
train_data_finite = imp.transform(train_data)
test_data_finite = imp.transform(test_data)
np.isnan(train_data_finite).any()
from sklearn.dummy import DummyClassifier
clf = DummyClassifier(strategy='most_frequent')
clf.fit(train_data_finite, train_labels)
print("Prediction accuracy: %f"
% clf.score(test_data_finite, test_labels))
# %load solutions/10_titanic.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: These features are
Step2: Derived Features
Step3: Here is a broad description of the keys and what they mean
Step4: We clearly want to discard the "boat" and "body" columns for any classification into survived vs not survived as they already contain this information. The name is unique to each person (probably) and also non-informative. For a first try, we will use "pclass", "sibsp", "parch", "fare" and "embarked" as our features
Step5: The data now contains only useful features, but they are not in a format that the machine learning algorithms can understand. We need to transform the strings "male" and "female" into binary variables that indicate the gender, and similarly for "embarked".
Step6: This transformation successfully encoded the string columns. However, one might argue that the class is also a categorical variable. We can explicitly list the columns to encode using the columns parameter, and include pclass
Step7: With all of the hard data loading work out of the way, evaluating a classifier on this data becomes straightforward. Setting up the simplest possible model, we want to see what the simplest score can be with DummyClassifier.
Step8: Exercise
|
734 | <ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Install seaborn (used for the confusion-matrix plot below).
!pip install seaborn
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with tf.io.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1))
return pd.DataFrame.from_dict(data)
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
pos_df = load_directory_data(os.path.join(directory, "pos"))
neg_df = load_directory_data(os.path.join(directory, "neg"))
pos_df["polarity"] = 1
neg_df["polarity"] = 0
return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)
# Download and process the dataset files.
def download_and_load_datasets(force_download=False):
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True)
train_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "train"))
test_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "test"))
return train_df, test_df
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
train_df, test_df = download_and_load_datasets()
train_df.head()
# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
train_df, train_df["polarity"], num_epochs=None, shuffle=True)
# Prediction on the whole training set.
predict_train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
train_df, train_df["polarity"], shuffle=False)
# Prediction on the test set.
predict_test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
test_df, test_df["polarity"], shuffle=False)
embedded_text_feature_column = hub.text_embedding_column(
key="sentence",
module_spec="https://tfhub.dev/google/nnlm-en-dim128/1")
estimator = tf.estimator.DNNClassifier(
hidden_units=[500, 100],
feature_columns=[embedded_text_feature_column],
n_classes=2,
    optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))
# Training for 5,000 steps means 640,000 training examples with the default
# batch size. This is roughly equivalent to 25 epochs since the training dataset
# contains 25,000 examples.
estimator.train(input_fn=train_input_fn, steps=5000);
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)
print("Training set accuracy: {accuracy}".format(**train_eval_result))
print("Test set accuracy: {accuracy}".format(**test_eval_result))
def get_predictions(estimator, input_fn):
return [x["class_ids"][0] for x in estimator.predict(input_fn=input_fn)]
LABELS = [
"negative", "positive"
]
# Create a confusion matrix on training data.
cm = tf.math.confusion_matrix(train_df["polarity"],
get_predictions(estimator, predict_train_input_fn))
# Normalize the confusion matrix so that each row sums to 1.
cm = tf.cast(cm, dtype=tf.float32)
cm = cm / tf.math.reduce_sum(cm, axis=1)[:, np.newaxis]
sns.heatmap(cm, annot=True, xticklabels=LABELS, yticklabels=LABELS);
plt.xlabel("Predicted");
plt.ylabel("True");
def train_and_evaluate_with_module(hub_module, train_module=False):
embedded_text_feature_column = hub.text_embedding_column(
key="sentence", module_spec=hub_module, trainable=train_module)
estimator = tf.estimator.DNNClassifier(
hidden_units=[500, 100],
feature_columns=[embedded_text_feature_column],
n_classes=2,
optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))
estimator.train(input_fn=train_input_fn, steps=1000)
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)
training_set_accuracy = train_eval_result["accuracy"]
test_set_accuracy = test_eval_result["accuracy"]
return {
"Training accuracy": training_set_accuracy,
"Test accuracy": test_set_accuracy
}
results = {}
results["nnlm-en-dim128"] = train_and_evaluate_with_module(
"https://tfhub.dev/google/nnlm-en-dim128/1")
results["nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
"https://tfhub.dev/google/nnlm-en-dim128/1", True)
results["random-nnlm-en-dim128"] = train_and_evaluate_with_module(
"https://tfhub.dev/google/random-nnlm-en-dim128/1")
results["random-nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
"https://tfhub.dev/google/random-nnlm-en-dim128/1", True)
pd.DataFrame.from_dict(results, orient="index")
estimator.evaluate(input_fn=predict_test_input_fn)["accuracy_baseline"]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to build a simple text classifier with TF-Hub
Step2: More detailed information about installing Tensorflow can be found at https
Step3: Getting started
Step4: Model
Step5: Feature columns
Step6: Estimator
Step7: Training
Step8: Prediction
Step9: Confusion matrix
Step10: Further improvements
Step11: Let's look at the results.
Step12: We can already see some patterns, but first we should establish the baseline accuracy of the test set - the lower bound that can be achieved by outputting only the label of the most represented class
|
735 | <ASSISTANT_TASK:>
Python Code:
import py_entitymatching as em
import os
import pandas as pd
# specify filepaths for tables A and B.
path_A = 'newTableA.csv'
path_B = 'tableB.csv'
# read table A; table A has 'ID' as the key attribute
A = em.read_csv_metadata(path_A, key='id')
# read table B; table B has 'ID' as the key attribute
B = em.read_csv_metadata(path_B, key='id')
# Impute missing values
# Manually set metadata properties, as current py_entitymatching.impute_table()
# requires 'fk_ltable', 'fk_rtable', 'ltable', 'rtable' properties
em.set_property(A, 'fk_ltable', 'id')
em.set_property(A, 'fk_rtable', 'id')
em.set_property(A, 'ltable', A)
em.set_property(A, 'rtable', A)
A_all_attrs = list(A.columns.values)
A_impute_attrs = ['year','min_num_players','max_num_players','min_gameplay_time','max_gameplay_time','min_age']
A_exclude_attrs = list(set(A_all_attrs) - set(A_impute_attrs))
A1 = em.impute_table(A, exclude_attrs=A_exclude_attrs, missing_val='NaN', strategy='most_frequent', axis=0, val_all_nans=0, verbose=True)
# Compare number of missing values to check the results
print(sum(A['min_num_players'].isnull()))
print(sum(A1['min_num_players'].isnull()))
# Do the same thing for B
em.set_property(B, 'fk_ltable', 'id')
em.set_property(B, 'fk_rtable', 'id')
em.set_property(B, 'ltable', B)
em.set_property(B, 'rtable', B)
B_all_attrs = list(B.columns.values)
# TODO: add 'min_age'
B_impute_attrs = ['year','min_num_players','max_num_players','min_gameplay_time','max_gameplay_time']
B_exclude_attrs = list(set(B_all_attrs) - set(B_impute_attrs))
B1 = em.impute_table(B, exclude_attrs=B_exclude_attrs, missing_val='NaN', strategy='most_frequent', axis=0, val_all_nans=0, verbose=True)
# Compare number of missing values to check the results
print(sum(B['min_num_players'].isnull()))
print(sum(B1['min_num_players'].isnull()))
# Load the pre-labeled data
S = em.read_csv_metadata('sample_labeled.csv',
key='_id',
ltable=A1, rtable=B1,
fk_ltable='ltable_id', fk_rtable='rtable_id')
# Split S into I an J
IJ = em.split_train_test(S, train_proportion=0.75, random_state=35)
I = IJ['train']
J = IJ['test']
corres = em.get_attr_corres(A1, B1)
print(corres)
# Generate a set of features
#import pdb; pdb.set_trace();
import py_entitymatching.feature.attributeutils as au
import py_entitymatching.feature.simfunctions as sim
import py_entitymatching.feature.tokenizers as tok
ltable = A1
rtable = B1
# Get similarity functions for generating the features for matching
sim_funcs = sim.get_sim_funs_for_matching()
# Get tokenizer functions for generating the features for matching
tok_funcs = tok.get_tokenizers_for_matching()
# Get the attribute types of the input tables
attr_types_ltable = au.get_attr_types(ltable)
attr_types_rtable = au.get_attr_types(rtable)
# Get the attribute correspondence between the input tables
attr_corres = au.get_attr_corres(ltable, rtable)
print(attr_types_ltable['name'])
print(attr_types_rtable['name'])
attr_types_ltable['name'] = 'str_bt_5w_10w'
attr_types_rtable['name'] = 'str_bt_5w_10w'
# Get the features
F = em.get_features(ltable, rtable, attr_types_ltable,
attr_types_rtable, attr_corres,
tok_funcs, sim_funcs)
#F = em.get_features_for_matching(A1, B1)
print(F['feature_name'])
#TODO get name feature!
#http://pradap-www.cs.wisc.edu/cs638/py_entitymatching/user-manual/_modules/py_entitymatching/feature/simfunctions.html#get_sim_funs_for_matching
#name_feature = em.get_feature_fn('name', em.get_tokenizers_for_matching(), em.get_sim_funs_for_matching())
#print(name_feature)
#em.add_feature(F, 'name_dist', name_feature)
#print(F['feature_name'])
def cross_validation_eval(H):
cv_iter = pd.DataFrame(columns=['Precision', 'Recall', 'F1'])
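    # Each matcher is evaluated with 5-fold cross-validation, once per metric (precision, recall, F1)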
# Matchers
matchers = [em.DTMatcher(name='DecisionTree', random_state=0),
em.RFMatcher(name='RandomForest', random_state=0),
em.SVMMatcher(name='SVM', random_state=0),
em.NBMatcher(name='NaiveBayes'),
em.LogRegMatcher(name='LogReg', random_state=0),
]
for m in matchers:
prec_result = em.select_matcher([m], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id','label'],
k=5,
target_attr='label', metric='precision', random_state=0)
recall_result = em.select_matcher([m], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id','label'],
k=5,
target_attr='label', metric='recall', random_state=0)
f1_result = em.select_matcher([m], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id','label'],
k=5,
target_attr='label', metric='f1', random_state=0)
cv_iter = cv_iter.append(
pd.DataFrame([
[prec_result['cv_stats']['Mean score'][0],
recall_result['cv_stats']['Mean score'][0],
f1_result['cv_stats']['Mean score'][0],
]],
index=[m.name],
columns=['Precision', 'Recall', 'F1']))
return cv_iter
# Subset of features we used on our first iteration
include_features = [
'min_num_players_min_num_players_lev_dist',
'max_num_players_max_num_players_lev_dist',
'min_gameplay_time_min_gameplay_time_lev_dist',
'max_gameplay_time_max_gameplay_time_lev_dist',
]
F_1 = F.loc[F['feature_name'].isin(include_features)]
# Convert the I into a set of feature vectors using F
H_1 = em.extract_feature_vecs(I, feature_table=F_1, attrs_after='label', show_progress=False)
H_1.head(10)
cross_validation_eval(H_1)
PQ = em.split_train_test(H_1, train_proportion=0.80, random_state=0)
P = PQ['train']
Q = PQ['test']
# Convert the I into a set of feature vectors using F
# Here, we add name edit distance as a feature
include_features_2 = [
'min_num_players_min_num_players_lev_dist',
'max_num_players_max_num_players_lev_dist',
'min_gameplay_time_min_gameplay_time_lev_dist',
'max_gameplay_time_max_gameplay_time_lev_dist',
'name_name_lev_dist'
]
F_2 = F.loc[F['feature_name'].isin(include_features_2)]
H_2 = em.extract_feature_vecs(I, feature_table=F_2, attrs_after='label', show_progress=False)
H_2.head(10)
# Split H into P and Q
PQ = em.split_train_test(H_2, train_proportion=0.75, random_state=0)
P = PQ['train']
Q = PQ['test']
# Convert the I into a set of feature vectors using F
# Here, we add name edit distance as a feature
include_features_3 = [
'min_num_players_min_num_players_lev_dist',
'max_num_players_max_num_players_lev_dist',
'min_gameplay_time_min_gameplay_time_lev_dist',
'max_gameplay_time_max_gameplay_time_lev_dist',
'name_name_lev_dist'
]
F_3 = F.loc[F['feature_name'].isin(include_features_3)]
H_3 = em.extract_feature_vecs(I, feature_table=F_3, attrs_after='label', show_progress=False)
cross_validation_eval(H_3)
# Convert the I into a set of feature vectors using F
# Here, we add name edit distance as a feature
include_features_4 = [
'min_num_players_min_num_players_lev_dist',
'max_num_players_max_num_players_lev_dist',
'min_gameplay_time_min_gameplay_time_lev_dist',
'max_gameplay_time_max_gameplay_time_lev_dist',
'name_name_jac_qgm_3_qgm_3'
]
F_4 = F.loc[F['feature_name'].isin(include_features_4)]
H_4 = em.extract_feature_vecs(I, feature_table=F_4, attrs_after='label', show_progress=False)
cross_validation_eval(H_4)
# Apply train, test set evaluation
I_table = em.extract_feature_vecs(I, feature_table=F_2, attrs_after='label', show_progress=False)
J_table = em.extract_feature_vecs(J, feature_table=F_2, attrs_after='label', show_progress=False)
matchers = [
#em.DTMatcher(name='DecisionTree', random_state=0),
#em.RFMatcher(name='RF', random_state=0),
#em.NBMatcher(name='NaiveBayes'),
em.LogRegMatcher(name='LogReg', random_state=0),
#em.SVMMatcher(name='SVM', random_state=0)
]
for m in matchers:
m.fit(table=I_table, exclude_attrs=['_id', 'ltable_id', 'rtable_id','label'], target_attr='label')
J_table['prediction'] = m.predict(
table=J_table,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
target_attr='label',
)
print(m.name)
em.print_eval_summary(em.eval_matches(J_table, 'label', 'prediction'))
J_table.drop('prediction', axis=1, inplace=True)
print('')
log_matcher = matchers[0]
J_table['prediction'] = m.predict(
table=J_table,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
target_attr='label',
)
print(m.name)
em.print_eval_summary(em.eval_matches(J_table, 'label', 'prediction'))
J_table.drop('prediction', axis=1, inplace=True)
print('')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filling in Missing Values
Step2: Generating Features
Step3: Cross Validation Method
Step4: Iteration 1
Step5: Iteration 2
Step6: Iteration 3
Step7: Iteration 4
Step8: Train-Test Set Accuracy
|
736 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import mglearn
from sklearn.svm import SVC
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# load and split the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
# compute minimum and maximum on the training data
scaler = MinMaxScaler().fit(X_train)
# rescale training data
X_train_scaled = scaler.transform(X_train)
svm = SVC()
# learn an SVM on the scaled training data
svm.fit(X_train_scaled, y_train)
# scale test data and score the scaled data
X_test_scaled = scaler.transform(X_test)
svm.score(X_test_scaled, y_test)
from sklearn.model_selection import GridSearchCV
# illustration purposes only, don't use this code
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=5)
grid.fit(X_train_scaled, y_train)
print("best cross-validation accuracy:", grid.best_score_)
print("test set score: ", grid.score(X_test_scaled, y_test))
print("best parameters: ", grid.best_params_)
mglearn.plots.plot_improper_processing()
from sklearn.pipeline import Pipeline
pipe = Pipeline([("scaler", MinMaxScaler()), ("svm", SVC())])
pipe.fit(X_train, y_train)
pipe.score(X_test, y_test)
param_grid = {'svm__C': [0.001, 0.01, 0.1, 1, 10, 100],
'svm__gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
print("best cross-validation accuracy:", grid.best_score_)
print("test set score: ", grid.score(X_test, y_test))
print("best parameters: ", grid.best_params_)
mglearn.plots.plot_proper_processing()
rnd = np.random.RandomState(seed=0)
X = rnd.normal(size=(100, 10000))
y = rnd.normal(size=(100,))
from sklearn.feature_selection import SelectPercentile, f_regression
select = SelectPercentile(score_func=f_regression, percentile=5).fit(X, y)
X_selected = select.transform(X)
print(X_selected.shape)
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Ridge
np.mean(cross_val_score(Ridge(), X_selected, y, cv=5))
pipe = Pipeline([("select", SelectPercentile(score_func=f_regression, percentile=5)), ("ridge", Ridge())])
np.mean(cross_val_score(pipe, X, y, cv=5))
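# Simplified sketch of what Pipeline.fit and Pipeline.predict do internally (illustration, not the actual sklearn source):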
def fit(self, X, y):
X_transformed = X
for step in self.steps[:-1]:
# iterate over all but the final step
# fit and transform the data
X_transformed = step[1].fit_transform(X_transformed, y)
# fit the last step
self.steps[-1][1].fit(X_transformed, y)
return self
def predict(self, X):
X_transformed = X
for step in self.steps[:-1]:
# iterate over all but the final step
# transform the data
X_transformed = step[1].transform(X_transformed)
# fit the last step
return self.steps[-1][1].predict(X_transformed)
# Figure: illustration of the pipeline training and prediction data flow (figures/pipeline.svg)
from sklearn.pipeline import make_pipeline
# standard syntax
pipe_long = Pipeline([("scaler", MinMaxScaler()), ("svm", SVC(C=100))])
# abbreviated syntax
pipe_short = make_pipeline(MinMaxScaler(), SVC(C=100))
pipe_short.steps
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
pipe = make_pipeline(StandardScaler(), PCA(n_components=2), StandardScaler())
pipe.steps
# fit the pipeline defined above to the cancer dataset
pipe.fit(cancer.data)
# extract the first two principal components from the "pca" step
components = pipe.named_steps["pca"].components_
print(components.shape)
from sklearn.linear_model import LogisticRegression
pipe = make_pipeline(StandardScaler(), LogisticRegression())
param_grid = {'logisticregression__C': [0.01, 0.1, 1, 10, 100]}
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=4)
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
print(grid.best_estimator_.named_steps["logisticregression"])
print(grid.best_estimator_.named_steps["logisticregression"].coef_)
from sklearn.datasets import load_boston
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=0)
from sklearn.preprocessing import PolynomialFeatures
pipe = make_pipeline(
StandardScaler(),
PolynomialFeatures(),
Ridge())
param_grid = {'polynomialfeatures__degree': [1, 2, 3],
'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, n_jobs=-1)
grid.fit(X_train, y_train)
plt.matshow(grid.cv_results_['mean_test_score'].reshape(3, -1),
vmin=0, cmap="viridis")
plt.xlabel("ridge__alpha")
plt.ylabel("polynomialfeatures__degree")
plt.xticks(range(len(param_grid['ridge__alpha'])), param_grid['ridge__alpha'])
plt.yticks(range(len(param_grid['polynomialfeatures__degree'])), param_grid['polynomialfeatures__degree'])
plt.colorbar()
print(grid.best_params_)
grid.score(X_test, y_test)
param_grid = {'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
pipe = make_pipeline(StandardScaler(), Ridge())
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
grid.score(X_test, y_test)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameter Selection with Preprocessing
Step2: Building Pipelines
Step3: Using Pipelines in Grid-searches
Step4: The General Pipeline Interface
Step5: Convenient Pipeline creation with make_pipeline
Step6: Accessing step attributes
Step7: Accessing attributes in grid-searched pipeline.
Step8: Grid-searching preprocessing steps and model parameters
|
737 | <ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from sklearn.cross_validation import ShuffleSplit
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)
# Success
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)
# TODO: Minimum price of the data
minimum_price = np.min(prices)
# TODO: Maximum price of the data
maximum_price = np.max(prices)
# TODO: Mean price of the data
mean_price = np.mean(prices)
# TODO: Median price of the data
median_price = np.median(prices)
# TODO: Standard deviation of prices of the data
std_price = np.std(prices)
# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
pt_ratio = data["RM"].reshape(-1,1)
reg.fit(pt_ratio, prices)
# Create the figure window
plt.plot(pt_ratio, reg.predict(pt_ratio), color='red', lw=1)
plt.scatter(pt_ratio, prices, alpha=0.5, c=prices)
plt.xlabel('RM')
plt.ylabel('Prices')
plt.show()
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
pt_ratio = data["LSTAT"].reshape(-1,1)
reg.fit(pt_ratio, prices)
# Create the figure window
plt.plot(pt_ratio, reg.predict(pt_ratio), color='red', lw=1)
plt.scatter(pt_ratio, prices, alpha=0.5, c=prices)
plt.xlabel('LSTAT')
plt.ylabel('Prices')
plt.show()
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
pt_ratio = data["PTRATIO"].reshape(-1,1)
reg.fit(pt_ratio, prices)
# Create the figure window
plt.plot(pt_ratio, reg.predict(pt_ratio), color='red', lw=1)
plt.scatter(pt_ratio, prices, alpha=0.5, c=prices)
plt.xlabel('PTRATIO')
plt.ylabel('Prices')
plt.show()
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
    """Calculates and returns the performance score between
    true and predicted values based on the metric chosen."""
# TODO: Calculate the performance score between 'y_true' and 'y_predict'
score = r2_score(y_true, y_predict)
# Return the score
return score
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
true, pred = [3.0, -0.5, 2.0, 7.0, 4.2],[2.5, 0.0, 2.1, 7.8, 5.3]
#plot true values
true_handle = plt.scatter(true, true, alpha=0.6, color='blue', label = 'True' )
#reference line
fit = np.poly1d(np.polyfit(true, true, 1))
lims = np.linspace(min(true)-1, max(true)+1)
plt.plot(lims, fit(lims), alpha = 0.3, color = "black")
#plot predicted values
pred_handle = plt.scatter(true, pred, alpha=0.6, color='red', label = 'Pred')
#legend & show
plt.legend(handles=[true_handle, pred_handle], loc="upper left")
plt.show()
# TODO: Import 'train_test_split'
from sklearn.cross_validation import train_test_split
# TODO: Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=0)
# Success
print "Training and testing split was successful."
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
vs.ModelComplexity(X_train, y_train)
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
from sklearn.grid_search import GridSearchCV
def fit_model(X, y):
    """Performs grid search over the 'max_depth' parameter for a
    decision tree regressor trained on the input data [X, y]."""
# Create cross-validation sets from the training data
cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
# TODO: Create a decision tree regressor object
regressor = DecisionTreeRegressor()
# TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
params = {'max_depth': range(1,11)}
# TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
scoring_fnc = make_scorer(performance_metric)
# TODO: Create the grid search object
grid = GridSearchCV(regressor, params, scoring = scoring_fnc, cv = cv_sets)
# Fit the grid search object to the data to compute the optimal model
grid = grid.fit(X, y)
# Return the optimal model after fitting the data
return grid.best_estimator_
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
# Produce a matrix for client data
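# Column order matches the model features: [RM, LSTAT, PTRATIO]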
client_data = [[5, 17, 15], # Client 1
[4, 32, 22], # Client 2
[8, 3, 12]] # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
from matplotlib import pyplot as plt
clients = np.transpose(client_data)
pred = reg.predict(client_data)
for i, feat in enumerate(['RM', 'LSTAT', 'PTRATIO']):
plt.scatter(features[feat], prices, alpha=0.25, c=prices)
plt.scatter(clients[i], pred, color='black', marker='x', linewidths=2)
plt.xlabel(feat)
plt.ylabel('MEDV')
plt.show()
vs.PredictTrials(features, prices, fit_model, client_data)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Question 1 - Feature Observation
Step3: LSTAT
Step4: PTRATIO
Step6: Developing a Model
Step7: Question 2 - Goodness of Fit
Step8: Answer
Step9: Implementation
Step10: Question 3 - Training and Testing
Step11: Question 4 - Learning the Data
Step13: Question 5 - Bias-Variance Tradeoff
Step14: Making Predictions
Step15: Answer
Step16: Answer
Step17: Sensitivity
|
738 | <ASSISTANT_TASK:>
Python Code:
from thermostate import State, Q_, units, SystemInternational as SI
from thermostate.plotting import IdealGas, VaporDome
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
substance = 'water'
T_1 = Q_(560.0, 'degC')
p_1 = Q_(16.0, 'MPa')
mdot_1 = Q_(120.0, 'kg/s')
p_2 = Q_(1.0, 'MPa')
p_3 = Q_(8.0, 'kPa')
x_4 = Q_(0.0, 'percent')
x_6 = Q_(0.0, 'percent')
p_low = Q_(0.1, 'MPa')
p_high = Q_(7.5, 'MPa')
# State 1
st_1 = State(substance, T=T_1, p=p_1)
h_1 = st_1.h.to(SI.h)
s_1 = st_1.s.to(SI.s)
# State 2
s_2 = s_1
st_2 = State(substance, p=p_2, s=s_2)
h_2 = st_2.h.to(SI.h)
T_2 = st_2.T.to(SI.T)
x_2 = st_2.x
# State 3
s_3 = s_2
st_3 = State(substance, p=p_3, s=s_3)
h_3 = st_3.h.to(SI.h)
T_3 = st_3.T.to(SI.T)
x_3 = st_3.x
# State 4
p_4 = p_3
st_4 = State(substance, p=p_4, x=x_4)
h_4 = st_4.h.to(SI.h)
s_4 = st_4.s.to(SI.s)
T_4 = st_4.T.to(SI.T)
# State 5
p_5 = p_2
s_5 = s_4
st_5 = State(substance, p=p_5, s=s_5)
h_5 = st_5.h.to(SI.h)
T_5 = st_5.T.to(SI.T)
# State 6
p_6 = p_2
st_6 = State(substance, p=p_6, x=x_6)
h_6 = st_6.h.to(SI.h)
s_6 = st_6.s.to(SI.s)
T_6 = st_6.T.to(SI.T)
# State 7
p_7 = p_1
s_7 = s_6
st_7 = State(substance, p=p_7, s=s_7)
h_7 = st_7.h.to(SI.h)
T_7 = st_7.T.to(SI.T)
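# Extraction fraction y = mdot_2/mdot_1 from an energy balance on the open feedwater heater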
y = (h_6 - h_5)/(h_2 - h_5)
Rankine = VaporDome(substance, ('s', 'T'))
Rankine.add_process(st_1, st_2, 'isentropic')
Rankine.add_process(st_2, st_3, 'isentropic')
Rankine.add_process(st_3, st_4, 'isobaric')
Rankine.add_process(st_4, st_5, 'isentropic')
Rankine.add_process(st_5, st_6, 'isobaric')
Rankine.add_process(st_6, st_7, 'isentropic')
Rankine.add_process(st_7, st_1, 'isobaric')
Wdot_net = (mdot_1*(h_1 - h_2 + (1 - y)*(h_2 - h_3) + (1 - y)*(h_4 - h_5) + (h_6 - h_7))).to('MW')
Qdot_in = (mdot_1*(h_1 - h_7)).to('MW')
eta = Wdot_net/Qdot_in
p_range = np.linspace(p_low, p_high, 100)
y_values = np.zeros(shape=p_range.shape) * units.dimensionless
eta_values = np.zeros(shape=p_range.shape) * units.dimensionless
for i, p_2 in enumerate(p_range):
# State 2
s_2 = s_1
st_2 = State(substance, p=p_2, s=s_2)
h_2 = st_2.h
# State 5
p_5 = p_2
s_5 = s_4
st_5 = State(substance, p=p_5, s=s_5)
h_5 = st_5.h
# State 6
p_6 = p_2
st_6 = State(substance, p=p_6, x=x_6)
h_6 = st_6.h
s_6 = st_6.s
# State 7
p_7 = p_1
s_7 = s_6
st_7 = State(substance, p=p_7, s=s_7)
h_7 = st_7.h
y = (h_6 - h_5)/(h_2 - h_5)
y_values[i] = y
Wdot_net = (mdot_1*(h_1 - h_2 + (1 - y)*(h_2 - h_3) + (1 - y)*(h_4 - h_5) + (h_6 - h_7))).to('MW')
Qdot_in = (mdot_1*(h_1 - h_7)).to('MW')
eta = Wdot_net/Qdot_in
eta_values[i] = eta
plt.plot(y_values, eta_values, label='eta')
plt.legend(loc='best')
plt.title(r'$\eta$ vs. $y$')
plt.xlabel(r'$y$ ($\dot{m}_2/\dot{m}_1$)')
plt.ylabel(r'$\eta$');
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definitions
Step2: Problem Statement
Step3: Plotting the T-s diagram of the cycle,
Step4: Summarizing the states
Step5: <div class="alert alert-success">
Step6: <div class="alert alert-success">
Step7: <div class="alert alert-success">
|
739 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(3)
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer='adam')
model.summary()
def input_fn():
split = tfds.Split.TRAIN
dataset = tfds.load('iris', split=split, as_supervised=True)
dataset = dataset.map(lambda features, labels: ({'dense_input':features}, labels))
dataset = dataset.batch(32).repeat()
return dataset
for features_batch, labels_batch in input_fn().take(1):
print(features_batch)
print(labels_batch)
import tempfile
model_dir = tempfile.mkdtemp()
keras_estimator = tf.keras.estimator.model_to_estimator(
keras_model=model, model_dir=model_dir)
keras_estimator.train(input_fn=input_fn, steps=500)
eval_result = keras_estimator.evaluate(input_fn=input_fn, steps=10)
print('Eval result: {}'.format(eval_result))
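# Added, hedged sketch: predictions can be pulled from the same Estimator with
# a features-only input function. The name predict_input_fn and the .take(1)
# truncation are illustrative choices, not part of the original tutorial.
def predict_input_fn():
    dataset = tfds.load('iris', split=tfds.Split.TRAIN, as_supervised=True)
    dataset = dataset.map(lambda features, labels: {'dense_input': features})
    return dataset.batch(32).take(1)

predictions = keras_estimator.predict(input_fn=predict_input_fn)
print('Sample prediction: {}'.format(next(iter(predictions))))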
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create an Estimator from a Keras model
Step2: Create a simple Keras model.
Step3: Compile the model and get a summary.
Step4: Create an input function
Step5: Test your input_fn
Step6: Create an Estimator from the tf.keras model.
Step7: Train and evaluate the Estimator.
|
740 | <ASSISTANT_TASK:>
Python Code:
import os
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyemu
import flopy
org_model_ws = os.path.join('freyberg_mf6')
os.listdir(org_model_ws)
id_arr = np.loadtxt(os.path.join(org_model_ws,"freyberg6.dis_idomain_layer3.txt"))
top_arr = np.loadtxt(os.path.join(org_model_ws,"freyberg6.dis_top.txt"))
top_arr[id_arr==0] = np.nan
plt.imshow(top_arr)
tmp_model_ws = "temp_pst_from"
if os.path.exists(tmp_model_ws):
shutil.rmtree(tmp_model_ws)
shutil.copytree(org_model_ws,tmp_model_ws)
os.listdir(tmp_model_ws)
sim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)
m = sim.get_model("freyberg6")
sr = pyemu.helpers.SpatialReference.from_namfile(
os.path.join(tmp_model_ws, "freyberg6.nam"),
delr=m.dis.delr.array, delc=m.dis.delc.array)
sr
template_ws = "freyberg6_template"
pf = pyemu.utils.PstFrom(original_d=tmp_model_ws, new_d=template_ws,
remove_existing=True,
longnames=True, spatial_reference=sr,
zero_based=False,start_datetime="1-1-2018")
df = pd.read_csv(os.path.join(tmp_model_ws,"heads.csv"),index_col=0)
df
hds_df = pf.add_observations("heads.csv",insfile="heads.csv.ins",index_cols="time",
use_cols=list(df.columns.values),prefix="hds",)
hds_df
[f for f in os.listdir(template_ws) if f.endswith(".ins")]
df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
sfr_df = pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time", use_cols=list(df.columns.values))
sfr_df
v = pyemu.geostats.ExpVario(contribution=1.0,a=1000)
grid_gs = pyemu.geostats.GeoStruct(variograms=v, transform='log')
temporal_gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(contribution=1.0,a=60))
grid_gs.plot()
print("spatial variogram")
temporal_gs.plot()
"temporal variogram (x axis in days)"
ib = m.dis.idomain[0].array
hk_arr_files = [f for f in os.listdir(tmp_model_ws) if "npf_k_" in f and f.endswith(".txt")]
hk_arr_files
pf.add_parameters(filenames="freyberg6.npf_k_layer1.txt",par_type="grid",
par_name_base="hk_layer_1",pargp="hk_layer_1",zone_array=ib,
upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01)
[f for f in os.listdir(template_ws) if f.endswith(".tpl")]
with open(os.path.join(template_ws,"hk_layer_1_inst0_grid.csv.tpl"),'r') as f:
for _ in range(2):
print(f.readline().strip())
pf.add_parameters(filenames="freyberg6.npf_k_layer3.txt",par_type="pilotpoints",
par_name_base="hk_layer_1",pargp="hk_layer_1",zone_array=ib,
upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,
pp_space=5)
xmn = m.modelgrid.xvertices.min()
xmx = m.modelgrid.xvertices.max()
ymn = m.modelgrid.yvertices.min()
ymx = m.modelgrid.yvertices.max()
numpp = 20
xvals = np.random.uniform(xmn,xmx,numpp)
yvals = np.random.uniform(ymn, ymx, numpp)
pp_locs = pd.DataFrame({"x":xvals,"y":yvals})
pp_locs.loc[:,"zone"] = 1
pp_locs.loc[:,"name"] = ["pp_{0}".format(i) for i in range(numpp)]
pp_locs.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_shapfile(pp_locs,os.path.join(template_ws,"pp_locs.shp"))
pf.add_parameters(filenames="freyberg6.npf_k_layer2.txt",par_type="pilotpoints",
par_name_base="hk_layer_1",pargp="hk_layer_1",zone_array=ib,
upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,
pp_space="pp_locs.shp")
_ = [print(line.rstrip()) for line in open("helpers.py",'r').readlines()]
assert os.path.exists("special_outputs.dat.ins")
special_ins_filename = os.path.join(template_ws,"special_outputs.dat.ins")
shutil.copy2("special_outputs.dat.ins",special_ins_filename)
pf.add_py_function("helpers.py","process_model_outputs()",is_pre_cmd=False)
out_file = special_ins_filename.replace(".ins","")
pf.add_observations_from_ins(ins_file=special_ins_filename,out_file=out_file,pst_path=".")
pst = pf.build_pst()
[f for f in os.listdir(template_ws) if f.endswith(".py")]
_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,"forward_run.py"))]
# only execute this block once!
pf.mod_sys_cmds.append("mf6")
pst = pf.build_pst()
_ = [print(line.rstrip()) for line in open(os.path.join(template_ws,"forward_run.py"))]
pf.add_parameters(filenames="freyberg6.npf_k_layer3.txt",par_type="grid",
par_name_base="hk_layer_3",pargp="hk_layer_3",zone_array=ib,
upper_bound=10.,lower_bound=0.1,ult_ubound=100,ult_lbound=0.01,
geostruct=grid_gs)
pst = pf.build_pst()
cov = pf.build_prior()
x = cov.x.copy()
x[x<0.00001] = np.NaN
plt.imshow(x)
wel_files = [f for f in os.listdir(tmp_model_ws) if "wel_stress_period" in f and f.endswith(".txt")]
wel_files
pd.read_csv(os.path.join(tmp_model_ws,wel_files[0]),header=None)
# build up a container of stress period start datetimes - this will
# be used to specify the datetime of each multipler parameter
dts = pd.to_datetime(pf.start_datetime) + pd.to_timedelta(np.cumsum(sim.tdis.perioddata.array["perlen"]),unit='d')
for wel_file in wel_files:
# get the stress period number from the file name
kper = int(wel_file.split('.')[1].split('_')[-1]) - 1
pf.add_parameters(filenames=wel_file,par_type="constant",par_name_base="wel_cn",
pargp="wel_cn", upper_bound = 1.5, lower_bound=0.5,
datetime=dts[kper],geostruct=temporal_gs)
pst = pf.build_pst()
cov = pf.build_prior(fmt="none") # skip saving to a file...
x = cov.x.copy()
x[x==0] = np.NaN
plt.imshow(x)
plt.imshow(x[-25:,-25:])
pf.add_parameters(filenames=wel_files,par_type="grid",par_name_base="wel_gr",
pargp="wel_gr", upper_bound = 1.5, lower_bound=0.5,
geostruct=grid_gs)
pst = pf.build_pst()
cov = pf.build_prior(fmt="none")
x = cov.x.copy()
x[x==0] = np.NaN
plt.imshow(x[-49:,-49:])
tpl_filename = os.path.join(template_ws,"special_pars.dat.tpl")
with open(tpl_filename,'w') as f:
f.write("ptf ~\n")
f.write("special_par1 ~ special_par1 ~\n")
f.write("special_par2 ~ special_par2 ~\n")
pf.pst.add_parameters(tpl_filename,pst_path=".")
par = pf.pst.parameter_data
par.loc[pf.pst.par_names[0],"partrans"] = "tied"
par.loc[pf.pst.par_names[0],"partied"] = pf.pst.par_names[1]
par.loc[pf.pst.par_names[5:10],"parlbnd"]
par.loc[pf.pst.par_names[5:10],"parlbnd"] = 0.25
par.loc[pf.pst.par_names[5:10],"parlbnd"]
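# Added, hedged sketch (not in the original notebook): observation values and
# weights live in the control file's observation_data dataframe and can be
# edited the same way as the parameter bounds above. The "hds" name filter and
# the weight of 1.0 are purely illustrative.
obs = pf.pst.observation_data
obs.loc[obs.obsnme.str.contains("hds"), "weight"] = 1.0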
pe = pf.draw(num_reals=100,use_specsim=True)
pe.to_csv(os.path.join(template_ws,"prior.csv"))
print(pe.loc[:,pst.adj_par_names[0]])
pe.loc[:,pst.adj_par_names[0]]._df.hist()
# load the mf6 model with flopy to get the spatial reference
sim = flopy.mf6.MFSimulation.load(sim_ws=tmp_model_ws)
m = sim.get_model("freyberg6")
# work out the spatial rediscretization factor
redis_fac = m.dis.nrow.data / 40
# where the pest interface will be constructed
template_ws = tmp_model_ws.split('_')[1] + "_template"
# instantiate PstFrom object
pf = pyemu.utils.PstFrom(original_d=tmp_model_ws, new_d=template_ws,
remove_existing=True,
longnames=True, spatial_reference=m.modelgrid,
zero_based=False,start_datetime="1-1-2018")
# add observations from the sfr observation output file
df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time",
use_cols=list(df.columns.values),
prefix="sfr")
# add observations for the heads observation output file
df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0)
pf.add_observations("heads.csv", insfile="heads.csv.ins",
index_cols="time", use_cols=list(df.columns.values),
prefix="hds")
# the geostruct object for grid-scale parameters
grid_v = pyemu.geostats.ExpVario(contribution=1.0,a=500)
grid_gs = pyemu.geostats.GeoStruct(variograms=grid_v)
# the geostruct object for pilot-point-scale parameters
pp_v = pyemu.geostats.ExpVario(contribution=1.0, a=2000)
pp_gs = pyemu.geostats.GeoStruct(variograms=pp_v)
# the geostruct for recharge grid-scale parameters
rch_v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)
rch_gs = pyemu.geostats.GeoStruct(variograms=rch_v)
# the geostruct for temporal correlation
temporal_v = pyemu.geostats.ExpVario(contribution=1.0,a=60)
temporal_gs = pyemu.geostats.GeoStruct(variograms=temporal_v)
# import flopy as part of the forward run process
pf.extra_py_imports.append('flopy')
# use the idomain array for masking parameter locations
ib = m.dis.idomain[0].array
# define a dict that contains file name tags and lower/upper bound information
tags = {"npf_k_":[0.1,10.],"npf_k33_":[.1,10],"sto_ss":[.1,10],
"sto_sy":[.9,1.1],"rch_recharge":[.5,1.5]}
dts = pd.to_datetime("1-1-2018") + \
pd.to_timedelta(np.cumsum(sim.tdis.perioddata.array["perlen"]),unit="d")
# loop over each tag, bound info pair
for tag,bnd in tags.items():
lb,ub = bnd[0],bnd[1]
# find all array based files that have the tag in the name
arr_files = [f for f in os.listdir(template_ws) if tag in f
and f.endswith(".txt")]
if len(arr_files) == 0:
print("warning: no array files found for ",tag)
continue
# make sure each array file in nrow X ncol dimensions (not wrapped, sigh)
for arr_file in arr_files:
arr = np.loadtxt(os.path.join(template_ws,arr_file)).reshape(ib.shape)
np.savetxt(os.path.join(template_ws,arr_file),arr,fmt="%15.6E")
# if this is the recharge tag
if "rch" in tag:
# add one set of grid-scale parameters for all files
pf.add_parameters(filenames=arr_files, par_type="grid",
par_name_base="rch_gr",pargp="rch_gr",
zone_array=ib, upper_bound=ub,
lower_bound=lb,geostruct=rch_gs)
# add one constant parameter for each array, and
# assign it a datetime so we can work out the
# temporal correlation
for arr_file in arr_files:
kper = int(arr_file.split('.')[1].split('_')[-1]) - 1
pf.add_parameters(filenames=arr_file,par_type="constant",
par_name_base=arr_file.split('.')[1]+"_cn",
pargp="rch_const",zone_array=ib,upper_bound=ub,
lower_bound=lb,geostruct=temporal_gs,
datetime=dts[kper])
# otherwise...
else:
# for each array add both grid-scale and pilot-point scale parameters
for arr_file in arr_files:
pf.add_parameters(filenames=arr_file,par_type="grid",
par_name_base=arr_file.split('.')[1]+"_gr",
pargp=arr_file.split('.')[1]+"_gr",zone_array=ib,
upper_bound=ub,lower_bound=lb,
geostruct=grid_gs)
pf.add_parameters(filenames=arr_file, par_type="pilotpoints",
par_name_base=arr_file.split('.')[1]+"_pp",
pargp=arr_file.split('.')[1]+"_pp",
zone_array=ib,upper_bound=ub,lower_bound=lb,
pp_space=int(5 * redis_fac),geostruct=pp_gs)
# get all the list-type files associated with the wel package
list_files = [f for f in os.listdir(tmp_model_ws) if
"freyberg6.wel_stress_period_data_"
in f and f.endswith(".txt")]
# for each wel-package list-type file
for list_file in list_files:
kper = int(list_file.split(".")[1].split('_')[-1]) - 1
# add spatially constant, but temporally correlated parameter
pf.add_parameters(filenames=list_file,par_type="constant",
par_name_base="twel_mlt_{0}".format(kper),
pargp="twel_mlt".format(kper),index_cols=[0,1,2],
use_cols=[3],upper_bound=1.5,lower_bound=0.5,
datetime=dts[kper], geostruct=temporal_gs)
# add temporally indep, but spatially correlated grid-scale
# parameters, one per well
pf.add_parameters(filenames=list_file, par_type="grid",
par_name_base="wel_grid_{0}".format(kper),
pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
use_cols=[3],upper_bound=1.5, lower_bound=0.5)
# add grid-scale parameters for SFR reach conductance.
# Use layer, row, col and reach number in the
# parameter names
pf.add_parameters(filenames="freyberg6.sfr_packagedata.txt",
par_name_base="sfr_rhk",
pargp="sfr_rhk", index_cols=[0,1,2,3],
use_cols=[9], upper_bound=10.,
lower_bound=0.1,
par_type="grid")
# add model run command
pf.mod_sys_cmds.append("mf6")
# build pest control file
pst = pf.build_pst('freyberg.pst')
# draw from the prior and save the ensemble in binary format
pe = pf.draw(100, use_specsim=True)
pe.to_binary(os.path.join(template_ws, "prior.jcb"))
# set some algorithmic controls
pst.control_data.noptmax = 0
pst.pestpp_options["additional_ins_delimiters"] = ","
# write the control file
pst.write(os.path.join(pf.new_d, "freyberg.pst"))
# run with noptmax = 0
pyemu.os_utils.run("{0} freyberg.pst".format(
os.path.join("pestpp-ies")), cwd=pf.new_d)
# make sure it ran
res_file = os.path.join(pf.new_d, "freyberg.base.rei")
assert os.path.exists(res_file), res_file
pst.set_res(res_file)
print(pst.phi)
# if successful, set noptmax = -1 for prior-based Monte Carlo
pst.control_data.noptmax = -1
# define what file has the prior parameter ensemble
pst.pestpp_options["ies_par_en"] = "prior.jcb"
# write the updated pest control file
pst.write(os.path.join(pf.new_d, "freyberg.pst"))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An existing MODFLOW6 model is in the directory freyberg_mf6. Let's check it out
Step2: You can see that all the input array and list data for this model have been written "externally" - this is key to using the PstFrom class.
Step3: Now let's copy those files to a temporary location just to make sure we don't goof up those original files
Step4: Now we need just a tiny bit of info about the spatial discretization of the model - this is needed to work out separation distances between parameters for building a geostatistical prior covariance matrix later.
Step5: Here we use the simple SpatialReference pyemu implements to help us spatially locate parameters
Step6: Now we can instantiate a PstFrom class instance
Step7: Observations
Step8: The main entry point for adding observations is (surprise) PstFrom.add_observations(). This method works on the list-type observation output file. We need to tell it what column is the index column (can be string if there is a header or int if no header) and then what columns contain quantities we want to monitor (e.g. "observe") in the control file - in this case we want to monitor all columns except the index column
Step9: We can see that it returned a dataframe with lots of useful info
Step10: Nice! We also have a PEST-style instruction file for those obs.
Step11: Sweet as! Now that we have some observations, let's add parameters!
Step12: Now let's get the idomain array to use as a zone array - this keeps us from setting up parameters in inactive model cells
Step13: First, let's set up parameters for static properties - HK, VK, SS, SY. To do that, we need to find all the external array files that contain these static arrays. Let's do just HK slowly so as to explain what is happening
Step14: So those are the existing model input arrays for HK. Notice we found the files in the temporary model workspace - PstFrom will copy all those files to the new model workspace for us in a bit...
Step15: What just happened there? Well, we told our PstFrom instance to set up a set of grid-scale multiplier parameters (par_type="grid") for the array file "freyberg6.npf_k_layer1.txt". We told it to prefix the parameter names with "hk_layer_1" and also to make the parameter group "hk_layer_1" (pargp="hk_layer_1"). We specified two sets of bound information
Step16: So those might look like pretty redic parameter names, but they contain heaps of metadata to help you post process things later...
Step17: Now let's look at how to supply existing pilot point locations - to do this, we simply change the pp_space arg to a filename or a dataframe. The dataframe must have "name", "x", and "y" as columns - it can have more, but must have those. If you supply pp_space as a str, it is assumed to be a filename and the extension is the guide
Step18: Normally, you would probably put more thought in to pilot point locations, or maybe not! Now we call add_parameters and just pass the shapefile name for pp_space
Step19: Extra pre- and post-processing functions
Step20: We see that the file helpers.py contains two functions (could be more..). We want to call process_model_outputs() each time pest(++) runs the model as a post processing function. This function will yield some quantities that we want to record with an instruction. So, first, we can call the function write_ins_file() in helpers.py to build the instruction file for the special processed outputs that process_model_outputs() will produce (in this trivial example, process_model_outputs() just generates random numbers...). Note that the instruction file needs to be in the template_ws directory since it is a pest interface file.
Step21: First, we can add the function process_model_outputs() to the forward run script like this
Step22: This will copy the function process_model_outputs() from helpers.py into the forward run script that PstFrom will write. But we still need to add the instruction file into the mix - let's do that!
Step23: that pst_path argument tells PstFrom that the instruction file will be in the directory where pest(++) is running
Step24: Oh snap! we did it! thanks for playing...
Step25: Not bad! We have everything we need, including our special post processing function...except we didn't set a command to run the model! Doh!
Step26: That's better! See the pyemu.os_utils.run(r'mf6') line in main()?
Step27: let's also check out the super awesome prior parameter covariance matrix and prior parameter ensemble helpers in PstFrom
Step28: Da-um! that's sweet ez! We can see the first block of HK parameters in the upper left as "uncorrelated" (diagonal only) entries, then the second block of HK parameters (lower right) that are spatially correlated.
Step29: There are several ways to approach wel file parameterization. One way is to add a constant multiplier parameter for each stress period (that is, one scaling parameter that is applied all active wells for each stress period). Let's see how that looks, but first one important point
Step30: See the little offset in the lower right? there are a few parameters there in a small block
Step31: Those are our constant-in-space but correlated in time wel rate parameters - snap!
Step32: The upper left block is the constant-in-space but correlated-in-time wel rate multiplier parameters, while the lower right block is the constant-in-time but correlated-in-space wel rate multiplier parameters. Boom!
Step33: Tying parameters
Step34: Manipulating parameter bounds
Step35: Setting observation values and weights
Step36: Industrial strength control file setup
|
741 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
SpotCrudePrices_2013_Data= { 'U.K. Brent' :
{'2013-Q1':112.9, '2013-Q2':103.0,
'2013-Q3':110.1, '2013-Q4':109.4},
'Dubai':
{'2013-Q1':108.1, '2013-Q2':100.8,
'2013-Q3':106.1,'2013-Q4':106.7},
'West Texas Intermediate':
{'2013-Q1':94.4, '2013-Q2':94.2,
'2013-Q3':105.8,'2013-Q4':97.4}}
SpotCrudePrices_2013=pd.DataFrame.from_dict(SpotCrudePrices_2013_Data)
SpotCrudePrices_2013
dubaiPrices=SpotCrudePrices_2013['Dubai']
dubaiPrices
SpotCrudePrices_2013[['West Texas Intermediate','U.K. Brent']]
SpotCrudePrices_2013.Dubai
SpotCrudePrices_2013.columns=['Dubai','UK_Brent','West_Texas_Intermediate']
SpotCrudePrices_2013
SpotCrudePrices_2013.UK_Brent
SpotCrudePrices_2013[[1]]
SpotCrudePrices_2013[2:]
# Reverse the order of rows in DataFrame
SpotCrudePrices_2013[::-1]
# Selecting Dubai's data as Pandas Series
dubaiPrices = SpotCrudePrices_2013['Dubai']
# Obtain the last 3 rows or all rows but the first:
dubaiPrices[1:]
# Obtain all rows but the last
dubaiPrices[:-1]
# Reverse the rows
dubaiPrices[::-1]
NYC_SnowAvgsData={'Months' : ['January','February','March','April', 'November', 'December'],
'Avg SnowDays' : [4.0,2.7,1.7,0.2,0.2,2.3],
'Avg Precip. (cm)' : [17.8,22.4,9.1,1.5,0.8,12.2],
'Avg Low Temp. (F)' : [27,29,35,45,42,32] }
NYC_SnowAvgs = pd.DataFrame(NYC_SnowAvgsData,
index=NYC_SnowAvgsData['Months'],
columns=['Avg SnowDays','Avg Precip. (cm)','Avg Low Temp. (F)'])
NYC_SnowAvgs
# Using a single label:
NYC_SnowAvgs.loc['January']
# Using a list of labels
NYC_SnowAvgs.loc[['January', 'April']]
# Using a Label range:
NYC_SnowAvgs.loc['January' : 'March']
NYC_SnowAvgs.loc[:,'Avg SnowDays']
# to select a specific coordinate value
NYC_SnowAvgs.loc['March','Avg SnowDays']
# Alternative Style
NYC_SnowAvgs.loc['March']['Avg SnowDays']
# Without using loc function, square bracket as follows
NYC_SnowAvgs['Avg SnowDays']['March']
NYC_SnowAvgs.loc['March']
# Selecting months have less than one snow day average
NYC_SnowAvgs.loc[NYC_SnowAvgs['Avg SnowDays']<1,:]
# brand of crude priced above 110 a barrel for row 2013-Q1
SpotCrudePrices_2013.loc[:,SpotCrudePrices_2013.loc['2013-Q1']>110]
# Using 2 .loc for more precise selection, how cool is that
SpotCrudePrices_2013.loc['2013-Q1']>110
import scipy.constants as phys
import math
sci_values=pd.DataFrame([[math.pi, math.sin(math.pi),math.cos(math.pi)],
[math.e,math.log(math.e), phys.golden],
[phys.c,phys.g,phys.e],
[phys.m_e,phys.m_p,phys.m_n]],
index=list(range(0,20,5)))
sci_values
# Select first two rows by using integer slicing
sci_values.iloc[:2]
sci_values.iloc[2,0:2]
sci_values.iloc[10]
sci_values.loc[10]
# To Slice out a specific row
sci_values.iloc[2:3,:]
# TO obtain a cross-section using an integer position
sci_values.iloc[3]
sci_values.iloc[3,0]
sci_values.iat[3,0]
%timeit sci_values.iloc[3,0]
%timeit sci_values.iat[3,0]
stockIndexDataDF=pd.read_csv('stock_index_closing.csv')
stockIndexDataDF
stockIndexDF=stockIndexDataDF.set_index('TradingDate')
stockIndexDF
# Using a single label
stockIndexDF.ix['2014/01/30']
# Using a list of labels
stockIndexDF.ix[['2014/01/30', '2014/02/06']]
type(stockIndexDF.ix['2014/01/30'])
type(stockIndexDF.ix[['2014/01/30']])
# Using a label-based slice:
tradingDates=stockIndexDataDF.TradingDate
stockIndexDF.ix[tradingDates[:3]]
# Using a single integer:
stockIndexDF.ix[0]
# Using a list of integers:
stockIndexDF.ix[[0,2]]
# Using an integer slice:
stockIndexDF.ix[1:3]
# Using an boolean array
stockIndexDF.ix[stockIndexDF['Russell 2000']>1100]
sharesIndexDataDF=pd.read_csv('stock_index_closing.csv')
sharesIndexDataDF
# Create a MultiIndex from trading date and priceType columns
sharesIndexDF=sharesIndexDataDF.set_index(['TradingDate','PriceType'])
mIndex = sharesIndexDF.index
mIndex
sharesIndexDF
mIndex.get_level_values(0)
mIndex.get_level_values(1)
# Getting All Price Type of date
sharesIndexDF.ix['2014/02/21']
# Getting specific PriceType of date
sharesIndexDF.ix['2014/02/21','open']
# We can slice on first level
sharesIndexDF.ix['2014/02/21':'2014/02/24']
# But if we can slice at lower level:
sharesIndexDF.ix[('2014/02/21','open'):('2014/02/24','open')]
sharesIndexDF.sortlevel(0).ix[('2014/02/21','open'):('2014/02/24','open')]
# Swapping level 0 and 1 in x axis
swappedDF=sharesIndexDF[:7].swaplevel(0, 1, axis=0)
swappedDF
reorderedDF=sharesIndexDF[:7].reorder_levels(['PriceType','TradingDate'],axis=0)
reorderedDF
# Selecting price type close which are bigger than 4300 in Nasdaq
sharesIndexDataDF.ix[(sharesIndexDataDF['PriceType']=='close')&(sharesIndexDataDF['Nasdaq']>4300) ]
# Ww can also do this extensively
highSelection=sharesIndexDataDF['PriceType']=='high'
NasdaqHigh=sharesIndexDataDF['Nasdaq']<4300
sharesIndexDataDF.ix[highSelection & NasdaqHigh]
# Check values in Series
stockSeries=pd.Series(['NFLX','AMZN','GOOG','FB','TWTR'])
stockSeries.isin(['AMZN','FB'])
# We can use sub-selection to select the True values
stockSeries[stockSeries.isin(['AMZN','FB'])]
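# Added aside (small sketch): the any()/all() reductions summarise a Boolean
# selection, e.g. to test membership across the whole Series at once.
stockSeries.isin(['AMZN','FB']).any()   # True  -> at least one element matches
stockSeries.isin(['AMZN','FB']).all()   # False -> not every element matches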
# Dictionary to create a dataframe
australianMammals= {'kangaroo': {'Subclass':'marsupial','Origin':'native'},
'flying fox' : {'Subclass':'placental','Origin':'native'},
'black rat': {'Subclass':'placental','Origin':'invasive'},
'platypus' : {'Subclass':'monotreme','Origin':'native'},
'wallaby' :{'Subclass':'marsupial','Origin':'native'},
'palm squirrel' : {'Subclass':'placental','Origin':'invasive'},
'anteater': {'Subclass':'monotreme', 'Origin':'native'},
'koala': {'Subclass':'marsupial', 'Origin':'native'}}
ozzieMammalsDF = pd.DataFrame(australianMammals)
ozzieMammalsDF
aussieMammalsDF=ozzieMammalsDF.T # Transposing the data frame
aussieMammalsDF
# Selecting native animals
aussieMammalsDF.isin({'Subclass':['marsupial'],'Origin':['native']})
import numpy as np
np.random.seed(100) # Setting random generator to 100 so we can generate same results later
normvals = pd.Series([np.random.normal() for i in np.arange(10)])
normvals
# Return values bigger than 0
normvals[normvals>0]
# Return values bigger than 0, prints the same shape
# by putting NaN to other places
normvals.where(normvals>0)
# Creating DataFrame with set random values
np.random.seed(100)
normDF = pd.DataFrame([[round(np.random.normal(),3) for i in np.arange(5)] for j in range(3)],
columns=['0','30','60','90','120'])
normDF
# For DataFrames we get same shape no matter we use
normDF[normDF>0]
# For DataFrames we get same shape no matter we use
normDF.where(normDF>0)
# The inverse operation of the where is mask
normDF.mask(normDF>0)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can select the prices for the available time periods of Dubai crude oil by using the [] operator
Step2: We can also pass a list of columns to the [] operator in order to select the columns in a particular order
Step3: Rows cannot be selected with the bracket operator [] in a DataFrame.
Step4: However, this only works if the index element is a valid Python identifier, Dubai in this case is valid but U.K. Brent is not.
Step5: We can also select prices by specifying a column index number to select column 1 (U.K. Brent)
Step6: We can slice a range by using the [] operator. The syntax of the slicing operator exactly matches that of NumPy's
Step7: Label, Integer, and Mixed Indexing
Step8: Note that while using the .loc , .iloc , and .ix operators on a DataFrame, the row index must always be specified first. This is the opposite of the [] operator, where only columns can be selected directly.
Step9: We can use the .loc operator to select the rows instead
Step10: We can use selection with boolean statements, while we are selecting in Pandas.
Step11: Note that the preceding arguments involve the Boolean operators < and > that actually evaluate the Boolean arrays, for example
Step12: Integer-Oriented Indexing
Step13: Note that the arguments to .iloc are strictly positional and have nothing to do with the index values.
Step14: The .iat and .at operators can be used for a quick selection of scalar values. They are faster than them but not really common
Step15: Mixed Indexing with .ix operator
Step16: What we see from the preceding example is that the DataFrame created has an integer-based row index. We promptly set the index to be the trading date to index it based on the trading date so that we can use the .ix operator
Step17: For the former, the indexer is a scalar; for the latter, the indexer is a list. A list indexer is used to select multiple columns. A multi-column slice of a DataFrame can only result in another DataFrame since it is 2D; hence, what is returned in the latter case is a DataFrame.
Step18: As in the case of .loc , the row index must be specified first for the .ix operator.
Step19: Upon inspection, we see that the MultiIndex consists of a list of tuples. Applying the get_level_values function with the appropriate argument produces a list of the labels for each level of the index
Step20: You can achieve hierarchical indexing with a MultiIndexed DataFrame
Step21: However, this results in KeyError with a rather strange error message. The key lesson to be learned here is that the current incarnation of MultiIndex requires the labels to be sorted for the lower-level slicing routines to work correctly.
Step22: The swaplevel function enables levels within the MultiIndex to be swapped
Step23: The reorder_levels function is more general, allowing you to specify the order of the levels
Step24: Boolean Indexing
Step25: You can also create Boolean conditions in which you use arrays to filter out parts of the data
Step26: The isin and any/all methods enable the user to achieve more with Boolean indexing than the standard operators used in the preceding sections.
Step27: where() method
|
742 | <ASSISTANT_TASK:>
Python Code:
import os
class Dog(object):
def __init__(self):
self.name = "Dog"
def bark(self):
return "woof!"
class Cat(object):
def __init__(self):
self.name = "Cat"
def meow(self):
return "meow!"
class Human(object):
def __init__(self):
self.name = "Human"
def speak(self):
return "'hello'"
class Car(object):
def __init__(self):
self.name = "Car"
def make_noise(self, octane_level):
return "vroom%s" % ("!" * octane_level)
def add(x, y):
print('add')
return x + y
def sub(x, y):
print('sub')
return x - y
def mul(x, y):
print('mul')
return x * y
def div(x, y):
print('div')
return x / y
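# The notebook uses an @debug decorator below, but its definition is not shown
# in this excerpt. The following is one possible, hedged implementation that
# supports both the bare form (@debug) and the parameterised form
# (@debug(prefix='++++')), printing the wrapped function's name on each call.
import functools

def debug(func=None, *, prefix=''):
    if func is None:
        # Called with arguments, e.g. @debug(prefix='++++')
        return lambda f: debug(f, prefix=prefix)
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(prefix + func.__qualname__)
        return func(*args, **kwargs)
    return wrapper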
@debug
def add(x, y):
return x + y
@debug
def sub(x, y):
return x - y
@debug
def mul(x, y):
return x * y
@debug
def div(x, y):
return x / y
add(3,4)
@debug(prefix='++++++')
def add(x, y):
return x + y
@debug(prefix='------')
def sub(x, y):
return x - y
@debug(prefix='******')
def mul(x, y):
return x * y
@debug(prefix='//////')
def div(x, y):
return x / y
add(3,4)
sub(3,2)
import os
import sqlite3
db_filename = 'todo.db'
schema_filename = 'todo_schema.sql'
db_is_new = not os.path.exists(db_filename)
with sqlite3.connect(db_filename) as conn:
if db_is_new:
print('Creating schema')
with open(schema_filename, 'rt') as f:
schema = f.read()
conn.executescript(schema)
print('Inserting initial data')
        conn.executescript("""
        insert into project (name, description, deadline)
        values ('pymotw', 'Python Module of the Week',
                '2016-11-01');
        insert into task (details, status, deadline, project)
        values ('write about select', 'done', '2016-04-25',
                'pymotw');
        insert into task (details, status, deadline, project)
        values ('write about random', 'waiting', '2016-08-22',
                'pymotw');
        insert into task (details, status, deadline, project)
        values ('write about sqlite3', 'active', '2017-07-31',
                'pymotw');
        """)
else:
print('Database exists, assume schema does, too.')
# Next, try retrieving all of the data from the database created above (use fetchall):
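# Added, hedged sketch answering the request above: read every row back with
# fetchall(). "select *" is used so no column names from the (unseen)
# todo_schema.sql file have to be assumed.
with sqlite3.connect(db_filename) as conn:
    cursor = conn.cursor()
    cursor.execute("select * from task")
    for row in cursor.fetchall():
        print(row)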
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some readers may feel that design patterns are a little complicated - don't be discouraged. If you don't plan to make software development your career, you may never need to study them, or you may end up using them without even realising it. Even so, this material is a good way to review the concepts of classes.
Step2: Having every function print a statement telling the user which function it is currently in is far too tedious. Can you implement a decorator that automatically prints the name of the current function?
Step3: Now we want to enhance the decorator above by passing it some parameters, to make it more flexible. For example, the addition function could be passed '+++++' as an argument, the subtraction function '----', the multiplication function '****', and the division function '/////'.
Step5: Decorators are a very formulaic kind of function manipulation; in general they are just used to simplify repeated code - i.e. "don't repeat yourself": don't write duplicated code.
|
743 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy.stats import norm
from scipy.signal import convolve2d
import skimage.measure
x = np.arange(-5,5, .01)
pdf = norm.pdf(x)
data = np.random.randn(1000)
fig, ax = plt.subplots(1,2, sharex='all')
ax[0].plot(x, pdf)
ax[0].set(ylabel='PDF', xlabel='Statistical value')
ax[1].hist(data, bins=50)
ax[1].set(ylabel='counts')
fig.tight_layout()
print(f'p_n = {sum(data>2)/1000:.3f}')
print(f'p_z = {1-norm.cdf(2):.3f}')
np.random.seed(1)
# create random smoothed map
xi, yi = np.meshgrid(np.arange(-10, 11), np.arange(-10, 11))
zi = xi**2 + yi**2
zi = 1 - (zi/np.max(zi))
map = convolve2d(np.random.randn(100,100), zi,'same')
# threshold at arb value
mapt = map.copy()
mapt[(np.abs(map)<map.flatten().std()*2)] = 0
# turn binary
bw_map = mapt!=0
conn_comp = skimage.measure.label(bw_map)
fig, ax = plt.subplots(1,2,sharex='all',sharey='all')
ax[0].imshow(mapt)
ax[1].imshow(conn_comp)
print(f'There are {len(np.unique(conn_comp))} unique blobs')
def max_blob_size(img):
    """helper function to compute max blob size"""
bw_img = img != 0
blobbed = skimage.measure.label(bw_img)
num_blobs = len(np.unique(blobbed))
max_size = max([np.sum(blobbed==i) for i in range(1, num_blobs)])
return max_size
n_perms = 1000
max_sizes = []
for _ in range(n_perms):
mapt_flat = mapt.flatten()
rand_flat = np.random.permutation(mapt_flat)
mapt_permuted = rand_flat.reshape(mapt.shape)
max_sizes.append(max_blob_size(mapt_permuted))
plt.hist(max_sizes, label='null')
plt.vlines(max_blob_size(mapt), 0, 200, label='true', color='red')
plt.legend()
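# Added, hedged sketch: a cluster-size threshold taken as the 95th percentile
# of the permutation null distribution (i.e. alpha = 0.05, one-sided).
cluster_thresh = np.percentile(max_sizes, 95)
print(f'cluster-size threshold: {cluster_thresh:.1f}, '
      f'observed largest cluster: {max_blob_size(mapt)}')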
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Figure 33.1
Step2: 33.3
Step3: 33.5/6
Step5: 33.9
|
744 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from numpy import log
from scipy import integrate
from scipy.optimize import fsolve
import scipy.linalg as la
import scipy.integrate as spi
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
#from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
S0=0.995;I0=.005;R0=0.
beta=0.;gamma=0.35
n=1000
T=20
dt=T/float(n)
t=np.linspace(0,T,n+1)
plt.xlim(0,20)
plt.ylim(0,1.1)
beta=0.75
S,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]
I[i+1] = I[i]+dt*(beta*S[i]*I[i]-gamma*I[i])
#R[i+1] = 1.-S[i+1]-I[i+1]
R=1.-S-I
plt.plot(t,S,t,I,t,R)
plt.legend(['S','I','R'])
plt.grid(True)
plt.title(r'$\beta=0.75,\gamma=0.35$',size=20)
plt.xlim(0,20)
plt.ylim(0,1.1)
beta=0.75
S,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]
I[i+1] = I[i]+dt*(beta*S[i]*I[i]-gamma*I[i])
#R[i+1] = 1.-S[i+1]-I[i+1]
R=1.-S-I
def diff_eqs(INP,t):
'''The main set of equations'''
Y=np.zeros((2))
V = INP
Y[0] = - beta * V[0] * V[1]
Y[1] = beta * V[0] * V[1] - gamma * V[1]
return Y # For odeint
t_start = 0.0; t_end = T; t_inc = dt
t_range = np.arange(t_start, t_end+t_inc, t_inc)
INPUT=(S0,I0)
RES = spi.odeint(diff_eqs,INPUT,t_range)
Rec=1. - (RES[:,0]+RES[:,1])
plt.plot(t,RES[:,0],t,RES[:,1],t,Rec)
plt.legend(['S','I','R'])
plt.grid(True)
plt.title(r'$\beta=0.75,\gamma=0.35$',size=20)
beta=2.
def vis2(beta,gamma):
fig, ax = plt.subplots(figsize=(8,4),
subplot_kw={'axisbg':'#EEEEEE',
'axisbelow':True})
S0=0.995;I0=.005;R0=0.
n=1000
T=20
dt=T/float(n)
t=np.linspace(0,T,n+1)
ax.set_xlim(0,T)
ax.set_ylim(0,1.1)
S,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]
I[i+1] = I[i]+dt*(beta*S[i]*I[i]-gamma*I[i])
R[i+1] = 1.-S[i+1]-I[i+1]
ax.plot(t,S)
ax.plot(t,I)
ax.plot(t,R)
ax.set_title("SIR model")
ax.set_xlabel("Time (days)")
ax.set_ylabel("Percent of Population")
ax.legend(('S','I','R'))
ax.grid(True)
return fig
i = interact(vis2,
beta=widgets.FloatSliderWidget(min=0.0, max=5.0, step=0.1, value=2.0, description="beta"),
gamma=widgets.FloatSliderWidget(min=1.0, max=5, step=0.1, value=1.5, description="gamma"),
)
beta=2.
gamma=1.5
S0=0.95
I0=0.05
R0=0.
def vis2(beta,gamma,S0,I0):
fig, ax = plt.subplots(figsize=(8,4),
subplot_kw={'axisbg':'#EEEEEE',
'axisbelow':True})
n=1000
T=40
R0=1-S0-I0
dt=T/float(n)
t=np.linspace(0,T,n+1)
R0=gamma/beta
ax.set_xlim(0,T)
ax.set_ylim(0,1.1)
S,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]
I[i+1] = I[i]+dt*(beta*S[i]*I[i]-gamma*I[i])
R[i+1] = 1.-S[i+1]-I[i+1]
ax.plot(t,S)
ax.plot(t,I)
ax.plot(t,R)
ax.set_title('SIR model $R_0=$%f' %R0)
ax.set_xlabel("Time (days)")
ax.set_ylabel("Percent of Population")
ax.legend(('S','I','R'))
ax.grid(True)
return fig
i = interact(vis2,
beta=widgets.FloatSliderWidget(min=0.0, max=5.0, step=0.1, value=2.0, description="beta"),
gamma=widgets.FloatSliderWidget(min=1.0, max=2.0, step=0.1, value=1.5, description="gamma"),
S0=widgets.FloatSliderWidget(min=0.9, max=1.0, step=0.01, value=0.95, description="S0"),
I0=widgets.FloatSliderWidget(min=0., max=0.1, step=0.01, value=0.01, description="I0"),
)
beta=2.
gamma=1.5
S0=0.3
I0=0.05
R0=0.
def SI3(beta,gamma):
fig, ax = plt.subplots(figsize=(8,4),
subplot_kw={'axisbg':'#EEEEEE',
'axisbelow':True})
n=1000
St=np.linspace(S0,1,n+1)
ax.set_xlim(0,1)
ax.set_ylim(0,1.1)
I=gamma*log(St)/beta-St+I0+S0-gamma*log(S0)/beta
ax.plot(St,I)
ax.set_title("S-I Relation")
ax.set_xlabel("S")
ax.set_ylabel("I")
ax.grid(True)
return fig
i = interact(SI3,
beta=widgets.FloatSliderWidget(min=0.0, max=10.0, step=0.1, value=2.0, description="beta"),
gamma=widgets.FloatSliderWidget(min=1.0, max=10., step=0.1, value=1.5, description="gamma"),
)
b=8.
g=2.3
S0=0.99
I0=0.01
R0=0.
C0= I0-g/b*log(S0)+S0
print(C0)
def func(x):
return -g/b*np.log(x)+x+I0-C0
SS=fsolve(func,0.01);print(SS)
def xv0(x):
return 0*x
def intf(x):
return 1/(b*x*x-b*C0*x-g*x*log(x))
xv=np.linspace(SS,S0,100)
xv2=np.linspace(0,1,101)
plt.ylim(-50,5)
plt.text(0.2,-20,r'$\int^{S_1}_{S_0}\frac{ d S }{-\gamma S \ln S +\beta S^2-\beta C S}$',size=20)
plt.text(0.2,-30,r'$S_0=0.99,S_1=0.0358$',size=12)
plt.plot(xv2,xv0(xv2))
plt.plot(xv,intf(xv),'b--')
plt.fill_between(xv, intf(xv),0, facecolor='red', alpha=0.1)
x2 = lambda v: 1/(b*v*v-b*C0*v-g*v*log(v))
integrate.quad(x2,S0,SS)
beta=1.
alpha=4.
zeta=0.2
S0=.99
Z0=0.01
R0=0.0
T=100.
n=10000
t=np.linspace(0,T,n+1)
dt=t[1]-t[0]
S,Z,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;Z[0]=Z0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*Z[i]
Z[i+1] = Z[i]+dt*((beta-alpha)*S[i]*Z[i]+zeta*R[i])
R[i+1] = 1.-S[i+1]-Z[i+1]
plt.plot(t,S,t,Z,t,R)
plt.legend(('S','Z','R'))
print('S[100]','Z[100]','R[100]:',S[-1],Z[-1],R[-1])
t[:5]
mu=1/(70*365.0)
beta=520/365.0
k=1/14.0
gamma=1/7.0
ND=10*365.0
TS=1.0
S0=0.1
E0=1e-4
I0=1e-4
INPUT = (S0, E0, I0)
def diff_eqs(INP,t):
'''The main set of equations'''
Y=np.zeros((3))
V = INP
Y[0] = mu - beta * V[0] * V[2] - mu * V[0]
Y[1] = beta * V[0] * V[2] - k * V[1] - mu * V[1]
Y[2] = k * V[1] - gamma * V[2] - mu * V[2]
return Y # For odeint
t_start = 0.0; t_end = ND; t_inc = TS
t_range = np.arange(t_start, t_end+t_inc, t_inc)
RES = spi.odeint(diff_eqs,INPUT,t_range)
Rec=1. - (RES[:,0]+RES[:,1]+RES[:,2])
#print RES
#Ploting
plt.subplot(311)
plt.plot(RES[:,0], '-g', label='Susceptibles')
plt.title('SEIR with Births and Deaths')
plt.xlabel('Time')
plt.ylabel('Susceptibles')
plt.subplot(312)
plt.plot(RES[:,1], '-m', label='Exposed')
plt.plot(RES[:,2], '-r', label='Infectious')
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Infected')
plt.subplot(313)
plt.plot(Rec, '-k', label='Recovereds')
plt.xlabel('Time')
plt.ylabel('Recovereds')
beta=520/365.
gamma=1/7.0
#mu=1/(70*365.0)
mudays=70
def vis3(mudays,beta,gamma):
#mu=1/(70*365.0)
#beta=520/365.0
k=1/14.0
#gamma=1/7.0
mu=1/float(mudays*365)
ND=10*365.0
TS=1.0
S0=0.1
E0=1e-4
I0=1e-4
INPUT = (S0, E0, I0)
def diff_eqs(INP,t):
'''The main set of equations'''
Y=np.zeros((3))
V = INP
Y[0] = mu - beta * V[0] * V[2] - mu * V[0]
Y[1] = beta * V[0] * V[2] - k * V[1] - mu * V[1]
Y[2] = k * V[1] - gamma * V[2] - mu * V[2]
return Y # For odeint
t_start = 0.0; t_end = ND; t_inc = TS
t_range = np.arange(t_start, t_end+t_inc, t_inc)
RES = spi.odeint(diff_eqs,INPUT,t_range)
Rec=1. - (RES[:,0]+RES[:,1]+RES[:,2])
fig, ax = plt.subplots()
plt.subplot(311)
plt.plot(RES[:,0], '-g', label='Susceptibles')
plt.title('SEIR with Births and Deaths,mu=1/mudys/365')
plt.xlabel('Time')
plt.ylabel('Susceptibles')
plt.subplot(312)
plt.plot(RES[:,1], '-m', label='Exposed')
plt.plot(RES[:,2], '-r', label='Infectious')
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Infected')
plt.subplot(313)
plt.plot(Rec, '-k', label='Recovereds')
plt.xlabel('Time')
plt.ylabel('Recovereds')
return ax
i = interact(vis3,
mudays =widgets.FloatSliderWidget(min=1, max=365, step=1, value=70, description="mudays"),
beta=widgets.FloatSliderWidget(min=0.0, max=5.0, step=0.1, value=520/365., description="beta"),
gamma=widgets.FloatSliderWidget(min=0.0, max=5, step=0.1, value=1/7., description="gamma"),
)
N=64500.
S0=0.9*N;E0=0.0;I0=N-S0-E0;R0=0.;C0=0.;
beta0=0.33;beta1=0.09;
beta=beta0;gamma=1/5.61
k=1/5.3
n=10000
T=100
dt=T/float(n)
t=np.linspace(0,T,n+1)
S,E,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;E[0]=E0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]/N
E[i+1] = E[i]+dt*(beta*S[i]*I[i]/N-k*E[i])
I[i+1] = I[i]+dt*(k*E[i]-gamma*I[i])
R[i+1] = N-S[i+1]-I[i+1]-E[i+1]
plt.plot(t,S/N,t,I/N,'r--',t,E/N,'y',t,R/N,'k-')
#plt.ylim(0,1)
plt.legend(['Suspective','Infected','Exposed','Recoverd'])
beta/gamma
J=np.matrix([[-k, beta],[k, -gamma]]);J
import scipy.linalg as la
r=np.max(np.real(la.eigvals(J)));r
#r=0.07
1+(r*r+(k+gamma)*r)/k/gamma
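# Added note: the expression above converts the leading eigenvalue r of the
# linearised (E, I) subsystem into a basic reproduction number using
# R0 = 1 + (r**2 + (k + gamma)*r)/(k*gamma), i.e. R0 = (1 + r/k)*(1 + r/gamma),
# the standard relation between the initial exponential growth rate and R0
# for an SEIR-type model.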
q=0.5
tI=5
th=tI+np.log(2)/q
beta1+(beta0-beta1)*np.exp(-q*(th-tI))
def bt(b0,b1,tI,q,T,n):
beta0=b0
beta1=b1
dt=T/float(n)
t=np.linspace(0,T,n+1)
bI=np.zeros(n+1)
    nI=int(np.ceil(n*tI/T))
bI[:nI]=beta0*np.ones(nI)
bI[nI:]=beta1+(beta0-beta1)*np.exp(-q*(t[nI:]-tI))
return bI
bI=bt(beta0,beta1,5,0.29,T,n)
plt.xlim(0,100)
plt.plot(t,bI,t,(beta0+beta1)/2.*np.ones(np.size(t)))
def BRN(b):
s=np.size(b)
R=np.zeros(s)
i=0
for i in np.arange(s):
J=np.matrix([[-k, b[i]],[k, -gamma]]);
r=np.max(np.real(la.eigvals(J)));
R[i]=1+(r*r+(k+gamma)*r)/k/gamma
return R
R00=BRN(bI)
plt.plot(t,R00)
S,E,I,R=np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t)),np.zeros(len(t))
S[0]=S0;E[0]=E0;I[0]=I0;R[0]=R0
for i in range(len(t)-1):
S[i+1] = S[i]-dt*beta*S[i]*I[i]/N
E[i+1] = E[i]+dt*(beta*S[i]*I[i]/N-k*E[i])
I[i+1] = I[i]+dt*(k*E[i]-gamma*I[i])
R[i+1] = N-S[i+1]-I[i+1]-E[i+1]
plt.plot(t,S/N,t,I/N,'r--',t,E/N,'y',t,R/N,'k-',t,R00)
#plt.ylim(0,1)
plt.legend(['Suspective','Infected','Exposed','Recoverd','$R_0$'])
from sympy import Symbol, dsolve, Function, Derivative, Eq, lambdify, symbols, exp
x=Symbol('x')
y0=Symbol('y0')
y=Function('y')
f_=Derivative(y(x),x)-y(x)
sol=dsolve(Derivative(y(x),x)-y(x),y(x),ics={y(0):y0})
print(sol)
C1=Symbol('C1')
ysol=sol.subs({C1:y0})
print(ysol)
z= lambdify(x,ysol.rhs)
z= lambdify(x,y0*exp(x))
z(1)
A,N=symbols("A N")
g_=Derivative(y(x),x)-A*y(x)*(N-y(x))
sol2=dsolve(g_,y(x))
sol2
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solve system of ODE's by Scipy
Step2: Reproduction Ratio
Step3: Note
Step4: Conclusion
Step5: Ebola
Step6: Governed Differential Equations
Step7: Intervention Case
Step8: Symbolic Solution for ODEs
|
745 | <ASSISTANT_TASK:>
Python Code:
# This cell just makes sure the library paths are correct.
# You need to run this cell before you run the rest of this
# tutorial, but you can ignore the contents!
import os
import sys
module_path = os.path.abspath(os.path.join('../..'))
if module_path not in sys.path:
sys.path.append(module_path)
from allennlp.data import Token
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
review = TextField(list(map(Token, ["This", "movie", "was", "awful", "!"])), token_indexers={"tokens": SingleIdTokenIndexer()})
review_sentiment = LabelField("negative", label_namespace="tags")
# Access the original strings and labels using the methods on the Fields.
print("Tokens in TextField: ", review.tokens)
print("Label of LabelField", review_sentiment.label)
from allennlp.data import Instance
instance1 = Instance({"review": review, "label": review_sentiment})
print("Fields in instance: ", instance1.fields)
from allennlp.data import Dataset
# Create another
review2 = TextField(list(map(Token, ["This", "movie", "was", "quite", "slow", "but", "good", "."])), token_indexers={"tokens": SingleIdTokenIndexer()})
review_sentiment2 = LabelField("positive", label_namespace="tags")
instance2 = Instance({"review": review2, "label": review_sentiment2})
review_dataset = Dataset([instance1, instance2])
from allennlp.data import Vocabulary
# This will automatically create a vocab from our dataset.
# It will have "namespaces" which correspond to two things:
# 1. Namespaces passed to fields (e.g. the "tags" namespace we passed to our LabelField)
# 2. The keys of the 'Token Indexer' dictionary in 'TextFields'.
# passed to Fields (so it will have a 'tags' namespace).
vocab = Vocabulary.from_dataset(review_dataset)
print("This is the id -> word mapping for the 'tokens' namespace: ")
print(vocab.get_index_to_token_vocabulary("tokens"), "\n")
print("This is the id -> word mapping for the 'tags' namespace: ")
print(vocab.get_index_to_token_vocabulary("tags"), "\n")
print("Vocab Token to Index dictionary: ", vocab._token_to_index, "\n")
# Note that the "tags" namespace doesn't contain padding or unknown tokens.
# Next, we index our dataset using our newly generated vocabulary.
# This modifies the current object. You must perform this step before
# trying to generate arrays.
review_dataset.index_instances(vocab)
# Finally, we return the dataset as arrays, padded using padding lengths
# extracted from the dataset itself, which will be the max sentence length
# from our two instances.
padding_lengths = review_dataset.get_padding_lengths()
print("Lengths used for padding: ", padding_lengths, "\n")
tensor_dict = review_dataset.as_tensor_dict(padding_lengths)
print(tensor_dict)
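# Added aside (small sketch): individual token <-> id lookups can also be done
# directly on the Vocabulary via get_token_index / get_token_from_index.
print(vocab.get_token_index("movie", namespace="tokens"))
print(vocab.get_token_from_index(2, namespace="tokens"))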
from allennlp.data.token_indexers import TokenCharactersIndexer
word_and_character_text_field = TextField(list(map(Token, ["Here", "are", "some", "longer", "words", "."])),
token_indexers={"tokens": SingleIdTokenIndexer(), "chars": TokenCharactersIndexer()})
mini_dataset = Dataset([Instance({"sentence": word_and_character_text_field})])
# Fit a new vocabulary to this Field and index it:
word_and_char_vocab = Vocabulary.from_dataset(mini_dataset)
mini_dataset.index_instances(word_and_char_vocab)
print("This is the id -> word mapping for the 'tokens' namespace: ")
print(vocab.get_index_to_token_vocabulary("tokens"), "\n")
print("This is the id -> word mapping for the 'chars' namespace: ")
print(vocab.get_index_to_token_vocabulary("chars"), "\n")
# Now, the padding lengths method will find the max sentence length
# _and_ max word length in the batch and pad all sentences to the max
# sentence length and all words to the max word length.
padding_lengths = mini_dataset.get_padding_lengths()
print("Lengths used for padding (Note that we now have a new "
"padding key num_token_characters from the TokenCharactersIndexer): ")
print(padding_lengths, "\n")
tensor_dict = mini_dataset.as_tensor_dict(padding_lengths)
print(tensor_dict)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's create two of the most common Fields, imagining we are preparing some data for a sentiment analysis model.
Step2: Once we've made our Fields, we need to pair them together to form an Instance.
Step3: ... and once we've made our Instance, we can group several of these into a Dataset.
Step4: In order to get our tiny sentiment analysis dataset ready for use in a model, we need to be able to do a few things
Step5: Here, we've seen how to transform a dataset of 2 instances into arrays for feeding into an allennlp Model. One nice thing about the Dataset API is that we don't require the concept of a Batch - it's just a small dataset! If you are iterating over a large number of Instances, such as during training, you may want to look into allennlp.data.Iterators, which specify several different ways of iterating over a Dataset in batches, such as fixed batch sizes, bucketing and stochastic sorting.
|
746 | <ASSISTANT_TASK:>
Python Code:
%matplotlib nbagg
%config InlineBackend.figure_format='retina'
# import libraries
import numpy as np
import matplotlib as mp
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
from importlib import reload
from datetime import datetime
import laUtilities as ut
import slideUtilities as sl
import demoUtilities as dm
from IPython.display import Image
from IPython.display import display_html
from IPython.display import display
from IPython.display import Math
from IPython.display import Latex
from IPython.display import HTML
print('')
%%html
<style>
.container.slides .celltoolbar, .container.slides .hide-in-slideshow {
display: None ! important;
}
</style>
sl.hide_code_in_slideshow()
# %matplotlib qt
ax = ut.plotSetup3d(-5,5,-7,7,-10,10,figsize=(12,8))
u = np.array([3.0,1,0])
v = np.array([1.0,6,0])
w = -1.0*u -0.5*v
ax.text(u[0],u[1],u[2],r'$\bf u$',size=20)
ax.text(v[0],v[1],v[2],r'$\bf v$',size=20)
ax.text(w[0],w[1],w[2],r'$\bf w$',size=20)
#ax.text(1,-4,-10,r'Span{$\bf a_1,a_2,a_3$}',size=16)
ax.text(0,0,0,r'$\bf 0$',size=20)
# plotting the span of v
ut.plotSpan3d(ax,u,v,'Green')
ut.plotPoint3d(ax,u[0],u[1],u[2],'r')
ut.plotPoint3d(ax,v[0],v[1],v[2],'r')
ut.plotPoint3d(ax,w[0],w[1],w[2],'r')
ut.plotPoint3d(ax,0,0,0,'b')
# plotting the axes
#ut.plotIntersection3d(ax,[0,0,1,0],[0,1,0,0])
#ut.plotIntersection3d(ax,[0,0,1,0],[1,0,0,0])
#ut.plotIntersection3d(ax,[0,1,0,0],[1,0,0,0])
ax.set_title(r'Linearly Dependent',size=20)
# ax.mouse_init()
print('')
sl.hide_code_in_slideshow()
# %matplotlib qt
ax = ut.plotSetup3d(-5,5,-7,7,-10,10,figsize=(12,8))
u = np.array([3.0,1,0])
v = np.array([1.0,6,0])
w = -1.0*u -0.5*v + np.array([0.5,0,8.0])
ax.text(u[0],u[1],u[2],r'$\bf u$',size=20)
ax.text(v[0],v[1],v[2],r'$\bf v$',size=20)
ax.text(w[0],w[1],w[2],r'$\bf w$',size=20)
#ax.text(1,-4,-10,r'Span{$\bf a_1,a_2,a_3$}',size=16)
ax.text(0,0,0,r'$\bf 0$',size=20)
# plotting the span of v
ut.plotSpan3d(ax,u,v,'Green')
ut.plotPoint3d(ax,u[0],u[1],u[2],'r')
ut.plotPoint3d(ax,v[0],v[1],v[2],'r')
ut.plotPoint3d(ax,w[0],w[1],w[2],'r')
ut.plotPoint3d(ax,0,0,0,'b')
# plotting the axes
#ut.plotIntersection3d(ax,[0,0,1,0],[0,1,0,0])
#ut.plotIntersection3d(ax,[0,0,1,0],[1,0,0,0])
#ut.plotIntersection3d(ax,[0,1,0,0],[1,0,0,0])
ax.set_title(r'Linearly Independent',size=20)
# ax.mouse_init()
print('')
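# Added numeric check (hedged sketch): the rank of the matrix [u v w] separates
# the two figures: rank 2 for the linearly dependent set, rank 3 for the
# linearly independent set plotted above.
print(np.linalg.matrix_rank(np.column_stack([u, v, w])))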
# image credit: http://en.wikipedia.org/wiki/Carl_Friedrich_Gauss#mediaviewer/File:Carl_Friedrich_Gauss.jpg
sl.hide_code_in_slideshow()
display(Image("images/Carl_Friedrich_Gauss.jpg", width=450))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: %Set up useful MathJax (Latex) macros.
Step2: Notes
|
747 | <ASSISTANT_TASK:>
Python Code:
import random
import string
import mxnet as mx
from mxnet import gluon, nd
import numpy as np
max_num = 999
dataset_size = 60000
seq_len = 5
split = 0.8
batch_size = 512
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()
X = mx.random.uniform(low=0, high=max_num, shape=(dataset_size, seq_len)).astype('int32').asnumpy()
Y = X.copy()
Y.sort() #Let's sort X to get the target
print("Input {}\nTarget {}".format(X[0].tolist(), Y[0].tolist()))
vocab = string.digits + " "
print(vocab)
vocab_idx = { c:i for i,c in enumerate(vocab)}
print(vocab_idx)
max_len = len(str(max_num))*seq_len+(seq_len-1)
print("Maximum length of the string: %s" % max_len)
def transform(x, y):
x_string = ' '.join(map(str, x.tolist()))
x_string_padded = x_string + ' '*(max_len-len(x_string))
x = [vocab_idx[c] for c in x_string_padded]
y_string = ' '.join(map(str, y.tolist()))
y_string_padded = y_string + ' '*(max_len-len(y_string))
y = [vocab_idx[c] for c in y_string_padded]
return mx.nd.one_hot(mx.nd.array(x), len(vocab)), mx.nd.array(y)
split_idx = int(split*len(X))
train_dataset = gluon.data.ArrayDataset(X[:split_idx], Y[:split_idx]).transform(transform)
test_dataset = gluon.data.ArrayDataset(X[split_idx:], Y[split_idx:]).transform(transform)
print("Input {}".format(X[0]))
print("Transformed data Input {}".format(train_dataset[0][0]))
print("Target {}".format(Y[0]))
print("Transformed data Target {}".format(train_dataset[0][1]))
train_data = gluon.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=20, last_batch='rollover')
test_data = gluon.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=5, last_batch='rollover')
net = gluon.nn.HybridSequential()
with net.name_scope():
net.add(
gluon.rnn.LSTM(hidden_size=128, num_layers=2, layout='NTC', bidirectional=True),
gluon.nn.Dense(len(vocab), flatten=False)
)
net.initialize(mx.init.Xavier(), ctx=ctx)
loss = gluon.loss.SoftmaxCELoss()
schedule = mx.lr_scheduler.FactorScheduler(step=len(train_data)*10, factor=0.75)
schedule.base_lr = 0.01
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate':0.01, 'lr_scheduler':schedule})
epochs = 100
for e in range(epochs):
epoch_loss = 0.
for i, (data, label) in enumerate(train_data):
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
with mx.autograd.record():
output = net(data)
l = loss(output, label)
l.backward()
trainer.step(data.shape[0])
epoch_loss += l.mean()
print("Epoch [{}] Loss: {}, LR {}".format(e, epoch_loss.asscalar()/(i+1), trainer.learning_rate))
n = random.randint(0, len(test_data)-1)
x_orig = X[split_idx+n]
y_orig = Y[split_idx+n]
def get_pred(x):
x, _ = transform(x, x)
output = net(x.as_in_context(ctx).expand_dims(axis=0))
# Convert output back to string
pred = ''.join([vocab[int(o)] for o in output[0].argmax(axis=1).asnumpy().tolist()])
return pred
x_ = ' '.join(map(str,x_orig))
label = ' '.join(map(str,y_orig))
print("X {}\nPredicted {}\nLabel {}".format(x_, get_pred(x_orig), label))
print(get_pred(np.array([500, 30, 999, 10, 130])))
print("Only four numbers:", get_pred(np.array([105, 302, 501, 202])))
print("Small digits:", get_pred(np.array([10, 3, 5, 2, 8])))
print("Small digits, 6 numbers:", get_pred(np.array([10, 33, 52, 21, 82, 10])))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Preparation
Step2: We are getting a dataset of dataset_size sequences of integers of length seq_len between 0 and max_num. We use split*100% of them for training and the rest for testing.
Step3: For the purpose of training, we encode the input as characters rather than numbers
Step4: We write a transform that will convert our numbers into text of maximum length max_len, and one-hot encode the characters.
Step5: Creating the network
Step6: We use a learning rate schedule to improve the convergence of the model
Step7: Training loop
Step8: Testing
Step9: Printing the result
Step10: We can also pick our own example, and the network manages to sort it without problem
Step11: The model has even learned to generalize to examples not on the training set
Step12: However we can see it has trouble with other edge cases
|
748 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!sudo apt-get update
!sudo apt-get install -y xvfb ffmpeg freeglut3-dev
!pip install 'imageio==2.4.0'
!pip install pyvirtualdisplay
!pip install tf-agents
!pip install pyglet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.categorical_dqn import categorical_dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import categorical_q_network
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# Set up a virtual display for rendering OpenAI gym environments.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
env_name = "CartPole-v1" # @param {type:"string"}
num_iterations = 15000 # @param {type:"integer"}
initial_collect_steps = 1000 # @param {type:"integer"}
collect_steps_per_iteration = 1 # @param {type:"integer"}
replay_buffer_capacity = 100000 # @param {type:"integer"}
fc_layer_params = (100,)
batch_size = 64 # @param {type:"integer"}
learning_rate = 1e-3 # @param {type:"number"}
gamma = 0.99
log_interval = 200 # @param {type:"integer"}
num_atoms = 51 # @param {type:"integer"}
min_q_value = -20 # @param {type:"integer"}
max_q_value = 20 # @param {type:"integer"}
n_step_update = 2 # @param {type:"integer"}
num_eval_episodes = 10 # @param {type:"integer"}
eval_interval = 1000 # @param {type:"integer"}
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
categorical_q_net = categorical_q_network.CategoricalQNetwork(
train_env.observation_spec(),
train_env.action_spec(),
num_atoms=num_atoms,
fc_layer_params=fc_layer_params)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.Variable(0)
agent = categorical_dqn_agent.CategoricalDqnAgent(
train_env.time_step_spec(),
train_env.action_spec(),
categorical_q_network=categorical_q_net,
optimizer=optimizer,
min_q_value=min_q_value,
max_q_value=max_q_value,
n_step_update=n_step_update,
td_errors_loss_fn=common.element_wise_squared_loss,
gamma=gamma,
train_step_counter=train_step_counter)
agent.initialize()
#@test {"skip": true}
def compute_avg_return(environment, policy, num_episodes=10):
total_return = 0.0
for _ in range(num_episodes):
time_step = environment.reset()
episode_return = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
episode_return += time_step.reward
total_return += episode_return
avg_return = total_return / num_episodes
return avg_return.numpy()[0]
random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),
train_env.action_spec())
compute_avg_return(eval_env, random_policy, num_eval_episodes)
# Please also see the metrics module for standard implementations of different
# metrics.
#@test {"skip": true}
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_capacity)
def collect_step(environment, policy):
time_step = environment.current_time_step()
action_step = policy.action(time_step)
next_time_step = environment.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
# Add trajectory to the replay buffer
replay_buffer.add_batch(traj)
for _ in range(initial_collect_steps):
collect_step(train_env, random_policy)
# This loop is so common in RL, that we provide standard implementations of
# these. For more details see the drivers module.
# Dataset generates trajectories with shape [BxTx...] where
# T = n_step_update + 1.
dataset = replay_buffer.as_dataset(
num_parallel_calls=3, sample_batch_size=batch_size,
num_steps=n_step_update + 1).prefetch(3)
iterator = iter(dataset)
#@test {"skip": true}
try:
%%time
except:
pass
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
agent.train = common.function(agent.train)
# Reset the train step
agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)
returns = [avg_return]
for _ in range(num_iterations):
# Collect a few steps using collect_policy and save to the replay buffer.
for _ in range(collect_steps_per_iteration):
collect_step(train_env, agent.collect_policy)
# Sample a batch of data from the buffer and update the agent's network.
experience, unused_info = next(iterator)
train_loss = agent.train(experience)
step = agent.train_step_counter.numpy()
if step % log_interval == 0:
print('step = {0}: loss = {1}'.format(step, train_loss.loss))
if step % eval_interval == 0:
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)
print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return))
returns.append(avg_return)
#@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim(top=550)
def embed_mp4(filename):
    """Embeds an mp4 file in the notebook."""
video = open(filename,'rb').read()
b64 = base64.b64encode(video)
tag = '''
<video width="640" height="480" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>'''.format(b64.decode())
return IPython.display.HTML(tag)
num_episodes = 3
video_filename = 'imageio.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
for _ in range(num_episodes):
time_step = eval_env.reset()
video.append_data(eval_py_env.render())
while not time_step.is_last():
action_step = agent.policy.action(time_step)
time_step = eval_env.step(action_step.action)
video.append_data(eval_py_env.render())
embed_mp4(video_filename)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DQN C51/Rainbow
Step2: Hyperparameters
Step3: Environment
Step4: Agent
Step5: We also need an optimizer to train the network we just created, and a train_step_counter variable to keep track of how many times the network has been updated.
Step6: One last thing to note is that an argument was also added to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step, using the single-step return (based on the Bellman optimality equation). The single-step return is defined as follows (a reconstructed version of the equation is given after this step list).
Step7: Data collection
Step8: Training the agent
Step9: Visualization
Step11: Videos
Step12: The following code visualizes the agent's policy over a few episodes.
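The returns referred to in Step6 are not reproduced in this record; in standard form (a reconstruction based on the usual DQN formulation, not text copied from the original notebook) they are:

$G_t = R_{t+1} + \gamma V(s_{t+1})$

and, for an $n$-step update,

$G_t^{(n)} = R_{t+1} + \gamma R_{t+2} + \dots + \gamma^{n-1} R_{t+n} + \gamma^{n} V(s_{t+n})$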
|
749 | <ASSISTANT_TASK:>
Python Code:
import ipyvolume as ipv
import numpy as np
# only x is a sequence of arrays
x = np.array([[-1, -0.8], [1, -0.1], [0., 0.5]])
y = np.array([0.0, 0.0])
z = np.array([0.0, 0.0])
ipv.figure()
s = ipv.scatter(x, y, z, marker='sphere', size=10)
ipv.xyzlim(-1, 1)
ipv.animation_control(s) # shows controls for animation controls
ipv.show()
s.sequence_index = 1
# create 2d grids: x, y, and r
u = np.linspace(-10, 10, 25)
x, y = np.meshgrid(u, u)
r = np.sqrt(x**2+y**2)
print("x,y and z are of shape", x.shape)
# and turn them into 1d
x = x.flatten()
y = y.flatten()
r = r.flatten()
print("and flattened of shape", x.shape)
# create a sequence of 15 time elements
time = np.linspace(0, np.pi*2, 15)
z = np.array([(np.cos(r + t) * np.exp(-r/5)) for t in time])
print("z is of shape", z.shape)
# draw the scatter plot, and add controls with animate_glyphs
ipv.figure()
s = ipv.scatter(x, z, y, marker="sphere")
ipv.animation_control(s, interval=200)
ipv.ylim(-3,3)
ipv.show()
# Now also include, color, which containts rgb values
color = np.array([[np.cos(r + t), 1-np.abs(z[i]), 0.1+z[i]*0] for i, t in enumerate(time)])
size = (z+1)
print("color is of shape", color.shape)
color = np.transpose(color, (0, 2, 1)) # flip the last axes
ipv.figure()
s = ipv.scatter(x, z, y, color=color, size=size, marker="sphere")
ipv.animation_control(s, interval=200)
ipv.ylim(-3,3)
ipv.show()
# This is commented out, otherwise it would run on readthedocs
# def set_view(figure, framenr, fraction):
# ipv.view(fraction*360, (fraction - 0.5) * 180, distance=2 + fraction*2)
# s.size = size * (2+0.5*np.sin(fraction * 6 * np.pi))
# ipv.movie('wave.gif', set_view, fps=20, frames=40)
import ipyvolume.datasets
stream = ipyvolume.datasets.animated_stream.fetch()
print("shape of steam data", stream.data.shape) # first dimension contains x, y, z, vx, vy, vz, then time, then particle
fig = ipv.figure()
# instead of doing x=stream.data[0], y=stream.data[1], ... vz=stream.data[5], use *stream.data
# limit to 50 timesteps to avoid having a huge notebook
q = ipv.quiver(*stream.data[:,0:50,:200], color="red", size=7)
ipv.style.use("dark") # looks better
ipv.animation_control(q, interval=200)
ipv.show()
# fig.animation = 0 # set to 0 for no interpolation
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic animation
Step2: You can control which array to visualize using the scatter.sequence_index property. Actually, pylab.animate_glyphs connects the Slider and Play button to that property, but you can also set it from Python.
Step3: Animating color and size
Step4: Now we only animate the z component
Step5: color is of the wrong shape; the last dimension should contain the rgb values, i.e. the shape should be (15, 2500, 3)
Step6: Creating movie files
Step7: Resulting gif file
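The gif referenced in Step7 comes from the commented-out ipv.movie cell in the code above; a minimal way to actually produce it (essentially just uncommenting that cell, so the camera settings below are the original author's choices) would be:
def set_view(figure, framenr, fraction):
    # rotate the camera and zoom out as the animation progresses
    ipv.view(fraction*360, (fraction - 0.5) * 180, distance=2 + fraction*2)
ipv.movie('wave.gif', set_view, fps=20, frames=40)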
|
750 | <ASSISTANT_TASK:>
Python Code:
import sqlalchemy # pandas-mysql interface library
import sqlalchemy.exc # exception handling
from sqlalchemy import create_engine # needed to define db interface
import sys # for defining behavior under errors
import numpy as np # numerical libraries
import scipy as sp
import pandas as pd # for data analysis
import pandas.io.sql as sql # for interfacing with MySQL database
import matplotlib as mpl # a big library with plotting functionality
import matplotlib.pyplot as plt # a subset of matplotlib with most of the useful tools
import IPython as IP
%matplotlib inline
import pdb
#%qtconsole
pickle_dir = '../pickle_files/'
odds_file = 'odds.pkl'
matches_file = 'matches.pkl'
odds= pd.read_pickle(pickle_dir + odds_file)
matches= pd.read_pickle(pickle_dir + matches_file)
data = pd.merge(matches,odds[['PSW','PSL','key']],how='inner',on='key')
# name of database
db_name = "tennis"
# name of db user
username = "testuser"
# db password for db user
password = "test623"
# location of atp data files
atpfile_directory = "../data/tennis_atp-master/"
# focus on most recent data; exclude Davis Cup stuff
startdate = '20050101'
enddate = '20161231'
engine = create_engine('mysql+mysqldb://' + username + ':' + password + '@localhost/' + db_name)
# get unique winners and losers in our set
players = tuple(pd.concat((data.winner_id,data.loser_id)).unique())
# load all data pertinant for any player
with engine.begin() as connection:
    matches_hist = pd.read_sql_query("SELECT * FROM matches \
                    WHERE tourney_date >= '" + startdate + "' \
                    AND tourney_date <= '" + enddate + "' \
                    AND (winner_id IN %(p)s \
                    OR loser_id IN %(p)s) \
                    AND tourney_name NOT LIKE 'Davis%%';", connection, params={'p':players})
matches_hist['key'] = np.arange(len(matches_hist))
# scores are just numbers, unless something weird happened. Extract comments about irregular outcomes.
t=matches_hist.score.str.extractall('(?P<comment>[a-zA-Z]+.+)').xs(0,level='match')
matches_hist = pd.merge(matches_hist,t,how='outer',left_index=True, right_index=True)
matches_hist.comment.unique()
# discard comments and trailing white space
scores = matches_hist.score.str.replace('(?P<comment>[a-zA-Z]+.+)','')
scores = scores.str.replace('(?P<comment>\([0-9]+\))','').str.strip()
# split the game scores into columns of a dataframe
scores = scores.str.split('-|\s',expand=True)
scores.columns=['W1','L1','W2','L2','W3','L3','W4','L4','W5','L5']
scores = scores.apply(lambda x: pd.to_numeric(x))
ngames = np.sum(scores,axis=1)
matches_hist.insert(0,'ngames',ngames.astype('int'))
# sanity check: are matches involving few than 12 games identical to those with commments?
idx1 = (ngames<12)
idx2 = matches_hist.comment.notnull()
z=(idx1*1)*(idx2*1-1)
zz = np.where(np.abs(z))[0]
print("matches with weird outcomes: ")
print(matches_hist.loc[zz,'score'])
matches_hist.insert(0,'w_fsp',matches_hist.w_1stIn/matches_hist.w_svpt)
matches_hist.insert(0,'l_fsp',matches_hist.l_1stIn/matches_hist.l_svpt)
# matches_hist.loc[matches_hist.]  # incomplete expression; left commented out
matches_hist.insert(0,'w_wfs',matches_hist.w_1stWon/matches_hist.w_svpt)
matches_hist.insert(0,'l_wfs',matches_hist.l_1stWon/matches_hist.l_svpt)
matches_hist.insert(0,'w_2ndIn',matches_hist.w_svpt-matches_hist.w_df-matches_hist.w_1stIn)
matches_hist.insert(0,'l_2ndIn',matches_hist.l_svpt-matches_hist.l_df-matches_hist.l_1stIn)
matches_hist.insert(0,'w_ssp',matches_hist.w_2ndIn/(matches_hist.w_2ndIn+matches_hist.w_df))
matches_hist.insert(0,'l_ssp',matches_hist.l_2ndIn/(matches_hist.l_2ndIn+matches_hist.l_df))
matches_hist.insert(0,'w_wss',matches_hist.w_2ndWon/matches_hist.w_2ndIn)
matches_hist.insert(0,'l_wss',matches_hist.l_2ndWon/matches_hist.l_2ndIn)
#matches_hist.insert(0,'w_wsp',(matches_hist.w_1stWon + matches_hist.w_2ndWon)/matches_hist.w_svpt)
#matches_hist.insert(0,'l_wsp',(matches_hist.l_1stWon+matches_hist.l_2ndWon)/matches_hist.l_svpt)
matches_hist['w_wsp']=(matches_hist.w_1stWon + matches_hist.w_2ndWon)/matches_hist.w_svpt
matches_hist['l_wsp']=(matches_hist.l_1stWon+matches_hist.l_2ndWon)/matches_hist.l_svpt
matches_hist.insert(0,'w_wrp',(matches_hist.l_svpt - matches_hist.l_1stWon \
- matches_hist.l_2ndWon)/(matches_hist.l_svpt))
matches_hist.insert(0,'l_wrp',(matches_hist.w_svpt - matches_hist.w_1stWon \
- matches_hist.w_2ndWon)/(matches_hist.w_svpt))
matches_hist.insert(0,'w_tpw',(matches_hist.l_svpt\
-matches_hist.l_1stWon-matches_hist.l_2ndWon\
+matches_hist.w_1stWon +matches_hist.w_2ndWon)/\
(matches_hist.l_svpt + matches_hist.w_svpt))
matches_hist.insert(0,'l_tpw',(matches_hist.w_svpt\
-matches_hist.w_1stWon-matches_hist.w_2ndWon\
+matches_hist.l_1stWon +matches_hist.l_2ndWon)/\
(matches_hist.l_svpt + matches_hist.w_svpt))
idx = np.where(((matches_hist.w_SvGms == 0)|(matches_hist.l_SvGms==0)) & (matches_hist.ngames >1))
print(matches_hist.loc[idx[0],['w_df','l_df','w_SvGms','l_SvGms','score','ngames']])
matches_hist.loc[idx[0],'w_SvGms'] = matches_hist.ngames[idx[0]]/2
matches_hist.loc[idx[0],'l_SvGms'] = matches_hist.ngames[idx[0]]/2
print(matches_hist.loc[idx[0],['w_df','l_df','w_SvGms','l_SvGms','score','ngames']])
matches_hist.insert(0,'w_dfpg',matches_hist.w_df/matches_hist.w_SvGms)
matches_hist.insert(0,'l_dfpg',matches_hist.l_df/matches_hist.l_SvGms)
#matches_hist['w_dfpg']=matches_hist.w_df/matches_hist.w_SvGms
#matches_hist['l_dfpg']=matches_hist.l_df/matches_hist.l_SvGms
matches_hist.insert(0,'w_acpg',matches_hist.w_ace/matches_hist.w_SvGms)
matches_hist.insert(0,'l_acpg',matches_hist.l_ace/matches_hist.l_SvGms)
#matches_hist['w_acpg']=matches_hist.w_ace/matches_hist.w_SvGms
#matches_hist['l_acpg']=matches_hist.l_ace/matches_hist.l_SvGms
matches_hist.insert(0,'w_bps',matches_hist.w_bpSaved/matches_hist.w_bpFaced)
matches_hist.insert(0,'l_bps',matches_hist.l_bpSaved/matches_hist.l_bpFaced)
matches_hist.insert(0,'retired',0)
matches_hist.loc[(matches_hist.comment=='RET'),'retired']=1
matches_hist.insert(0,'walkover',0)
matches_hist.loc[(matches_hist.comment=='W/O'),'walkover']=1
matches_hist.insert(0,'w_complete',matches_hist.w_wsp*matches_hist.w_wrp)
matches_hist.insert(0,'l_complete',matches_hist.l_wsp*matches_hist.l_wrp)
matches_hist.insert(0,'w_serveadv',matches_hist.w_wsp-matches_hist.l_wrp)
matches_hist.insert(0,'l_serveadv',matches_hist.l_wsp-matches_hist.w_wrp)
idx = matches_hist.comment.isnull()
labels = ['dfpg', 'acpg', 'tpw', 'wrp', 'wsp', 'wss', 'wfs', 'fsp', 'ssp', 'bps','complete']
for label in labels:
printstr = label + ": max for winner/loser is {:5.2f}/{:5.2f}, min for winner/loser is {:5.2f}/{:5.2f}"
v1 = eval('matches_hist.w_' + label + '[idx].max()')
v2 = eval('matches_hist.l_' + label + '[idx].max()')
v3 = eval('matches_hist.w_' + label + '[idx].min()')
v4 = eval('matches_hist.l_' + label + '[idx].min()')
print(printstr.format(v1,v2,v3,v4))
# extract winner stats
w_records = matches_hist[['winner_id',
'tourney_date',
'tourney_id',
'match_num',
'ngames',
'key',
'w_acpg', # avg. no of aces per game
'w_dfpg', # avg no. of double faults per game
'w_tpw', # total points won
                         'w_wrp', # winning return percent
'w_wsp', # winning service percent
'w_wss', # winning second serve percent
'w_wfs', # winning first serve percent
'w_fsp', # good first serves percent
'w_ssp', # good second serves percent
'w_bps', # breakpoints saved percent
'retired',# 1 if loser retired prematurely
'walkover', # 1 if loser didn't show up
'surface', # 'Hard', 'Clay', or 'Grass'
'winner_age', # age
'winner_ht', # height
'winner_rank', # rank
'winner_rank_points' # rank points
]]
# rename columns
newcols = {'winner_id':'pid',
'tourney_date':'date',
'tourney_id':'tid',
'match_num':'mid',
'ngames':'ngames',
'key':'key',
'w_acpg':'acpg',
'w_dfpg':'dfpg',
'w_tpw':'tpw',
'w_wrp':'wrp',
'w_wsp':'wsp',
'w_wss':'wss',
'w_wfs':'wfs',
'w_fsp':'fsp',
'w_ssp':'ssp',
'w_bps':'bps',
'retired':'retired',
'walkover':'walkover',
'surface':'surface',
'winner_age':'age',
'winner_ht':'ht',
'winner_rank':'rank',
'winner_rank_points':'rank_points'
}
w_records = w_records.rename(columns = newcols)
# record that the outcome was a victory for these players
w_records['outcome'] = np.ones(len(w_records))
# extract loser stats
l_records = matches_hist[['loser_id',
'tourney_date',
'tourney_id',
'match_num',
'ngames',
'key',
'l_acpg', # avg. no of aces per game
'l_dfpg', # avg no. of double faults per game
'l_tpw', # total points won
                         'l_wrp', # winning return percent
'l_wsp', # winning service percent
'l_wss', # winning second serve percent
'l_wfs', # winning first serve percent
'l_fsp', # percent of successful first serves
'l_ssp', # percent of successful second serves
'l_bps', # percent of breakpoints saved
'retired',# 1 if loser retired prematurely
'walkover',# 1 if loser didn't show up
'surface', # 'Hard', 'Clay', or 'Grass'
'loser_age', # age
'loser_ht', # height
'loser_rank', # rank
'loser_rank_points' # rank points
]]
# rename columns
newcols = {'loser_id':'pid',
'tourney_date':'date',
'tourney_id':'tid',
'match_num':'mid',
'ngames':'ngames',
'key':'key',
'l_acpg':'acpg',
'l_dfpg':'dfpg',
'l_tpw':'tpw',
'l_wrp':'wrp',
'l_wsp':'wsp',
'l_wss':'wss',
'l_wfs':'wfs',
'l_fsp':'fsp',
'l_ssp':'ssp',
'l_bps':'bps',
'retired':'retired',
'walkover':'walkover',
'surface':'surface',
'loser_age':'age',
'loser_ht':'ht',
'loser_rank':'rank',
'loser_rank_points':'rank_points'
}
l_records = l_records.rename(columns = newcols)
# record outcome as a loss
l_records['outcome'] = np.zeros(len(w_records))
# fuse all the data into one dataframe
all_records = pd.concat([w_records,l_records]).reset_index().sort_values(['key']).replace(np.inf,np.nan)
grouped = all_records.groupby(['pid','surface'])
t=grouped['outcome'].mean()
surf_wt = t.unstack(level=-1).corr()
surf_wt
def get_static_features(data):
    """
    Description: returns differences of those features that don't depend on match histories.
    (Rank, Rankpoints, Height, Hand)
    Input: dataframe with all the match data for which features are to be calculated
    Output: another dataframe of the same length but only four columns, each with one feature
    """
# boolean, 1 means (winner,loser) are (player1,player2), 0 means the reverse
outcome = data['outcome']
# features dataframe should include merge identifiers
features = data[['tourney_id', 'match_num','tourney_date','key']].copy()
# rank (normalize)
rank=(data.loser_rank-data.winner_rank)*(-1)**outcome
features.insert(0,'rank',rank/rank.std())
# rank points (normalize)
rankpts = (data.loser_rank_points-data.winner_rank_points)*(-1)**outcome
features.insert(0,'rankpts',rankpts/rankpts.std())
# height (normalize)
height = (data.loser_ht-data.winner_ht)*(-1)**outcome
features.insert(0,'height',height/height.std())
# age (normalize)
height = (data.loser_age-data.winner_age)*(-1)**outcome
features.insert(0,'age',height/height.std())
# hand (1 for right, 0 for left)
hand = ((data.loser_hand=='R')*1-(data.winner_hand=='R')*1)*(-1)**outcome
hand.iloc[np.where((data['winner_hand']=='U')|\
(data['loser_hand']=='U'))[0]]=np.nan
features.insert(0,'hand',hand)
return features
def get_dynamic_features(x):
    """
    Input: a row of the dataframe. Needs to have the following fields:
        pid (player id number)
        tid (tournament id number)
        mid (match id number)
        date (match date)
        surface (match surface)
    Output: a Series of time- and surface-weighted average statistics for this player
    """
# extract identifiers and date from input row
pid = x['pid'] # player id
tid = x['tid'] # tourney id
mid = x['mid'] # match id
date = x['date']
surface = x['surface']
# extract all historical records for this player, from before this match
records = all_records.loc[(all_records.pid==pid) & (all_records.date <= date) &\
((all_records.tid != tid) | (all_records.mid != mid)),:].copy()
# get time discount factor
p = 0.8
t = (date - records.date).apply(lambda x: x.days/365)
t_wt = p**t
t_wt.loc[t_wt>p]=p
# get surface discount factor
s_wt = records.surface.apply(lambda x: surf_wt.loc[x,surface])
# get time and court weighted averages of serve and performance stats
t = records[['dfpg','acpg','tpw','wrp','wsp',\
'wss','wfs','fsp','ssp','bps']].mul(t_wt*s_wt,axis=0).sum(axis=0)/\
records[['dfpg','acpg','tpw','wrp','wsp',\
'wss','wfs','fsp','ssp','bps']].notnull().mul(t_wt*s_wt,axis=0).sum(axis=0)
if len(records)==0:
t['complete']=np.nan
t['retired']=np.nan
return t
# get player completeness
t['complete'] = t['wsp']*t['wrp']
# get player serveadvantage
t['serveadv'] = t['wsp']+t['wrp']
# get player "return from retirement" status
t['retired'] = records.loc[records.date==records.date.min(),'retired'].values[0]
# return a series
return t
def dynamic_feature_wrapper(x):
calls "get_dynamic_features" to extract dynamic features for each player
pids = x[['lid','wid']]
y = x.copy()
# get Player1 info
y['pid'] = pids[y['outcome']]
P1_features = get_dynamic_features(y)
# get Player0 info
y['pid'] = pids[1-y['outcome']]
P2_features = get_dynamic_features(y)
# features are differences
features = P1_features - P2_features
    # service advantage is already part of the per-player features, so the
    # difference computed above already carries it
    return features
data['outcome']=np.random.choice([0,1],size=len(data))
s_features=get_static_features(data)
s_features
x=data[['tourney_id','match_num','tourney_date','key','winner_id','loser_id','surface','outcome']].copy()
x.rename(columns={'tourney_id':'tid','match_num':'mid','tourney_date':'date',\
'winner_id':'wid','loser_id':'lid'},inplace=True)
x.iloc[0:5,:].apply(dynamic_feature_wrapper,axis=1)
#x.iloc[0:5,:]
y = matches_hist.iloc[15000]
dynamic_feature_wrapper(y)
x = all_records.iloc[13002]
records = get_dynamic_features(x)
records
# initialize dataframes to hold features for players 1 and 0
P1 = pd.DataFrame(columns=['DATE','TID','MID','PID','HAND','HT',\
'AGE','RANKPTS','RANK','ACE','DF','SVPT',\
'FSTIN','FSTWON','SNDWON','BPSAVED','BPFACED'])
P0 = pd.DataFrame(columns=['DATE','TID','MID','PID','HAND','HT',
'AGE','RANKPTS','RANK','ACE','DF','SVPT',\
'FSTIN','FSTWON','SNDWON','BPSAVED','BPFACED'])
# define a function that returns winner info if RES=1, otherwise loser info
def assign_player_1(x):
winner = pd.Series({'DATE':x['tourney_date'],\
'TID':x['tourney_id'],\
'MID':x['match_num'],\
'PID':x['winner_id'],\
'HAND':x['winner_hand'],\
'HT':x['winner_ht'],\
'AGE':x['winner_age'],\
'RANKPTS':x['winner_rank_points'],\
'RANK':x['winner_rank'],\
'ACE':x['w_ace'],\
'DF':x['w_df'],\
'SVPT':x['w_svpt'],\
'FSTIN':x['w_1stIn'],\
'FSTWON':x['w_1stWon'],\
'SNDWON':x['w_2ndWon'],\
'BPSAVED':x['w_bpSaved'],\
'BPFACED':x['w_bpFaced']})
loser = pd.Series({'DATE':x['tourney_date'],\
'TID':x['tourney_id'],\
'MID':x['match_num'],\
'PID':x['loser_id'],\
'HAND':x['loser_hand'],\
'HT':x['loser_ht'],\
'AGE':x['loser_age'],\
'RANKPTS':x['loser_rank_points'],\
'RANK':x['loser_rank'],\
'ACE':x['l_ace'],\
'DF':x['l_df'],\
'SVPT':x['l_svpt'],\
'FSTIN':x['l_1stIn'],\
'FSTWON':x['l_1stWon'],\
'SNDWON':x['l_2ndWon'],\
'BPSAVED':x['l_bpSaved'],\
'BPFACED':x['l_bpFaced']})
if x['RES']==1:
return winner
else:
return loser
# mutatis mutandis for player 0. (Note: no need to rewrite this function if I can figure
# out how to assign two outputs within an "apply" call.)
def assign_player_0(x):
winner = pd.Series({'DATE':x['tourney_date'],\
'TID':x['tourney_id'],\
'MID':x['match_num'],\
'PID':x['winner_id'],\
'HAND':x['winner_hand'],\
'HT':x['winner_ht'],\
'AGE':x['winner_age'],\
'RANKPTS':x['winner_rank_points'],\
'RANK':x['winner_rank'],\
'ACE':x['w_ace'],\
'DF':x['w_df'],\
'SVPT':x['w_svpt'],\
'FSTIN':x['w_1stIn'],\
'FSTWON':x['w_1stWon'],\
'SNDWON':x['w_2ndWon'],\
'BPSAVED':x['w_bpSaved'],\
'BPFACED':x['w_bpFaced']})
loser = pd.Series({'DATE':x['tourney_date'],\
'TID':x['tourney_id'],\
'MID':x['match_num'],\
'PID':x['loser_id'],\
'HAND':x['loser_hand'],\
'HT':x['loser_ht'],\
'AGE':x['loser_age'],\
'RANKPTS':x['loser_rank_points'],\
'RANK':x['loser_rank'],\
'ACE':x['l_ace'],\
'DF':x['l_df'],\
'SVPT':x['l_svpt'],\
'FSTIN':x['l_1stIn'],\
'FSTWON':x['l_1stWon'],\
'SNDWON':x['l_2ndWon'],\
'BPSAVED':x['l_bpSaved'],\
'BPFACED':x['l_bpFaced']})
if x['RES']==1:
return loser
else:
return winner
matches_hist.insert(len(matches_hist.columns),'RES',features['RES'].values)
P1=matches_hist.apply(assign_player_1,axis=1)
P0=matches_hist.apply(assign_player_0,axis=1)
features.insert(len(features.columns), 'RANKPTS', P1['RANKPTS']-P0['RANKPTS'])
features.insert(len(features.columns), 'RANK', P1['RANK']-P0['RANK'])
features['RANKPTS'] = features['RANKPTS']/features['RANKPTS'].std()
features['RANK'] = features['RANK']/features['RANK'].std()
# define figure and axes
fig = plt.figure(figsize=(15,5))
ax0 = fig.add_subplot(121)
ax1 = fig.add_subplot(122)
ax0.hist(features.RANK.dropna())
ax0.set_title('Diff. in rank')
ax1.hist(features.RANKPTS.dropna())
ax1.set_title('Diff in rank pts')
P1.insert(len(P1.columns),'FSWPCT',P1['FSTWON']/P1['SVPT'])
P0.insert(len(P0.columns),'FSWPCT',P0['FSTWON']/P0['SVPT'])
P1_grouped = P1.groupby('PID')
P0_grouped = P0.groupby('PID')
def extract_features(group):
mean_fswpct = group['FSWPCT'].mean()
size = len(group)
return pd.Series({'mean_fswpct':mean_fswpct,'size':size})
t1=P1_grouped.apply(extract_features).reset_index()
t0=P0_grouped.apply(extract_features).reset_index()
t2 = pd.merge(t1,t0,how='outer',on='PID')
t2 = t2.fillna(0)
t2['FSWPCT_HIST'] = (t2['mean_fswpct_x']*t2['size_x'] +\
t2['mean_fswpct_y']*t2['size_y'])/(t2['size_x']+t2['size_y'])
P1=pd.merge(P1,t2[['PID','FSWPCT_HIST']],how='inner',on='PID')
P0=pd.merge(P0,t2[['PID','FSWPCT_HIST']],how='inner',on='PID')
features['FSWPCT']=P1['FSWPCT']-P0['FSWPCT']
plt.hist(features.FSWPCT.dropna())
plt.title('Diff. in first serve winning percentages')
features['HT'] = P1['HT']-P0['HT']
features['HT'] = features['HT']/features['HT'].std()
plt.hist(features.HT.dropna())
plt.title('Difference in height')
features['AGE'] = P1['AGE']-P0['AGE']
features['AGE'] = features['AGE']/features['AGE'].std()
plt.hist(features.AGE.dropna())
plt.title('Difference in age')
P1=pd.merge(P1,t2[['PID','SSWPCT_HIST']],how='inner',on='PID')
P0=pd.merge(P0,t2[['PID','SSWPCT_HIST']],how='inner',on='PID')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Player and odds data from 2010-2016 has been matched and stored. Retrieve, merge, and rename.
Step5: Get additional training data. We'll include data from 2005 onward, exclude Davis Cup matches, and focus exclusively on players who played in our 2010-2016 set.
Step6: II. Calculate match statistics
Step7: Extract text string indicating unusual match outcomes
Step8: Calculate game scores for each set and store them in a separate dataframe
Step9: Store the number of games played in each match.
Step10: It seems a few matches were cut short for no recorded reason
Step11: Calculate first serve percentages (fsp) for both winners and losers.
Step12: Calculate winning first serve percentages (wfs)
Step13: Calculate second serves in (2ndIn) for both winners and losers
Step14: Calculate second serve percentages (ssp)
Step15: Calculate winning second serve percentages (wss)
Step16: Calculate overall win on serve percentages (wsp)
Step17: Calculate winning on return percentages (wrp).
Step18: Calculate total points won percentage (tpw)
Step19: Calculate double faults per game (dfpg)
Step20: Calculate aces per game (acpg)
Step21: Calculate break points saved percentage (bps)
Step22: Flag games with premature closure, probably due to injury (retired)
Step23: Flag games won as a walkover (wo)
Step24: Calculate player completeness (complete), defined as the product of the winning-on-serve and winning-on-return percentages (wsp × wrp)
Step25: Calculate player service advantage (serveadv), defined as the player's winning-on-serve percentage minus the opponent's winning-on-return percentage
Step26: Sanity check
Step27: III. Calculate features
Step28: Calculate surface weighting matrix. Note that the resulting values are quite different from Sipko's.
Step30: Function to calculate static features. (Specifically, calculate normalized rank, rankpts, age, height, hand features for every match in the dataset. Operates on the data as a whole, rather than by line.)
Step33: Get dynamic features (i.e. all features that require some time averaging); the weighting used here is written out after this step list.
Step34: It will help with indexing if we isolate all relevant features for players 1 and 0 into their own dataframes. This is what the following code does.
Step35: Features I and II
Step36: Feature III
Step37: Feature 4
Step38: Feature 5
Step39: Future Work
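For reference, the time and surface weighting applied inside get_dynamic_features can be written out explicitly (this is a restatement of the code above; the symbols are chosen here only for illustration):

$w_i = \min(p^{t_i},\, p)\, s(\text{surface}_i, \text{surface}), \qquad p = 0.8$

$\bar{x} = \frac{\sum_i w_i\, x_i}{\sum_i w_i\, \mathbb{1}[x_i \text{ is not null}]}$

where $t_i$ is the age of historical match $i$ in years, $s(\cdot,\cdot)$ is the entry of the surf_wt correlation matrix for the two surfaces, and $x_i$ is the statistic being averaged.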
|
751 | <ASSISTANT_TASK:>
Python Code:
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people trust variable
ppltrust = raw_data[['cntry','cntry_year','ppltrst']]
#Info
ppltrust.info()
#Clean the values in the dataframe that are null
ppltrust_clean = ppltrust[ppltrust.ppltrst.notnull()]
#Check the remaining values
ppltrust_clean.info()
#Check the sizes of data by country
ppltrust_clean.groupby('cntry_year').size()
#Print the dataset for visual inspection
ppltrust_clean.groupby('cntry_year').hist(sharey=True,sharex=True)
#Build the lists for each country each year
T_CH_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'CH_6']['ppltrst'].tolist()
T_CH_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'CH_7']['ppltrst'].tolist()
T_CZ_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'CZ_6']['ppltrst'].tolist()
T_CZ_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'CZ_7']['ppltrst'].tolist()
T_DE_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'DE_6']['ppltrst'].tolist()
T_DE_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'DE_7']['ppltrst'].tolist()
T_ES_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'ES_6']['ppltrst'].tolist()
T_ES_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'ES_7']['ppltrst'].tolist()
T_NO_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'NO_6']['ppltrst'].tolist()
T_NO_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'NO_7']['ppltrst'].tolist()
T_SE_6 = ppltrust_clean[ppltrust_clean.cntry_year == 'SE_6']['ppltrst'].tolist()
T_SE_7 = ppltrust_clean[ppltrust_clean.cntry_year == 'SE_7']['ppltrst'].tolist()
#Build the arrays for comparison
trust = np.asarray(T_CH_6 + T_CH_7+
T_CZ_6 + T_CZ_7+
T_DE_6 + T_DE_7+
T_ES_6 + T_ES_7+
T_NO_6 + T_NO_7+
T_SE_6 + T_SE_7)
group = np.array(['T_CH_6','T_CH_7',
'T_CZ_6','T_CZ_7',
'T_DE_6','T_DE_7',
'T_ES_6','T_ES_7',
'T_NO_6','T_NO_7',
'T_SE_6','T_SE_7'])
group = np.repeat(group, len(trust)/12)
#Run the pairwise t-test using tukey hsd
tukey = pairwise_tukeyhsd(endog=trust, # Data
groups=group, # Groups
alpha=0.05) # Significance level
#Summarize the data
tukey.summary()
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people happiness variable
pplhappy = raw_data[['cntry','cntry_year','happy']]
#Get info about the data
pplhappy.info()
#Clean the values in the dataframe that are null
pplhappy_clean = pplhappy[pplhappy.happy.notnull()]
remove_n = 7
drop_indices = np.random.choice(pplhappy_clean.index, remove_n, replace=False)
pplhappy_clean = pplhappy_clean.drop(drop_indices)
#Check the remaining values
pplhappy_clean.info()
#Check the sizes of data by country
pplhappy_clean.groupby('cntry_year').size()
#Plot happy variables for visual inspection per country and year
pplhappy_clean.groupby('cntry_year').hist(sharey=True,sharex=True)
#Build the lists for each country each year
H_CH_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'CH_6']['happy'].tolist()
H_CH_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'CH_7']['happy'].tolist()
H_CZ_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'CZ_6']['happy'].tolist()
H_CZ_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'CZ_7']['happy'].tolist()
H_DE_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'DE_6']['happy'].tolist()
H_DE_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'DE_7']['happy'].tolist()
H_ES_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'ES_6']['happy'].tolist()
H_ES_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'ES_7']['happy'].tolist()
H_NO_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'NO_6']['happy'].tolist()
H_NO_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'NO_7']['happy'].tolist()
H_SE_6 = pplhappy_clean[pplhappy_clean.cntry_year == 'SE_6']['happy'].tolist()
H_SE_7 = pplhappy_clean[pplhappy_clean.cntry_year == 'SE_7']['happy'].tolist()
#Build the arrays for comparison
happy = np.asarray(H_CH_6 + H_CH_7 +
H_CZ_6 + H_CZ_7 +
H_DE_6 + H_DE_7 +
H_ES_6 + H_ES_7 +
H_NO_6 + H_NO_7 +
H_SE_6 + H_SE_7)
group_happy = np.array(['H_CH_6', 'H_CH_7',
'H_CZ_6', 'H_CZ_7',
'H_DE_6', 'H_DE_7',
'H_ES_6', 'H_ES_7',
'H_NO_6', 'H_NO_7',
'H_SE_6', 'H_SE_7'])
group_happy = np.repeat(group_happy, len(happy)/12)
#Run the pairwise t-test using tukey hsd
tukey = pairwise_tukeyhsd(endog=happy, # Data
groups=group_happy, # Groups
alpha=0.05) # Significance level
#Summarize the data
tukey.summary()
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people trust variable
tv_total = raw_data[['gndr','year','tvtot']]
#Info
tv_total.info()
#Slice by the year 2012
tv_total_2012 = tv_total[tv_total.year == 6]
#Drop year
tv_total_2012bis = tv_total_2012.drop('year',axis = 1)
#Get information of the new dataframe
tv_total_2012bis.info()
#Clean the values in the dataframe that are null (gender)
tv_total_gndr_cl = tv_total_2012bis[tv_total_2012bis.gndr.notnull()]
#Clean the values in the dataframe that are null (total hours)
tv_total_cle = tv_total_gndr_cl[tv_total_gndr_cl.tvtot.notnull()]
#Reset index
tv_total_clea = tv_total_cle.reset_index()
#Drop old index
tv_total_clean = tv_total_clea.drop('index',axis = 1)
#Check Values
tv_total_clean.info()
#Downsample majority class (due to computational restrictions we downsample the majority instead of upsampling the minority)
# Separate majority and minority classes
tv_total_clean_majority = tv_total_clean[tv_total_clean.gndr == 1.0]
tv_total_clean_minority = tv_total_clean[tv_total_clean.gndr == 2.0]
# Downsample the majority
tv_total_clean_majority_downsampled = resample(tv_total_clean_majority, replace=False, n_samples=2140, random_state=123)
# Combine minority class with downsampled majority class
tv_total_complete = pd.concat([tv_total_clean_majority_downsampled, tv_total_clean_minority])
# Display new class counts
tv_total_complete.gndr.value_counts()
#Plot happy variables for visual inspection per country and year
tv_total_complete.groupby('gndr').hist(sharey=True,sharex=True)
#Build a new dataframe with the total tv hours per gender
tv_total_reordered = pd.DataFrame()
#Create lists splitting between men and women the total tv watching hours
men = list(tv_total_complete[tv_total_complete.gndr == 1.0]['tvtot'])
women = list(tv_total_complete[tv_total_complete.gndr == 2.0]['tvtot'])
#Build columns and add to the dataframe
tv_total_reordered['men'] = men
tv_total_reordered['women'] = women
# Kruskal-Wallace Test
# Join all ratings together into a list, then ranking them.
ranks = stats.rankdata(
tv_total_reordered['men'].tolist() +
tv_total_reordered['women'].tolist())
# Add the new ranked variables to the data frame.
tv_total_reordered['Ranks_men'] = ranks[0:2140]
tv_total_reordered['Ranks_women'] = ranks[2140:4280]
# Average rank per group.
groups = ['Ranks_men', 'Ranks_women']
print(tv_total_reordered[groups].apply(np.mean))
# Overall average rank.
print(np.mean(ranks))
# Print the test statistic followed by the probability of getting this result
# if the groups were not different.
stats.kruskal(
tv_total_reordered['men'],
tv_total_reordered['women'])
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people trust variable
pplfairness = raw_data[['partner','year','pplfair']]
#Info
pplfairness.info()
#Slice by the year 2012
pplfairness_2012 = pplfairness[pplfairness.year == 6]
#Drop year
pplfairness_2012bis = pplfairness_2012.drop('year',axis = 1)
#Get information of the new dataframe
pplfairness_2012bis.info()
#Clean the values in the dataframe that are null (partner)
pplfairness_2012bis_partner_cl = pplfairness_2012bis[pplfairness_2012bis.partner.notnull()]
#Clean the values in the dataframe that are null (total hours)
pplfairness_cle = pplfairness_2012bis_partner_cl[pplfairness_2012bis_partner_cl.pplfair.notnull()]
#Reset index
pplfairness_clea = pplfairness_cle.reset_index()
#Drop old index
pplfairness_clean = pplfairness_clea.drop('index',axis = 1)
#Check Values
pplfairness_clean.info()
#Downsample majority class (due to computational restrictions we downsample the majority instead of upsampling the minority)
# Separate majority and minority classes
pplfairness_clean_majority = pplfairness_clean[pplfairness_clean.partner == 1.0]
pplfairness_clean_minority = pplfairness_clean[pplfairness_clean.partner == 2.0]
# Downsample the majority
pplfairness_clean_majority_downsampled = resample(pplfairness_clean_majority, replace=False, n_samples=1608, random_state=123)
# Combine minority class with downsampled majority class
pplfairness_complete = pd.concat([pplfairness_clean_majority_downsampled, pplfairness_clean_minority])
# Display new class counts
pplfairness_complete.partner.value_counts()
#Plot happy variables for visual inspection per country and year
pplfairness_complete.groupby('partner').hist(sharey=True,sharex=True)
#Build a new dataframe with the total tv hours per gender
pplfairness_reordered = pd.DataFrame()
#Create lists splitting between men and women the total tv watching hours
wpartner = list(pplfairness_complete[pplfairness_complete.partner == 1.0]['pplfair'])
alone = list(pplfairness_complete[pplfairness_complete.partner == 2.0]['pplfair'])
#Build columns and add to the dataframe
pplfairness_reordered['wpartner'] = wpartner
pplfairness_reordered['alone'] = alone
# Printing the means for each group.
print(pplfairness_reordered.mean())
pplfairness_reordered.boxplot()
plt.show()
F, p = stats.f_oneway(
pplfairness_reordered['wpartner'],
pplfairness_reordered['alone'])
# The F statistic.
print(F)
# The probability. A p < .05 would lead us to believe the group means were
# not all similar in the population.
print(p)
#Run a one way ANOVA test on the groups
print(stats.ttest_ind(pplfairness_reordered['wpartner'], pplfairness_reordered['alone']))
#Tukey HSD test
#Build the arrays for comparison
fair = np.asarray(wpartner + alone)
group_fair = np.array(['wpartner', 'alone'])
group_fair = np.repeat(group_fair, len(fair)/2)
#Run the pairwise t-test using tukey hsd
tukey = pairwise_tukeyhsd(endog=fair, # Data
groups=group_fair, # Groups
alpha=0.05) # Significance level
#Summarize the data
tukey.summary()
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people trust variable
meet = raw_data[['cntry','year','sclmeet']]
#Info
meet.info()
#Slice by the year 2014
meet_2014 = meet[meet.year == 7]
#Drop year
meet_2014bis = meet_2014.drop('year',axis = 1)
#Get information of the new dataframe
meet_2014bis.info()
#Clean the values in the dataframe that are null (country)
meet_2014bis_cntr_cl = meet_2014bis[meet_2014bis.cntry.notnull()]
#Clean the values in the dataframe that are null (social meet)
meet_cle = meet_2014bis_cntr_cl[meet_2014bis_cntr_cl.sclmeet.notnull()]
#Reset index
meet_clea = meet_cle.reset_index()
#Drop old index
meet_clean = meet_clea.drop('index',axis = 1)
#Check Values
meet_clean.info()
#SLice the dataframe for three countries
meet_final = meet_clean.loc[meet_clean['cntry'].isin(['ES','SE','CH'])]
#Plot happy variables for visual inspection per country and year
meet_final.groupby('cntry').hist(sharey=True,sharex=True)
#Downsample majority class (due to computational restrictions we downsample the majority instead of upsampling the minority)
# Separate majority and minority classes
meet_final_majority_1 = meet_final[meet_final.cntry == 'ES']
meet_final_majority_2 = meet_final[meet_final.cntry == 'SE']
meet_final_minority = meet_final[meet_final.cntry == 'CH']
# Downsample the majority
meet_final_majority_1_downsampled = resample(meet_final_majority_1, replace=False, n_samples=772, random_state=123)
meet_final_majority_2_downsampled = resample(meet_final_majority_2, replace=False, n_samples=772, random_state=123)
# Combine minority class with downsampled majority class
meet_complete = pd.concat([meet_final_majority_1_downsampled, meet_final_majority_2_downsampled, meet_final_minority])
# Display new class counts
meet_complete.cntry.value_counts()
#Build a new dataframe with the total tv hours per gender
meet_reordered = pd.DataFrame()
#Create lists splitting between countries
ES = list(meet_complete[meet_complete.cntry == 'ES']['sclmeet'])
SE = list(meet_complete[meet_complete.cntry == 'SE']['sclmeet'])
CH = list(meet_complete[meet_complete.cntry == 'CH']['sclmeet'])
#Build columns and add to the dataframe
meet_reordered['SE'] = SE
meet_reordered['ES'] = ES
meet_reordered['CH'] = CH
# Kruskal-Wallace Test
# Join all ratings together into a list, then ranking them.
ranks = stats.rankdata(
meet_reordered['ES'].tolist() +
meet_reordered['SE'].tolist() +
meet_reordered['CH'].tolist())
# Add the new ranked variables to the data frame.
meet_reordered['Ranks_ES'] = ranks[0:772]
meet_reordered['Ranks_SE'] = ranks[772:1544]
meet_reordered['Ranks_CH'] = ranks[1544:2316]
# Average rank per group.
groups = ['Ranks_ES', 'Ranks_SE', 'Ranks_CH']
print(meet_reordered[groups].apply(np.mean))
# Overall average rank.
print(np.mean(ranks))
# Print the test statistic followed by the probability of getting this result
# if the groups were not different.
stats.kruskal(
meet_reordered['ES'],
meet_reordered['SE'],
meet_reordered['CH'])
#Get info of the dataset once reduced to the trust variable
#Slice the dataframe to the people trust variable
social = raw_data[['cntry','year', 'agea', 'sclact']]
#Info
social.info()
#Slice by the year 2014
social_2014 = social[social.year == 7]
#Drop year
social_2014bis = social_2014.drop('year',axis = 1)
#Get information of the new dataframe
social_2014bis.info()
#Clean the values in the dataframe that are null (country)
social_2014bis_cntr_cl = social_2014bis[social_2014bis.cntry.notnull()]
#Clean the values in the dataframe that are null (social activities)
social_cle = social_2014bis_cntr_cl[social_2014bis_cntr_cl.sclact.notnull()]
#Reset index
social_clea = social_cle.reset_index()
#Drop old index
social_clean = social_clea.drop('index',axis = 1)
#Check Values
social_clean.info()
#SLice the dataframe for three countries
social_complete = social_clean.loc[social_clean['cntry'].isin(['ES','SE','CH'])]
#Count values per country
social_complete.cntry.value_counts()
#Plot happy variables for visual inspection per country and year
social_complete.groupby('cntry').hist(sharey=False,sharex=False)
#Create lists gourping by age interval and country Spain: <20, between 20 and 40, between 40 and 60, more than 60
ES_1 = list(social_complete[(social_complete.cntry == 'ES') & (social_complete.agea <= 20)]['sclact'])
ES_2 = list(social_complete[(social_complete.cntry == 'ES') & (social_complete.agea > 20)& (social_complete.agea <= 40)]['sclact'])
ES_3 = list(social_complete[(social_complete.cntry == 'ES') & (social_complete.agea > 40)& (social_complete.agea <= 60)]['sclact'])
ES_4 = list(social_complete[(social_complete.cntry == 'ES') & (social_complete.agea > 60) ]['sclact'])
#Create lists gourping by age interval and country Sweden: <20, between 20 and 40, between 40 and 60, more than 60
SE_1 = list(social_complete[(social_complete.cntry == 'SE') & (social_complete.agea <= 20)]['sclact'])
SE_2 = list(social_complete[(social_complete.cntry == 'SE') & (social_complete.agea > 20)& (social_complete.agea <= 40)]['sclact'])
SE_3 = list(social_complete[(social_complete.cntry == 'SE') & (social_complete.agea > 40)& (social_complete.agea <= 60)]['sclact'])
SE_4 = list(social_complete[(social_complete.cntry == 'SE') & (social_complete.agea > 60) ]['sclact'])
#Create lists gourping by age interval and country Switzerland: <20, between 20 and 40, between 40 and 60, more than 60
CH_1 = list(social_complete[(social_complete.cntry == 'CH') & (social_complete.agea <= 20)]['sclact'])
CH_2 = list(social_complete[(social_complete.cntry == 'CH') & (social_complete.agea > 20)& (social_complete.agea <= 40)]['sclact'])
CH_3 = list(social_complete[(social_complete.cntry == 'CH') & (social_complete.agea > 40)& (social_complete.agea <= 60)]['sclact'])
CH_4 = list(social_complete[(social_complete.cntry == 'CH') & (social_complete.agea > 60) ]['sclact'])
#Tukey hsd
#Build the arrays for comparison
social= np.asarray(ES_1 + ES_2 + ES_3 + ES_4 +
SE_1 + SE_2 + SE_3 + SE_4 +
CH_1 + CH_2 + CH_3 + CH_4)
group_social = np.array(['ES_1', 'ES_2', 'ES_3', 'ES_4',
'SE_1', 'SE_2', 'SE_3', 'SE_4',
'CH_1', 'CH_2', 'CH_3', 'CH_4'])
group_social = np.repeat(group_social, len(social)/12)
#Run the pairwise t-test using tukey hsd
tukey = pairwise_tukeyhsd(endog=social, # Data
groups=group_social, # Groups
alpha=0.05) # Significance level
#Summarize the data
tukey.summary()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All countries except for Denmark seem to follow a normal distribution
Step2: In this case we have run all the comparisons between countries. We are assuming that all of them follow a normal distribution.
Step3: In this case we have run all the comparisons between countries. We are assuming that all of them follow a normal distribution.
Step4: The distribution of total tv watching hours is not normal, which calls for a non-parametric test, as can be seen when plotting the hours by gender for 2012.
Step5: It appears that the groups do not differ significantly. The test says there is a 43.8% chance of getting our data if there were no systematic difference in total tv watching hours between genders in the population. Based on our data, tv watching hours don't seem to differ systematically across gender.
Step6: Distributions are considered to be normal although skewed to the left
Step7: It appears that the two groups of people (in 2012), those living with a partner and those living alone, are likely to differ in the population (p < .005). Visual inspection suggests that people living alone rate others as less fair than people living with a partner do. To test which group is the odd one out, we compare the groups to each other.
Step8: In the Tukey HSD test the null hypothesis is that all means being compared are from the same population.
Step9: It appears that the groups do differ significantly. The test says there is a less than 0.05% chance of getting our data if there were no systematic difference in social meetings between countries. Based on our data, social meeting frequency seems to differ systematically across countries.
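As a sketch of one possible follow-up to the significant Kruskal-Wallis result (not part of the original analysis; scipy.stats.mannwhitneyu is assumed here as the pairwise test), pairwise comparisons could show which country pairs differ:
from itertools import combinations
from scipy import stats
# meet_reordered is the dataframe built in the cells above
for a, b in combinations(['ES', 'SE', 'CH'], 2):
    stat, p = stats.mannwhitneyu(meet_reordered[a], meet_reordered[b], alternative='two-sided')
    print('{} vs {}: U = {:.0f}, p = {:.4f}'.format(a, b, stat, p))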
|
752 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
type(mnist)
type(mnist.train.images)
#mnist.train.images[0]
mnist.train.images[2].shape
sample = mnist.train.images[2].reshape(28,28)
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(sample)
# Parameters
learning_rate = 0.001
training_epochs = 150
batch_size = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
n_samples = mnist.train.num_examples
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
def multilayer_perceptron(x, weights, biases):
'''
x : Place Holder for Data Input
weights: Dictionary of weights
biases: Dicitionary of biases
'''
# First Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Second Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Last Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
Xsamp,ysamp = mnist.train.next_batch(1)
plt.imshow(Xsamp.reshape(28,28))
# Remember indexing starts at zero!
print(ysamp)
# Launch the session
sess = tf.InteractiveSession()
# Intialize all the variables
sess.run(init)
# Training Epochs
# Essentially the max amount of loops possible before we stop
# May stop earlier if cost/loss limit was set
for epoch in range(training_epochs):
# Start with cost = 0.0
avg_cost = 0.0
# Convert total number of batches to integer
total_batch = int(n_samples/batch_size)
# Loop over all batches
for i in range(total_batch):
# Grab the next batch of training data and labels
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Feed dictionary for optimization and loss value
# Returns a tuple, but we only need 'c' the cost
# So we set an underscore as a "throwaway"
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
# Compute average loss
avg_cost += c / total_batch
print("Epoch: {} cost={:.4f}".format(epoch+1,avg_cost))
print("Model has completed {} Epochs of Training".format(training_epochs))
# Test model
correct_predictions = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
print(correct_predictions[0])
correct_predictions = tf.cast(correct_predictions, "float")
print(correct_predictions[0])
accuracy = tf.reduce_mean(correct_predictions)
type(accuracy)
mnist.test.labels
mnist.test.images
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Format
Step2: Parameters
Step3: Network Parameters
Step4: TensorFlow Graph Input
Step5: MultiLayer Model
Step6: Weights and Bias
Step7: Cost and Optimization Functions
Step8: Initialization of Variables
Step9: Training the Model
Step10: Running the Session
Step11: Model Evaluations
Step12: In order to get a numerical value for our predictions, we need to use tf.cast to cast the Tensor of booleans into a Tensor of floating point values so that we can take its mean.
Step13: Now we use the tf.reduce_mean function in order to grab the mean of the elements across the tensor.
Step14: This may seem a little strange, but this accuracy is still a Tensor object. Remember that we still need to pass in our actual test data! Now we can call the MNIST test labels and images and evaluate our accuracy!
Step15: The eval() method allows you to directly evaluate this tensor in a Session without needing to call sess.run() explicitly
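The eval() call in the last cell is equivalent to running the tensor through the session explicitly; the same result can be obtained with sess.run, using the session and placeholders defined above:
print("Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))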
|
753 | <ASSISTANT_TASK:>
Python Code:
# Don't worry about warnings in this exercise, as they can be distracting.
import warnings
warnings.simplefilter('ignore')
# Import the required Python modules
import mne
import conpy
import surfer
# Import and configure the 3D graphics backend
from mayavi import mlab
mlab.init_notebook('png')
# Tell MNE-Python to be quiet. The normal barrage of information will only distract us. Only display errors.
mne.set_log_level('ERROR')
# Configure the plotting interface to display plots in their own windows.
# The % sign makes it a "magic" command: a command ment for the notebook environment, rather than a command for Python.
%matplotlib notebook
# Tell MNE-Python and PySurfer where to find the brain model
import os
os.environ['SUBJECTS_DIR'] = 'data/subjects'
# Let's test plotting a brain (this is handled by the PySurfer package)
surfer.Brain('sample', hemi='both', surf='pial')
mne.read_epochs?
# Write your Python code here
# If your code in the above cell is correct, executing this cell print some information about the data
print(epochs)
# The semicolon at the end prevents the image from being included in this notebook
epochs.plot();
# Write here the code to plot the PSD of the MEG signal
# Fill in the source frequency, in Hertz
source_frequency = ###
# Write here the code to plot some PSD topomaps
number_of_sources = ###
# Write here the code to construct a CSD matrix
# If the code in the cell above is correct, executing this cell will plot the CSD matrix
csd.plot()[0]
# Write your code to read the forward solution here
# If the code in the above cell is correct, executing this cell will plot the source grid
fwd['src'].plot(trans='data/simulated-data-trans.fif')
# Write your code to compute the DICS filters here
# If the code in the above cell is correct, executing this cell will print some information about the filters
print('Filters have been computed for %d points on the cortex at %d frequency.' %
(filters['weights'].shape[1], filters['weights'].shape[0]))
print('At each point, there are %d source dipoles (XYZ)' % filters['n_orient'])
# Write your code to compute the power map here
# If the code in the above cell is correct, executing the cell will plot the power map
power_map.plot(hemi='both', smoothing_steps=20);
# Write your code to find the seed point here
# If the code in the above cell is correct, executing this cell will plot the seed point on the power map
brain = power_map.plot(hemi='both', smoothing_steps=20) # Plot power map
# We need to find out on which hemisphere the seed point lies
lh_verts, rh_verts = power_map.vertices
if seed_point < len(lh_verts):
# Seed point is on the left hemisphere
brain.add_foci(lh_verts[seed_point], coords_as_verts=True, hemi='lh')
else:
# Seed point is on the right hemisphere
brain.add_foci(rh_verts[seed_point - len(lh_verts)], coords_as_verts=True, hemi='rh')
# Splitting the data is not hard to do.
epochs_rest = epochs['rest']
epochs_task = epochs['task']
# Write your code here to compute the CSD on the epochs_task data
# If the code in the above cell is correct, executing this cell will plot the CSD matrix
csd_task.plot()[0]
# Write your code here to compute one-to-all connectivity for the "task" data
# If the code in the above cell is correct, executing this cell will print some information about the connectivity
print(con_task)
# Write your code here to compute the coherence map for the epochs_task data
# If the code in the above cell is correct, executing this cell will plot the coherence map
brain = coherence_task.plot(hemi='both', smoothing_steps=20)
lh_verts, rh_verts = coherence_task.vertices
if seed_point < len(lh_verts):
# Seed point is on the left hemisphere
brain.add_foci(lh_verts[seed_point], coords_as_verts=True, hemi='lh')
else:
# Seed point is on the right hemisphere
brain.add_foci(rh_verts[seed_point - len(lh_verts)], coords_as_verts=True, hemi='rh')
# Write your code here to compute connectivity for the epochs_rest data and make a coherence map
# If the code in the above cell is correct, executing this cell will plot the coherence map
brain = coherence_rest.plot(hemi='both', smoothing_steps=20)
lh_verts, rh_verts = coherence_rest.vertices
if seed_point < len(lh_verts):
# Seed point is on the left hemisphere
brain.add_foci(lh_verts[seed_point], coords_as_verts=True, hemi='lh')
else:
# Seed point is on the right hemisphere
brain.add_foci(rh_verts[seed_point - len(lh_verts)], coords_as_verts=True, hemi='rh')
# Write your code here to compute a contrast between the "task" and "rest" connectivity and make a coherence map
# If the code in the above cell is correct, executing this cell will plot the coherence map
brain = coherence_contrast.plot(hemi='both', smoothing_steps=20)
lh_verts, rh_verts = coherence_contrast.vertices
if seed_point < len(lh_verts):
# Seed point is on the left hemisphere
brain.add_foci(lh_verts[seed_point], coords_as_verts=True, hemi='lh')
else:
# Seed point is on the right hemisphere
brain.add_foci(rh_verts[seed_point - len(lh_verts)], coords_as_verts=True, hemi='rh')
# Write your code to produce all-to-all connectivity estimates for the "rest" and "task" segments
# and the contrast between them.
# If the code in the above cell is correct, executing this cell will print some information about the connectivity
print(all_to_all_contrast)
# This cell will plot the coherence map
all_to_all_coherence = all_to_all_contrast.make_stc()
all_to_all_coherence.plot(hemi='both', smoothing_steps=20);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the simulation data
Step2: The documentation shows us that mne.read_epochs takes one required parameter (fname) and three optional parameters (proj, preload and verbose). You can recognize optional parameters by the fact that they have a default value assigned to them. In this exercise, you can always leave the optional parameters as they are, unless explicitly instructed to change them.
Step3: "Epochs" are snippets of MEG sensor data. In this simulation, all sensors are gradiometers. There are two epochs, approximately 10 seconds in length
Step4: In the epochs plot, you can use the scrolling function of your mouse/trackpad to browse through the channels. The vertical dashed line indicates where one epoch ends and the next one begins.
Step5: If you were to name one frequency at which the sources are sending out a signal, what would that frequency be? Fill in the answer below. We'll use it in the upcoming tasks
Step6: Question 2
Step7: Take a look at the topomap corresponding to the frequency band that contains the frequency at which the sources are sending out their signal. How many sources do you think I simulated? Fill in your answer below
Step8: Question 3
Step9: If you examine the CSD matrix closely, you can already spot which sources are coherent with each other. Sssshhh! We'll look at it in more detail later. For now, let's compute the DICS beamformer!
Step10: For this exercise, we use a very sparse source grid (the yellow dots in the plot). This grid is enough for our purposes and our computations will run quickly. For real studies, I recommend a much denser grid.
Step11: mne.beamformer.apply_dics_csd
Step12: Use the mouse/trackpad to rotate the brain around. Can you find the sources on the cortex? Even though I've simulated them as dipole sources, they show up more as "blobs" in the power map. This is called spatial leakage and is due to various inaccuracies and limitations of the DICS beamformer filters.
Step13: You may need to rotate the brain around to find the seed point. It should be drawn as a white sphere.
Step14: To estimate connectivity for just the epochs_task part, we need to compute the CSD matrix on only this data. You've computed a CSD matrix before, so rinse and repeat
Step15: Now you are ready to compute one-to-all connectivity using DICS. It will take two lines of Python code. First, you'll need to use the conpy.one_to_all_connectivity_pairs function to compute the list of connectivity pairs. Then, you can use the conpy.dics_connectivity function to perform the connectivity estimation. Check the documentation for both functions (remember
Step16: To visualize the connectivity result, we can create a cortical map, where the value at each source point is the coherence between the source point and the seed region. The con_task object defines a .make_stc() method that will do just that. Take a look at its documentation and store the map in the coherence_task variable
Step17: Which source points seem to be in coherence with the seed point? Double-click on the text-cell below to edit it and write down your answer.
Step18: See? You'll find that even when no coherent sources are active, there is an area of coherence surrounding the seed region. This will be a major problem when attempting to estimate all-to-all connectivity.
Step19: If all went well, you'll see that the coherence due to spatial leakage has disappeared from the coherence map.
Step20: How to visualize this all-to-all connectivity? This is a question worth pondering a bit. But for this exercise, we can get away with producing a coherence map like we did with the one-to-all connectivity. The value of the coherence map is, for each source point, the sum of the coherence of all connections from and to the source point.
|
754 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (13.0, 8.0)
%matplotlib inline
import pickle
import sklearn
import sklearn.linear_model
import sklearn.preprocessing
import sklearn.gaussian_process
import sklearn.ensemble
import sklearn.metrics  # used further down for mean_squared_error and accuracy_score
import sklearn.base     # used further down for sklearn.base.clone
import pickle # Pickle files allow us to easily save and load python objects.
with open('data/cpu_page_views.pickle', 'rb') as file:
cpu_usage, page_views, page_names, total_page_views = pickle.load(file, encoding='latin1')
print('Array shapes:')
print('-'*25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'page_views\t {page_views.shape}')
print(f'page_names\t {page_names.shape}')
print(f'total_page_views {total_page_views.shape}')
plt.figure(figsize=(13, 6))
plt.plot(total_page_views, label='Total page views')
plt.plot(cpu_usage, label='CPU %')
plt.legend()
plt.show()
plt.figure(figsize=(13, 6))
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
### BEGIN SOLUTION
plt.scatter(total_page_views, cpu_usage)
### END SOLUTION
# plt.scatter( ? , ? )
plt.show()
import sklearn.linear_model
simple_lin_model = sklearn.linear_model.LinearRegression()
### BEGIN SOLUTION
simple_lin_model.fit(total_page_views.reshape((-1, 1)), cpu_usage)
### END SOLUTION
# simple_lin_model.fit( ? , ? )
print(f"Coefficient = {simple_lin_model.coef_[0]:.2f}\nConstant term = {simple_lin_model.intercept_:.2f}")
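# --- Illustrative aside (not part of the original exercise) ---
# The fitted model is just the formula: cpu_usage ~ c0 + c1 * total_page_views.
# The check below applies that formula by hand for 880 page views and compares it with
# .predict(); it assumes the cells above have been executed so simple_lin_model exists.
c0, c1 = simple_lin_model.intercept_, simple_lin_model.coef_[0]
print('By hand  :', c0 + c1 * 880)
print('predict():', simple_lin_model.predict([[880]])[0])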
### BEGIN SOLUTION
simple_lin_model.predict([[880]])
### END SOLUTION
# simple_lin_model.predict( [[ ? ]] )
### BEGIN SOLUTION
simple_lin_model.predict([[100]])
### END SOLUTION
# simple_lin_model.predict( [[ ? ]] )
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, color='black')
plt.plot(total_page_views, simple_lin_model.predict(total_page_views.reshape((-1, 1))), color='blue', linewidth=3)
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
R2 = simple_lin_model.score(total_page_views.reshape((-1, 1)), cpu_usage)
print(f'R2 = {R2:.3f}')
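# --- Illustrative aside (not part of the original exercise) ---
# The R2 score returned by .score() is 1 - SS_res / SS_tot. The check below recomputes it
# by hand; it assumes the cells above have been executed so the model and arrays exist.
predictions = simple_lin_model.predict(total_page_views.reshape((-1, 1)))
ss_res = np.sum((cpu_usage - predictions) ** 2)         # residual sum of squares
ss_tot = np.sum((cpu_usage - np.mean(cpu_usage)) ** 2)  # total sum of squares
print(f'Manually computed R2 = {1 - ss_res / ss_tot:.3f}')  # should match the score above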
with open('data/cpu_page_views_2.pickle', 'rb') as file:
cpu_usage, total_page_views = pickle.load(file, encoding='latin1')
print('Array shapes:')
print('-'*25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'total_page_views {total_page_views.shape}')
simple_lin_model = sklearn.linear_model.LinearRegression()
simple_lin_model.fit(total_page_views, cpu_usage)
### BEGIN SOLUTION
prediction = simple_lin_model.predict([[8]])
### END SOLUTION
# prediction = simple_lin_model.predict(?)
print(f'The predicted value is: {prediction}')
assert prediction < 25
all_page_views = np.concatenate((total_page_views, [[8]]))
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, color='black')
plt.plot(all_page_views, simple_lin_model.predict(all_page_views), color='blue', linewidth=3)
plt.axvline(8, color='r')
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
plt.figure(figsize=(16, 5))
plt.plot(total_page_views, label='Total page views')
plt.plot(cpu_usage, label='CPU %')
plt.legend()
plt.show()
x = np.array([1, 2, 3])
selection = np.array([True, False, True])
x[selection]
### BEGIN SOLUTION
selection = cpu_usage < 25
### END SOLUTION
# selection = ?
assert selection.dtype == np.dtype('bool'), 'The selection variable should be an array of True/False values'
assert len(selection) == len(total_page_views)
simple_lin_model = sklearn.linear_model.LinearRegression()
simple_lin_model.fit(total_page_views[selection], cpu_usage[selection])
prediction = simple_lin_model.predict([[8]])
print(f'The predicted value is: {prediction}')
all_page_views = np.concatenate((total_page_views, [[8]]))
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, c=selection, cmap='RdYlGn')
plt.plot(all_page_views, simple_lin_model.predict(all_page_views), color='blue', linewidth=3)
plt.axvline(8, color='r')
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
assert prediction > 23
# load the data
with open('data/cpu_page_views.pickle', 'rb') as file:
cpu_usage, page_views, page_names, total_page_views = pickle.load(file, encoding='latin1')
print('Array shapes:')
print('-'*25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'page_views\t {page_views.shape}')
print(f'page_names\t {page_names.shape}')
print(f'total_page_views {total_page_views.shape}\n')
print(page_names)
plt.figure(figsize=(13, 6))
for i in range(len(page_names)):
plt.plot(page_views[:,i], label=page_names[i])
plt.plot(cpu_usage, label= 'CPU %')
plt.legend()
plt.show()
plt.figure(figsize=(13, 6))
for i in range(len(page_names)):
plt.scatter(page_views[:,i], cpu_usage, label=page_names[i])
plt.xlabel("Page views")
plt.ylabel("CPU usage")
plt.legend()
plt.show()
multi_lin_model = sklearn.linear_model.LinearRegression()
### BEGIN SOLUTION
multi_lin_model.fit(page_views, cpu_usage)
### END SOLUTION
# multi_lin_model.fit( ? , ? )
# Some quick and dirty code to print the most consuming pages first
print('Index\tCPU (%)\t Page')
print('-'*41)
indices = np.argsort(-multi_lin_model.coef_)
for i in indices:
print(f"{i}\t{ multi_lin_model.coef_[i]:4.2f}\t {page_names[i]}")
print(f'The other processes on the server consume {multi_lin_model.intercept_:.2f}%')
R2 = multi_lin_model.score(page_views, cpu_usage)
print(f'R2 = {R2:.3f}')
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.figure(figsize=(13, 6))
plt.plot(x, np.sin(x))
plt.show()
# helper function to generate the data
def sine_train_data():
x_train = np.linspace(0, 6, 10).reshape((-1, 1))
y_train = np.sin(x_train)
return x_train, y_train
x_train, y_train = sine_train_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
plt.show()
x_train, y_train = sine_train_data()
### BEGIN SOLUTION
model = sklearn.linear_model.LinearRegression()
model.fit(x_train, y_train)
### END SOLUTION
# model = ?
# model.fit( ? )
print(f'The R2 score of this model is: {model.score(x_train, y_train):.3}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
plt.plot(x, model.predict(x))
plt.show()
import sklearn.preprocessing
x = [[2]]
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
pol_exp.fit_transform(x)
x = [[2, 3]]
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
pol_exp.fit_transform(x)
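# --- Illustrative aside (not part of the original exercise) ---
# For two inputs [x1, x2] and degree=3 the expansion contains every monomial x1^a * x2^b
# with a + b <= 3: [1, x1, x2, x1^2, x1*x2, x2^2, x1^3, x1^2*x2, x1*x2^2, x2^3],
# i.e. 10 features. This check reuses x and pol_exp from the cell above.
print('Number of generated features:', pol_exp.fit_transform(x).shape[1])  # expected: 10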
x_train, y_train = sine_train_data()
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
pol_exp.fit_transform(x_train)
x_train, y_train = sine_train_data()
### BEGIN SOLUTION
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
### END SOLUTION
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
train_score = model.score(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this model is: {train_score:.6f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
def sine_test_data():
x_test = 0.5 + np.arange(6).reshape((-1, 1))
y_test = np.sin(x_test)
return x_test, y_test
assert train_score > .99999, 'Adjust the degree parameter 2 cells above until the train_score > .99999'
x_test, y_test = sine_test_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0, 6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
### BEGIN SOLUTION
test_score = model.score(pol_exp.fit_transform(x_test), y_test)
### END SOLUTION
# test_score = model.score( ? )
print(f'The R2 score of the model on the test set is: {test_score:.3f}')
assert test_score > 0.99
# a helper function to create the sine train set that can also add noise to the data
def noisy_sine_train_data(noise=None):
x_train = np.linspace(0, 6, 10).reshape((-1, 1))
y_train = np.sin(x_train)
# If fixed, set the random seed so that the next call of the
# random function always returns the same result
if noise == 'fixed':
np.random.seed(1)
x_train += np.random.randn(len(x_train)).reshape((-1, 1)) / 5
return x_train, y_train
x_train, y_train = noisy_sine_train_data(noise='fixed')
### BEGIN SOLUTION
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
### END SOLUTION
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
train_score = model.score(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is {train_score:.3f}')
assert train_score > 0.99
x_test, y_test = sine_test_data()
print(f'The R2 score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
x_train, y_train = noisy_sine_train_data()
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(x_train[0], x_train[-1], 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
x_train, y_train = noisy_sine_train_data(noise='fixed')
x_test, y_test = sine_test_data()
### BEGIN SOLUTION
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
### END SOLUTION
# pol_exp = ?
# model = ?
# model.fit( ? )
print(f'The score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.legend()
plt.show()
test_score = model.score(pol_exp.fit_transform(x_test), y_test)
print(f'The score of the model on the test set is: {test_score:.3f}')
assert test_score > 0.99, 'Adjust the degree parameter until test_score > 0.99'
# create the data in case you skipped the previous exercise
# a helper function to create the sine train set that can also add noise to the data
def noisy_sine_train_data(noise=None):
x_train = np.linspace(0, 6, 10).reshape((-1, 1))
y_train = np.sin(x_train)
# If fixed, set the random seed so that the next call of the
# random function always returns the same result
if noise == 'fixed':
np.random.seed(1)
x_train += np.random.randn(len(x_train)).reshape((-1, 1)) / 5
return x_train, y_train
def sine_test_data():
x_test = 0.5 + np.arange(6).reshape((-1, 1))
y_test = np.sin(x_test)
return x_test, y_test
x_train, y_train = noisy_sine_train_data(noise='fixed')
# we randomly pick 3 data points to get a nice validation set
train_i = [0, 1, 3, 4, 6, 7, 9]
val_i = [2, 5, 8]
# create the train and validation sets
x_train_i = x_train[train_i, :]
y_train_i = y_train[train_i]
x_val_i = x_train[val_i, :]
y_val_i = y_train[val_i]
### BEGIN SOLUTION
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=4)
### END SOLUTION
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train_i), y_train_i)
### BEGIN SOLUTION
train_score = model.score(pol_exp.fit_transform(x_train_i), y_train_i)
validation_score = model.score(pol_exp.fit_transform(x_val_i), y_val_i)
### END SOLUTION
# train_score = model.score( ? )
# validation_score = model.score( ? )
print(f'The R2 score of this model on the train set is: {train_score:.3f}')
print(f'The R2 score of this model on the validation set is: {validation_score:.3f}')
assert pol_exp.degree < 5, 'Select a polynomial degree < 5'
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
x_test, y_test = sine_test_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
x_train, y_train = noisy_sine_train_data(noise='fixed')
### BEGIN SOLUTION
results = np.inf * np.ones((10, 10))
for i in range(10):
### END SOLUTION
# results = np.inf * np.ones(( ? , ?))
# The results array should have a shape of "the number of data points" x "the number of polynomial degrees to try"
# The ones are multiplied with a very large number, np.inf, since we are looking for the smallest error
# for i in range( ? ):
train_i = np.where(np.arange(10) != i)[0]
x_train_i = x_train[train_i, :]
y_train_i = y_train[train_i]
x_val_i = x_train[i:i+1, :]
y_val_i = y_train[i:i+1]
### BEGIN SOLUTION
for degree in range(10):
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=degree)
### END SOLUTION
# for degree in range(?):
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train_i), y_train_i)
### BEGIN SOLUTION
results[i, degree] = sklearn.metrics.mean_squared_error(model.predict(pol_exp.fit_transform(x_val_i)), y_val_i)
### END SOLUTION
# Fill out the results for each validation set and each degree in the results matrix
# results[ ? ] = sklearn.metrics.mean_squared_error(model.predict(pol_exp.fit_transform(x_val_i)), y_val_i)
max_degree = 10
plt.boxplot(results[:, : max_degree])
plt.xticks(range(1, max_degree + 1), range(max_degree))
plt.xlabel('Polynomial degree')
plt.ylabel('Mean Squared Error')
plt.show()
### BEGIN SOLUTION
average_results = np.mean(results, axis=0)
degree = np.argmin(average_results)
### END SOLUTION
# average the results over all validation sets
# average_results = np.mean(results, axis= ? )
# find the optimal degree
# degree = np.argmin( ? )
print(f'The optimal degree for the polynomials is: {degree}')
assert degree == 3
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=degree)
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
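# --- Illustrative aside (not part of the original exercise) ---
# scikit-learn can run the same leave-one-out procedure for us. This is only a sketch of an
# alternative to the manual loop above and reuses x_train / y_train from the cells above.
import sklearn.model_selection
import sklearn.pipeline
loo_mse = []
for deg in range(10):
    pipe = sklearn.pipeline.make_pipeline(
        sklearn.preprocessing.PolynomialFeatures(degree=deg),
        sklearn.linear_model.LinearRegression())
    scores = sklearn.model_selection.cross_val_score(
        pipe, x_train, y_train, cv=sklearn.model_selection.LeaveOneOut(),
        scoring='neg_mean_squared_error')
    loo_mse.append(-scores.mean())
print('Degree with the lowest leave-one-out MSE:', int(np.argmin(loo_mse)))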
x_train, y_train = noisy_sine_train_data(noise='fixed')
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
### BEGIN SOLUTION
model = sklearn.linear_model.RidgeCV()
### END SOLUTION
# model = sklearn.linear_model. ?
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13,8))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The R2 score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
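# --- Illustrative aside (not part of the original exercise) ---
# RidgeCV picks the regularisation strength alpha by cross-validation. Comparing the size
# of the coefficients with a plain LinearRegression on the same degree-9 features shows how
# the penalty shrinks them. This sketch reuses pol_exp, x_train and y_train from above.
plain = sklearn.linear_model.LinearRegression()
plain.fit(pol_exp.fit_transform(x_train), y_train)
print('alpha selected by RidgeCV:', model.alpha_)
print('sum of |coefficients| without regularisation:', float(np.abs(plain.coef_).sum()))
print('sum of |coefficients| with ridge regularisation:', float(np.abs(model.coef_).sum()))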
x_train, y_train = noisy_sine_train_data(noise='fixed')
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
model = sklearn.linear_model.RidgeCV()
model.fit(pol_exp.fit_transform(x_train), y_train)
print('The R2 score of this method on the train set is:',
f'{model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
# Now test outside the area of the training
x_test_extended = np.array([-3,-2,-1,7,8,9]).reshape((-1, 1))
y_test_extended = np.sin(x_test_extended)
plt.figure(figsize=(13, 8))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test_extended, y_test_extended, color='r', label='test')
plt.legend()
x = np.arange(-4,10, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print('The R2 score of the model on the test set outside the area used for training is:',
f'{model.score(pol_exp.fit_transform(x_test_extended), y_test_extended):.3f}')
# Some code to generate spirals. You can ignore this for now.
# To comply with standards in machine learning we use x1 and x2 as opposed to x and y for this graph
# because y is reserved for the output in Machine Learning (= 0 or 1 in this case)
r = np.arange(0.1, 1.5, 0.0001)
theta = 2 * np.pi * r
x1_0 = r * np.cos(theta)
x2_0 = r * np.sin(theta)
x1_1 = - r * np.cos(theta)
x2_1 = - r * np.sin(theta)
perm_indices = np.random.permutation(range(len(x1_0)))
x1_0_rand = x1_0[perm_indices[ : 1000]] + np.random.randn(1000) / 5
x2_0_rand = x2_0[perm_indices[ : 1000]] + np.random.randn(1000) / 5
x1_1_rand = x1_1[perm_indices[1000 : 2000]] + np.random.randn(1000) / 5
x2_1_rand = x2_1[perm_indices[1000 : 2000]] + np.random.randn(1000) / 5
plt.figure(figsize=(8, 8))
plt.scatter(x1_0_rand, x2_0_rand, color = 'b', alpha=0.6, linewidth=0)
plt.scatter(x1_1_rand, x2_1_rand, color = 'r', alpha=0.6, linewidth=0)
plt.plot(x1_0, x2_0, color = 'b', lw=3)
plt.plot(x1_1, x2_1, color='r', lw=3)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
# Create a train and validation set
x_train_0 = np.concatenate((x1_0_rand[ : 800].reshape((-1,1)), x2_0_rand[ : 800].reshape((-1,1))), axis=1)
y_train_0 = np.zeros((len(x_train_0),))
x_train_1 = np.concatenate((x1_1_rand[ : 800].reshape((-1,1)), x2_1_rand[ : 800].reshape((-1,1))), axis=1)
y_train_1 = np.ones((len(x_train_1),))
x_val_0 = np.concatenate((x1_0_rand[800 : ].reshape((-1,1)), x2_0_rand[800 : ].reshape((-1,1))), axis=1)
y_val_0 = np.zeros((len(x_val_0),))
x_val_1 = np.concatenate((x1_1_rand[800 : ].reshape((-1,1)), x2_1_rand[800 : ].reshape((-1,1))), axis=1)
y_val_1 = np.ones((len(x_val_1),))
x_train = np.concatenate((x_train_0, x_train_1), axis=0)
y_train = np.concatenate((y_train_0, y_train_1), axis=0)
x_val = np.concatenate((x_val_0, x_val_1), axis=0)
y_val = np.concatenate((y_val_0, y_val_1), axis=0)
# Plot the train and test data
plt.figure(figsize=(8, 8))
plt.scatter(x_train[:, 0], x_train[:, 1], color='k', alpha=0.6, linewidth=0)
plt.scatter(x_val[:, 0], x_val[:, 1], color='y', alpha=0.6, linewidth=0)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.show()
### BEGIN SOLUTION
model = sklearn.linear_model.LogisticRegression()
model.fit(x_train, y_train)
### END SOLUTION
# model = sklearn.linear_model. ?
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
assert val_score > 0.5
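# --- Illustrative aside (not part of the original exercise) ---
# Logistic regression outputs class probabilities, and thresholding them at 0.5 is what
# produces the linear decision boundary plotted below. Reuses model and x_val from above.
print('Predicted probabilities (class 0, class 1) for the first 3 validation points:')
print(model.predict_proba(x_val[:3]))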
# A quick and dirty helper function to plot the decision boundaries
def plot_decision_boundary(model, pol_exp=None):
n=250
lin_space = np.linspace(-2, 2, num=n).reshape((-1, 1))
x1 = np.dot(lin_space, np.ones((1, n))).reshape((-1, 1))
x2 = np.dot(np.ones((n, 1)), lin_space.T).reshape((-1, 1))
x = np.concatenate((x1, x2), axis=1)
if pol_exp is None:
y = model.predict(x)
else:
y = model.predict(pol_exp.fit_transform(x))
i_0 = np.where(y < 0.5)
i_1 = np.where(y > 0.5)
plt.figure(figsize=(8,8))
plt.scatter(x[i_0, 0], x[i_0, 1], color='b', s=2, alpha=0.5, linewidth=0, marker='s')
plt.scatter(x[i_1, 0], x[i_1, 1], color='r',s=2, alpha=0.5, linewidth=0, marker='s')
plt.plot(x1_0, x2_0, color = 'b', lw=3)
plt.plot(x1_1, x2_1, color='r', lw=3)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
# Call the function
plot_decision_boundary(model)
### BEGIN SOLUTION
model = sklearn.linear_model.LogisticRegressionCV(max_iter=1000)
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=10)
model.fit(pol_exp.fit_transform(x_train), y_train)
### END SOLUTION
# model = sklearn.linear_model. ?
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_train)), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_val)), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model, pol_exp=pol_exp)
assert val_score >= 0.8
import sklearn.ensemble
### BEGIN SOLUTION
model = sklearn.ensemble.RandomForestClassifier()
model.fit(x_train, y_train)
### END SOLUTION
# model = ?
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
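# --- Illustrative aside (not part of the original exercise) ---
# A fitted random forest also reports how much each input feature contributed to its splits.
# With only the two coordinates x1 and x2 as inputs, both should matter roughly equally here.
print('Feature importances (x1, x2):', model.feature_importances_)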
### BEGIN SOLUTION
model = sklearn.ensemble.RandomForestClassifier(min_samples_leaf=.02)
### END SOLUTION
# model = sklearn.ensemble.RandomForestClassifier(min_samples_leaf= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.5
### BEGIN SOLUTION
model = sklearn.ensemble.RandomForestClassifier(n_estimators=100)
### END SOLUTION
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
### BEGIN SOLUTION
model = sklearn.ensemble.RandomForestClassifier(n_estimators=1000, min_samples_leaf=0.02)
### END SOLUTION
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? , min_samples_leaf= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
### BEGIN SOLUTION
model = sklearn.ensemble.RandomForestClassifier(n_estimators=100, min_samples_leaf=0.01)
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=15)
model.fit(pol_exp.fit_transform(x_train), y_train)
### END SOLUTION
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? , min_samples_leaf= ? )
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ?)
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_train)), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_val)), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model, pol_exp=pol_exp)
assert val_score > 0.7
with open('data/train_set_forecasting.pickle', 'rb') as file:
train_set = pickle.load(file, encoding='latin1')
print(f'Shape of the train set = {train_set.shape}')
plt.figure(figsize=(20,4))
plt.plot(train_set)
plt.show()
import sklearn
import sklearn.linear_model
import sklearn.gaussian_process
model = sklearn.linear_model.LinearRegression()
# the input x_train contains all the data except the last data point
x_train = train_set[ : -1].reshape((-1, 1)) # the reshape is necessary since sklearn requires a 2 dimensional array
# the output y_train contains all the data except the first data point
y_train = train_set[1 : ]
# this code fits the model on the train data
model.fit(x_train, y_train)
# this score gives you how well it fits on the train set
# higher is better and 1.0 is perfect
print(f'The R2 train score of the linear model is {model.score(x_train, y_train):.3f}')
n_predictions = 100
import copy
# use the last data point as the first input for the predictions
x_test = copy.deepcopy(train_set[-1]) # make a copy to avoid overwriting the training data
prediction = []
for i in range(n_predictions):
# predict the next data point
y_test = model.predict([[x_test]])[0] # sklearn requires a 2 dimensional array and returns a one-dimensional one
### BEGIN SOLUTION
prediction.append(y_test)
x_test = y_test
### END SOLUTION
# prediction.append( ? )
# x_test = ?
prediction = np.array(prediction)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction)), 'g')
plt.plot(train_set, 'b')
plt.show()
def convert_time_series_to_train_data(ts, width):
x_train, y_train = [], []
for i in range(len(ts) - width - 1):
x_train.append(ts[i : i + width])
y_train.append(ts[i + width])
return np.array(x_train), np.array(y_train)
width = 5
x_train, y_train = convert_time_series_to_train_data(train_set, width)
print(x_train.shape, y_train.shape)
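# --- Illustrative aside (not part of the original exercise) ---
# A tiny toy series makes the sliding-window construction easier to see: with width=3 each
# row of x holds 3 consecutive values and y holds the value that immediately follows them.
toy_x, toy_y = convert_time_series_to_train_data(np.arange(8), 3)
print(toy_x)  # first row is [0 1 2] ...
print(toy_y)  # ... and its target is 3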
width = 5
x_train, y_train = convert_time_series_to_train_data(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(x_train, y_train)
print(f'The R2 score of the linear model with width={width} is {model.score(x_train, y_train):.3f}')
import copy
# this is a helper function to make the predictions
def predict(model, train_set, width, n_points):
prediction = []
# create the input data set for the first predicted output
# copy the data to make sure the original is not overwritten
x_test = copy.deepcopy(train_set[-width : ])
for i in range(n_points):
# predict only the next data point
prediction.append(model.predict(x_test.reshape((1, -1))))
# use the newly predicted data point as input for the next prediction
x_test[0 : -1] = x_test[1 : ]
x_test[-1] = prediction[-1]
return np.array(prediction)
n_predictions = 200
prediction = predict(model, train_set, width, n_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
### BEGIN SOLUTION
width = 22
### END SOLUTION
# width = ?
x_train, y_train = convert_time_series_to_train_data(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(x_train, y_train)
print(f'The R2 score of the linear model with width={width} is {model.score(x_train, y_train):.3f}')
prediction = predict(model, train_set, width, 200)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
assert width > 1
model_generators = [sklearn.linear_model.LinearRegression(),
sklearn.linear_model.RidgeCV(cv=3),
sklearn.linear_model.LassoCV(cv=3),
sklearn.ensemble.RandomForestRegressor(n_estimators=10)]
best_score = 0
### BEGIN SOLUTION
for model_gen in model_generators:
for width in range(1, 50):
### END SOLUTION
# for model_gen in ? :
# for width in range( ? , ? ):
x_train, y_train = convert_time_series_to_train_data(train_set, width)
# train the model on the first 48 hours
x_train_i, y_train_i = x_train[ : -48, :], y_train[ : -48]
# use the last 48 hours for validation
x_val_i, y_val_i = x_train[-48 : ], y_train[-48 : ]
# there is a try except clause here because some models do not converge for some data
try:
# Constructs a new, untrained, model with the same parameters
model = sklearn.base.clone(model_gen, safe=True)
### BEGIN SOLUTION
model.fit(x_train_i, y_train_i)
this_score = model.score(x_val_i, y_val_i)
### END SOLUTION
# model.fit( ? , ? )
# this_score = ?
if this_score > best_score:
best_score = this_score
# Constructs a new, untrained, model with the same parameters
best_model = sklearn.base.clone(model, safe=True)
best_width = width
except:
pass
print(f'{best_model.__class__.__name__} was selected as the best model with a width of {best_width}',
f'and a validation R2 score of {best_score:.3f}')
### BEGIN SOLUTION
width = best_width
model = best_model
### END SOLUTION
# width = ?
# model = ?
x_train, y_train = convert_time_series_to_train_data(train_set, width)
### BEGIN SOLUTION
model.fit(x_train, y_train) # train on the full data set
### END SOLUTION
# model.fit( ? , ? )
n_predictions = 200
prediction = predict(model, train_set, width, n_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Linear Regression
Step2: In the plot above, the blue line is the number of page views and the orange line is the CPU load that viewing these pages generates on the server.
Step3: There clearly is a strong correlation between the page views and the CPU usage. Because of this correlation we can build a model to predict the CPU usage from the total page views. If we use a linear model we get a formula like the following
Step4: Now we need to feed the data to the model to fit it.
Step5: We can now inspect the coefficient $c_1$ and constant term (intercept) $c_0$ of the model
Step6: So this means that each additional page view adds about 0.11% CPU load to the server and all the other processes running on the server consume on average 0.72% CPU.
Step7: What is the expected CPU usage when we have 1000 page views per second? Is this technically possible? Why does the model predict it this way?
Step8: Now we plot the linear model together with our data to verify it captures the relationship correctly (the predict method can accept the entire total_page_views array at once).
Step9: Our model can calculate the R2 score indicating how well the linear model captures the data. A score of 1 means there is perfect linear correlation and the model can fit the data perfectly, a score of 0 (or lower) means that there is no correlation at all (and it does not make sense to try to model it that way). The score method takes the same arguments as the fit method.
Step10: 2.3 Extrapolation
Step11: Now let's plot what you have done.
Step12: Is this what you would expect? Can you see what's wrong?
Step13: The spikes of CPU usage are actually backups that run at night and they can be ignored. So repeat the exercise again but ignore these data points.
Step14: So what you should have learned from the previous exercise is that you should always look at your data and/or write scripts to inspect your data. Additionally extrapolation does not always work because there are no training examples in that area.
Step15: Let's have a look at this data.
Step16: We start again by creating a LinearRegression model.
Step17: Next we fit the model on the data, using multi_lin_model.fit(X,y). In contrast to the case above our page_views variable already has the correct shape to pass as the X matrix
Step18: Now, given the coefficients calculated by the model, which capture the contribution of each page view to the total CPU usage, we can start to answer some interesting questions. For example,
Step19: From this table we see that 'resources/js/basket.js' consumes the most CPU per view. It generates about 0.30% CPU load for each additional page view. 'products/science.html' on the other hand is much leaner and only consumes about 0.04% CPU per view. Does this seem to be correct if you look at the scatter plot above?
Step20: As you can see this term is very similar to the result achieved in single linear regression, but it is not entirely the same. This means that these models are not perfect. However, they seem to be able to give a reliable estimate.
Step21: As you can see from the R2 score, this model performs better. It can explain 91.5% of the variance instead of just 90.5% of the variance. So this gives the impression that this model is more accurate.
Step22: For our training set, we will calculate 10 y values from evenly spaced x values using this function.
Step23: Now let's try to fit a model to this data with linear regression.
Step24: As you can see this fit is not optimal.
Step25: As you can see above this function transforms $x$ into [$x^0$, $x^1$, $x^2$, $x^3$] with $x^0=1$ and $x^1 = x$. If you have 2 inputs it will also take the cross products so that [$x_1$, $x_2$] is transformed into [$1$, $x_1$, $x_2$, $x_1^2$, $x_1 x_2$, $x_2^2$, $x_1^3$, $x_1^2 x_2$, $x_1 x_2^2$, $x_2^3$].
Step26: In this example we only have 1 input so the number of features is always the degree + 1.
Step27: Now play with the degree of the polynomial expansion function below to create better features. Search for the optimal degree.
Step28: What do you notice? When does it work better? And when does it work best?
Step29: If everything is correct your score is very close to 1, which means that we have built a model that can fit this data (almost) perfectly.
Step30: Now let's see how this model does on the test set.
Step31: As you can clearly see, this result is not that good. Why do you think this is?
Step32: Is this what you expect?
Step33: What did you observe? And what is the method learning? And how can you avoid this?
Step34: 5. Over-fitting and Cross-Validation
Step35: Now let's train on the entire train set (including the validation set) and test this result on the test set with the following code.
Step36: As you can see this approach works to select the optimal degree. Usually the test score is lower than the validation score, but in this case it is not, because the test data doesn't contain noise.
Step37: Let's plot these results in a box plot to get an idea of how well the models performed on average.
Step38: Next we will compute the best degree.
Step39: Now let's train the model on the entire train set (including the validation set) and have a look at the result.
Step40: As you can see this automatic way of selecting the optimal degree has resulted in a good fit for the sine function.
Step41: As you can see above, the result of Ridge Regression is not as good as reducing the number of features in this example. However, it works a lot better than without regularisation (try that). In the example above you will notice that it makes the result a lot smoother and removes the unwanted spikes. It will actually make sure that if you have too many features you still get a reasonable result. So this means that it should be in your standard toolkit.
Step42: As you can see, the extrapolation results for non-linear regression are even worse than those of linear regression. This is because models only work well in the input space they have been trained in.
Step43: In a colored image this is easy to do, but when you remove the color it becomes much harder. Can you do the classification in the image below?
Step44: As you can see, classifying is very hard to do when you are not given the answer, even if you saw the solution earlier. But you will see that machine learning algorithms can solve this quite well if they can learn from examples.
Step45: Now let's plot the result.
Step46: As you can see a linear classifier returns a linear decision boundary.
Step47: If everything went well you should get a validation/test accuracy very close to 0.8.
Step48: As you can see they are quite powerful right out of the box without any parameter tuning. But we can make the results even better with some fine tuning.
Step49: The min_samples_leaf parameter sets the number of data points that can create a new branch/leaf in the tree. So in practice it limits the depth of the decision tree. The bigger this parameter is, the less deep the tree will be and the less likely each tree will over-fit.
Step50: As you can see increasing the number of estimators improves the model and reduces over-fitting. This parameter actually sets the number of trees in the random forest. The more trees there are in the forest the better the result is. But obviously it requires more computing power so that is the limiting factor here.
Step51: As you have noticed by now it seems that random forests are less powerful than linear regression with polynomial feature extraction. This is because these polynomials are ideally suited for this task. This also means that you could get a better result if you would also apply polynomial expansion for random forests. Try that below.
Step52: As you may have noticed, it is hard to get results that are better than the ones obtained using logistic regression. This illustrates that linear techniques are very powerful and often underrated. But in some situations they are not powerful enough and you need something stronger like a random forest or even neural networks (check this simulator if you want to play with the latter).
Step53: In the graph above you can clearly see that there is a rising trend in the data.
Step54: As you can see from the score above, the model is not perfect but it seems to get a relatively high score. Now let's make a prediction into the future and plot this.
Step55: As you can see from the image above the model doesn't quite seem to fit the data well. Let's see how we can improve this.
Step56: As you can see from the printout above, both x_train and y_train contain 303 data points. For x_train you see that there are now 5 features which contain the page views from the past 5 hours.
Step57: Now change the width parameter to see if you can get a better score.
Step58: As you can see in the image above the prediction is not what you would expect from a perfect model. What happened is that the model learned the training data by heart without 'understanding' what the data is really about. This phenomenon is called over-fitting and will always occur if you make your model too complex.
Step59: As you will have noticed by now, it is better to have a non-perfect score, which will give you a much better outcome. Now try the same thing for the following models
Step60: If everything is correct the LassoCV method was selected.
|
755 | <ASSISTANT_TASK:>
Python Code:
# TEST
import larch.numba as lx
import larch
import pandas as pd
pd.set_option("display.max_columns", 999)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.precision', 3)
larch._doctest_mode_ = True
import larch.numba as lx
d = lx.examples.MTC(format='dataset')
d1 = d.dc.query_cases("numveh <= 1")
d2 = d.dc.query_cases("numveh > 1")
m1 = lx.Model(d1, title="Cars<=1")
m2 = lx.Model(d2, title="Cars>=2")
from larch import P, X
for m in (m1,m2):
m.availability_var = 'avail'
m.choice_ca_var = 'chose'
m.utility_ca = (
+ X("totcost/hhinc") * P("costbyincome")
+ X("tottime * (altnum <= 4)") * P("motorized_time")
+ X("tottime * (altnum >= 5)") * P("nonmotorized_time")
+ X("ovtt/dist * (altnum <= 4)") * P("motorized_ovtbydist")
)
for a in [4,5,6]:
m.utility_co[a] += X("hhinc") * P("hhinc#{}".format(a))
for i in d['alt_names'][1:3]:
name = str(i.values)
a = int(i.altid)
m.utility_co[a] += (
+ X("vehbywrk") * P("vehbywrk_SR")
+ X("wkccbd+wknccbd") * P("wkcbd_"+name)
+ X("wkempden") * P("wkempden_"+name)
+ P("ASC_"+name)
)
for i in d['alt_names'][3:]:
name = str(i.values)
a = int(i.altid)
m.utility_co[a] += (
+ X("vehbywrk") * P("vehbywrk_"+name)
+ X("wkccbd+wknccbd") * P("wkcbd_"+name)
+ X("wkempden") * P("wkempden_"+name)
+ P("ASC_"+name)
)
m.ordering = (
('LOS', ".*cost.*", ".*time.*", ".*dist.*",),
('Zonal', "wkcbd.*", "wkempden.*",),
('Household', "hhinc.*", "vehbywrk.*",),
('ASCs', "ASC.*",),
)
r1 = m1.maximize_loglike()
r2 = m2.maximize_loglike()
# TEST
from pytest import approx
assert r1.loglike == approx(-1049.2796388550328)
assert r2.loglike == approx(-2296.667143538367)
assert r1.n_cases == 1221
assert r2.n_cases == 3808
assert 'success' in r1.message.lower()
assert 'success' in r2.message.lower()
for m in (m1,m2):
m.calculate_parameter_covariance()
m.loglike_null()
from larch.util.summary import joint_parameter_summary
joint_parameter_summary([m1,m2])
m2.utility_ca = m2.utility_ca.reformat_param('{}_2Cars')
for a in m2.utility_co:
m2.utility_co[a] = m2.utility_co[a].reformat_param(
pattern='(ASC)', repl='ASC_2Cars'
)
mg = lx.ModelGroup([m1,m2])
# TEST
assert mg.loglike('null') == approx(-7309.600971749634)
mg.set_cap()
rg = mg.maximize_loglike()
mg.calculate_parameter_covariance()
rg
# TEST
assert rg.loglike == approx(-3406.7232213364787)
assert rg.n_cases == 5029
mg.ordering = (
('LOS', ".*cost.*", ".*time.*", ".*dist.*",),
('Zonal', "wkcbd.*", "wkempden.*",),
('Household', "hhinc.*", "vehbywrk.*",),
('ASCs', "ASC.*",),
)
mg.parameter_summary()
# TEST
import re, platform
mash = lambda x: re.sub('\\s+', ' ', str(x)).strip()
if platform.system() != "Windows":
assert mash(mg.parameter_summary().data) == mash('''
Value Std Err t Stat Signif Null Value
Category Parameter
LOS costbyincome -0.0174 0.0117 -1.49 0.0
costbyincome_2Cars -0.102 0.0154 -6.60 *** 0.0
motorized_time -0.0196 0.00567 -3.46 *** 0.0
motorized_time_2Cars -0.0195 0.00496 -3.94 *** 0.0
nonmotorized_time -0.0442 0.00776 -5.69 *** 0.0
nonmotorized_time_2Cars -0.0471 0.00868 -5.43 *** 0.0
motorized_ovtbydist -0.100 0.0237 -4.23 *** 0.0
motorized_ovtbydist_2Cars -0.187 0.0309 -6.05 *** 0.0
Zonal wkcbd_Bike 0.460 0.364 1.27 0.0
wkcbd_SR2 0.233 0.124 1.88 0.0
wkcbd_SR3+ 1.04 0.192 5.42 *** 0.0
wkcbd_Transit 1.27 0.168 7.53 *** 0.0
wkcbd_Walk 0.100 0.250 0.40 0.0
wkempden_Bike 0.00137 0.00124 1.11 0.0
wkempden_SR2 0.00132 0.000392 3.36 *** 0.0
wkempden_SR3+ 0.00184 0.000461 3.99 *** 0.0
wkempden_Transit 0.00274 0.000358 7.64 *** 0.0
wkempden_Walk 0.00248 0.000730 3.39 *** 0.0
Household hhinc#4 -0.00101 0.00208 -0.48 0.0
hhinc#5 -0.00366 0.00527 -0.70 0.0
hhinc#6 -0.00232 0.00331 -0.70 0.0
vehbywrk_Bike -0.255 0.290 -0.88 0.0
vehbywrk_SR -0.314 0.0740 -4.24 *** 0.0
vehbywrk_Transit -0.638 0.132 -4.82 *** 0.0
vehbywrk_Walk -0.395 0.198 -1.99 * 0.0
ASCs ASC_2Cars_Bike -2.94 0.629 -4.67 *** 0.0
ASC_2Cars_SR2 -1.91 0.127 -15.06 *** 0.0
ASC_2Cars_SR3+ -3.53 0.173 -20.43 *** 0.0
ASC_2Cars_Transit -1.47 0.320 -4.60 *** 0.0
ASC_2Cars_Walk -1.00 0.517 -1.94 0.0
ASC_Bike -1.39 0.439 -3.17 ** 0.0
ASC_SR2 -1.58 0.126 -12.57 *** 0.0
ASC_SR3+ -3.26 0.206 -15.84 *** 0.0
ASC_Transit -0.747 0.271 -2.76 ** 0.0
ASC_Walk 0.314 0.416 0.75 0.0
''')
mg.estimation_statistics()
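# --- Illustrative aside (not part of the original example) ---
# A likelihood-ratio test is the usual way to check whether pooling parameters across the
# two segments is acceptable: the independently estimated models (r1, r2) are the
# unrestricted case, while the joint model (rg) restricts the Zonal and Household
# parameters to be equal across segments. The degrees of freedom equal the number of
# equality restrictions; here each segment model estimates 26 parameters on its own and
# the joint model estimates 35, giving 2*26 - 35 = 17, but re-count these if you change
# the specification. Assumes scipy is installed.
from scipy.stats import chi2
lr_statistic = 2 * ((r1.loglike + r2.loglike) - rg.loglike)
n_restrictions = 2 * 26 - 35  # = 17, see comment above
print('LR statistic:', lr_statistic, ' p-value:', chi2.sf(lr_statistic, df=n_restrictions))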
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example, we're going to re-create the market segmentation
Step2: We can use the query_cases method to create two separate datasets for this work,
Step3: We will then construct the same model structure in each, using the
Step4: Independent Estimation
Step5: To have t-statistics and $\rho^2_{\oslash}$ values, we'll also need
Step6: We can generate a side-by-side summary of the
Step7: Joint Estimation
Step8: To change just a selection of parameters, there's a regular
Step9: Then, we can put our models together into a ModelGroup for
Step10: The estimation interface for a ModelGroup is the
Step11: To review the estimation results, we can use ordering,
|
756 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import logging
logging.basicConfig(level=40)
logger = logging.getLogger()
import numpy as np
import math
import random
from neon.datasets.dataset import Dataset
class GeneratedDS(Dataset):
# for each example we will generate 400 time steps
feature_count = 400
# number of examples to generate
num_train_examples = 1000
num_test_examples = 500
# We will also give names to our classes to make reading easier
human_labels = ['Active-Sitting', 'Advanced-Wobble-Board-Stand', 'Arm-Circles', 'Back-Extension', 'Bicep-Curl', 'Bosu-Balance', 'Bosu-Glute-Press', 'Bosu-Sit', 'Calf-Press', 'Captains-Chair', 'Chest-Fly', 'Chest-Press', 'Crunches', 'Decline-Chest-Press', 'Deltoid-Fly', 'Eversion', 'External-Rotation', 'Forearm-Curl', 'Front-Deltoid-Raise', 'Glute-Press', 'Glute-Squeeze', 'Hip-Abduction', 'Hip-Adduction', 'Incline-Chest-Press', 'Internal-Rotation', 'Inversion', 'Knee-Lift', 'Lat-Pull-down', 'Lateral-Deltoid-Raise', 'Lateral-Lunges-With-Medicine-Ball', 'Leg-Curl', 'Leg-Extension', 'Lunges', 'Oblique-Crunches', 'Oblique-Engagement', 'Oblique-Twist-With-Medicine-Ball', 'One-Leg-Stand', 'Plank', 'Pull-ups', 'Push-ups']
num_classes = len(human_labels)
# To the underlying sine waves we will add random noise with a maximum of noise_strength
noise_strength = 5.0 / num_classes
def human_label_for(self, id):
return self.human_labels[id]
# Get the characteristic (but madup) sinus curve of an excercise. Each exercise will
# feature a different period
def period(self, exercise_class):
return (exercise_class * 2.0 + 1) / self.num_classes * 10
def amplitude(self, exercise_class):
return exercise_class * 0.5 / self.num_classes + 0.5
# Add some 'measurement' noise to the data to make the challenge slightly harder
def sin_with_noise(self, x, period):
noise = (random.random() - 0.5) * self.noise_strength
return math.sin(x * 1.0 / period) + noise
# Generate a single example for the given exercise class
def generate_example(self, exercise_class):
e = np.empty(self.feature_count)
phase = random.random() * (self.period(exercise_class) * math.pi * 2)
period = self.period(exercise_class)
amplitude = self.amplitude(exercise_class)
for t in range(0, self.feature_count):
e[t] = self.sin_with_noise(random.random() + t + phase, period) * amplitude
return e
# Choose a random gym exercise
def generate_label(self):
return random.randint(0, self.num_classes - 1)
# Generate `example_count` number of exercise examples
def generate_input(self, example_count):
d = np.empty((example_count, self.feature_count), dtype=float)
l = np.zeros((example_count, self.num_classes), dtype=float)
for i in range(0, example_count):
label = self.generate_label()
l[i, label] = 1
d[i, :] = self.generate_example(label)
return d, l
# Now we need to do some plumbing to get the data into neon.
def load(self, **kwargs):
# Generate training and test datasets
(data, labels) = self.generate_input(self.num_train_examples)
self.inputs['train'] = data
self.targets['train'] = labels
(data, labels) = self.generate_input(self.num_test_examples)
self.inputs['test'] = data
self.targets['test'] = labels
if 'format_data' not in kwargs or kwargs['format_data']:
self.format()
    # Generate the train / test data once, when the dataset object is constructed
    # (nothing is actually loaded from disk; the arrays are generated in memory).
def __init__(self):
(train_data, train_labels) = self.generate_input(self.num_train_examples)
self.X_train = train_data
self.y_train = train_labels
(test_data, test_labels) = self.generate_input(self.num_test_examples)
self.X_test = test_data
self.y_test = test_labels
# Now we need to do some plumbing to get the data into neon.
def load(self, **kwargs):
# Assign training and test datasets
self.inputs['train'] = self.X_train
self.targets['train'] = self.y_train
self.inputs['test'] = self.X_test
self.targets['test'] = self.y_test
self.format()
dataset = GeneratedDS()
print "Generated", dataset.num_train_examples, "training examples"
from matplotlib import pyplot, cm
plot_ids = np.random.random_integers(0, dataset.num_train_examples - 1, 3)
def label_of_example(i):
class_id = np.where(dataset.y_train[i] == 1)[0][0]
return dataset.human_label_for(class_id)
pyplot.figure(figsize=(20,5))
for i in plot_ids:
c = np.random.random((3,))
pyplot.plot(range(0, dataset.feature_count), dataset.X_train[i,], '-o', c=c)
pyplot.legend(map(label_of_example, plot_ids))
pyplot.title('Feature values for the first three training examples')
pyplot.show()
from ipy_table import *
from operator import itemgetter
train_dist = np.reshape(np.transpose(np.sum(dataset.y_train, axis=0)), (dataset.num_classes,1))
test_dist = np.reshape(np.transpose(np.sum(dataset.y_test, axis=0)), (dataset.num_classes,1))
train_ratio = train_dist / dataset.num_train_examples
test_ratio = test_dist / dataset.num_test_examples
# Fiddle around to get it into table shape
table = np.hstack((np.zeros((dataset.num_classes,1), dtype=int), train_dist, train_ratio, test_dist, test_ratio))
table = np.vstack((np.zeros((1, 5), dtype=int), table)).tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_classes))
for i,s in enumerate(human_labels):
table[i + 1][0] = s
table.sort(lambda x,y: cmp(x[1], y[1]))
table[0][0] = ""
table[0][1] = "Train"
table[0][2] = "Train %"
table[0][3] = "Test"
table[0][4] = "Test %"
make_table(table)
set_global_style(float_format='%0.0f', align="center")
set_column_style(2, float_format='%0.2f%%')
set_column_style(4, float_format='%0.2f%%')
set_column_style(0, align="left")
from os.path import expanduser, exists
import os
from neon.backends import gen_backend
from neon.layers import FCLayer, DataLayer, CostLayer
from neon.models import MLP
from neon.transforms import RectLin, Logistic, CrossEntropy
from neon.experiments import FitPredictErrorExperiment
import cPickle as pkl
import shutil
# Directory where model snapshots are stored
working_directory = expanduser('~/data/signal-mlp')
# Make sure there is no old model lying around
if exists(working_directory):
shutil.rmtree(working_directory)
os.makedirs(working_directory)
train_err = []
test_err = []
# We will see each training example exactly `max_epochs` times during training
max_epochs = 20
# Batch size is the number of examples that are grouped together and will together result in
# a single update of the network. One batch should be able to contain one example of each class.
batch_size = 32
print 'Epochs: %d Batch-Size: %d' % (max_epochs, batch_size)
# Setup of the network trainin parameters. Be carefull when choosing the learning rate
lrule = {'lr_params': {
'learning_rate': 0.001,
'momentum_params': {
'coef': 0.9,
'type': 'constant'}},
'type': 'gradient_descent_momentum'}
for num_epochs in range(0,max_epochs+1):
# Set up the layers of our MLP
layers = []
layers.append(DataLayer(nout=dataset.feature_count))
layers.append(FCLayer(nout=50, activation=RectLin(), lrule_init=lrule))
layers.append(FCLayer(nout=50, activation=RectLin()))
layers.append(FCLayer(nout=GeneratedDS.num_classes, activation=Logistic()))
layers.append(CostLayer(cost=CrossEntropy()))
# Set up model and experiment
model = MLP(num_epochs=num_epochs, batch_size=batch_size, layers=layers,
serialized_path=working_directory+'/signal-mlp.prm')
# We will choose a fixed seed to create reproducable results
backend = gen_backend(rng_seed=123)
experiment = FitPredictErrorExperiment(model=model,
backend=backend,
dataset=dataset)
# Run the training, and dump weights
dest_path = working_directory+'/signal-ep' + str(num_epochs) + '.prm'
print 'Trained for '+ str(num_epochs) +' epochs'
if num_epochs > 0:
res = experiment.run()
train_err.append(res['train']['MisclassPercentage_TOP_1'])
test_err.append(res['test']['MisclassPercentage_TOP_1'])
# Save the weights at this epoch
shutil.copy2(working_directory+'/signal-mlp.prm', dest_path)
else:
params = layers[1].weights.asnumpyarray()
pkl.dump(params, open(dest_path,'w'))
print 'Finished training the MLP.'
import numpy as np
from matplotlib import pyplot, cm
from IPython.html import widgets
from IPython.html.widgets import interact
nrows = 4
ncols = 5
fr = int(np.sqrt(dataset.feature_count))  # side length of one square filter image
def plot_filters(**kwargs):
n = kwargs['n']
    dest_path = expanduser('~/data/signal-mlp/signal-ep' + str(n) + '.prm')
params = pkl.load(open(dest_path, 'r'))
if n>0:
wts = params['layer_1']['weights']
else:
wts = params
fi = 0
W = np.zeros((fr*nrows, fr*ncols))
for row, col in [(row, col) for row in range(nrows) for col in range(ncols)]:
        W[fr*row:fr*(row+1), fr*col:fr*(col+1)] = wts[fi].reshape(fr, fr)
fi = fi + 1
pyplot.matshow(W, cmap=cm.gray)
    pyplot.title('Visualizing the first-layer weights in epoch ' + str(n))
pyplot.show()
_i = interact(plot_filters,
n=widgets.IntSliderWidget(description='epochs',
min=0, max=max_epochs, value=0))
pyplot.plot(range(1, max_epochs+1), train_err, linewidth=3, label='train')
pyplot.plot(range(1, max_epochs+1), test_err, linewidth=3, label='test')
pyplot.grid()
pyplot.legend()
pyplot.xlabel("epoch")
pyplot.ylabel("error %")
pyplot.show()
from sklearn.metrics import confusion_matrix
from ipy_table import *
# confusion_matrix(y_true, y_pred)
predicted, actual = model.predict_fullset(dataset, "test")
y_pred = np.argmax(predicted.asnumpyarray(), axis = 0)
y_true = np.argmax(actual.asnumpyarray(), axis = 0)
confusion_mat = confusion_matrix(y_true, y_pred, range(0,dataset.num_classes))
# Fiddle around with cm to get it into table shape
confusion_mat = np.vstack((np.zeros((1,dataset.num_classes), dtype=int), confusion_mat))
confusion_mat = np.hstack((np.zeros((dataset.num_classes + 1, 1), dtype=int), confusion_mat))
table = confusion_mat.tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_classes))
for i,s in enumerate(human_labels):
table[0][i+1] = s
table[i+1][0] = s
table[0][0] = "actual \ predicted"
mt = make_table(table)
set_row_style(0, color='lightGray', rotate = "315deg")
set_column_style(0, color='lightGray')
set_global_style(align='center')
for i in range(1, dataset.num_classes + 1):
for j in range(1, dataset.num_classes + 1):
if i == j:
set_cell_style(i,j, color='lightGreen', width = 80)
elif table[i][j] > 20:
set_cell_style(i,j, color='Pink')
elif table[i][j] > 0:
set_cell_style(i,j, color='lightYellow')
mt
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To exercise our MLP skills, we will start with a very simple dataset. It consists of wave forms over time, which can be thought of as captured accelerometer data from repetitive gym exercises.
Step2: Don't worry too much about the data generation. It is totally made up, and in a real-world problem that part would be handled by nature. In that case you should think of the dataset as a black box with no certain knowledge about how the data was generated.
Step3: Visualizing the data as early in the process as possible is very important. It allows you to get a first glimpse of the data and to instantly see if there is something wrong with the preprocessing.
Step4: Now we are going to dive into the actual model creation. We will start with a multilayer perceptron having 2 hidden layers.
Step5: To check whether the network is learning something, we will plot the weight matrices at the different training epochs. They should show some structure. If they are completely white or black, the training collapsed and you probably need to choose a different learning rate or model.
Step6: Another good thing to visualize is the development of the test and train loss over the epochs. Train and test loss should be similar. If they differ too much, the model suffers from over- or underfitting.
Step7: As a final evaluation step we will create a confusion matrix to see whether certain classes get mistaken for one another very often. If that is the case, one should try to get more training examples for those classes.
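To act on that, it can also help to rank the most frequent confusions numerically. Below is a minimal sketch (not part of the original notebook) that assumes cm is the raw, unpadded matrix returned by confusion_matrix and labels is the list of human-readable class names; it simply sorts the off-diagonal counts.
import numpy as np

def most_confused_pairs(cm, labels, top=5):
    # Ignore the diagonal - we only care about misclassifications
    off_diag = cm.copy()
    np.fill_diagonal(off_diag, 0)
    # Flat indices of the largest off-diagonal entries, biggest first
    order = np.argsort(off_diag, axis=None)[::-1][:top]
    rows, cols = np.unravel_index(order, off_diag.shape)
    return [(labels[r], labels[c], off_diag[r, c]) for r, c in zip(rows, cols)]

# e.g. most_confused_pairs(confusion_mat, human_labels), called before the matrix is padded for display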
|
757 | <ASSISTANT_TASK:>
Python Code:
from pysal.lib.weights.contiguity import Queen
from pysal.lib import examples
import pysal.lib as lp
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
%matplotlib inline
f = gpd.read_file(examples.get_path("sids2.dbf"))
varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
varnames
variable = [np.array(f[variable]) for variable in varnames]
variable[0]
w = lp.io.open(examples.get_path("sids2.gal")).read()
w
from pysal.explore.esda.moran import Moran_BV_matrix
matrix = Moran_BV_matrix(variable, w, varnames = varnames)
matrix
from pysal.viz.splot.esda import moran_facet
moran_facet(matrix)
plt.show()
path = examples.get_path('columbus.shp')
gdf = gpd.read_file(path)
variables2 = gdf[['HOVAL', 'CRIME', 'INC', 'EW']]
variables2.head()
variables2.shape
w2 = Queen.from_shapefile(path)
w2
from pysal.explore.esda.moran import Moran_BV_matrix
matrix2 = Moran_BV_matrix(variables2, w2)
matrix2
from pysal.viz.splot.esda import moran_facet
moran_facet(matrix2)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Next, we can open a file containing pre-calculated spatial weights for "sids2.dbf". In case you don't have spatial weights, check out pysal.lib.weights, which provides many options for calculating your own.
Step3: Now we are ready to import and generate our Moran_BV_matrix
Step4: Let's visualise our matrix with splot.esda.moran_facet(). You will see Univariate Moran objects with a grey background, surrounded by all possible bivariate combinations of the variables in your input dataset.
Step5: Example 2
Step6: In order for moran_facet to generate sensible results, it is recommended to extract all columns you would specifically like to analyse and generate a new GeoDataFrame
Step7: We will now generate our own spatial weights leveraging pysal.lib and create a second matrix2 from our GeoDataFrame. Note that no list of varnames is needed; it is automatically extracted from the column names of your gdf.
Step8: As in the first example, we can now plot our data with a single splot call.
|
758 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
t = np.sort(np.append(
np.random.uniform(0, 3.8, 57),
np.random.uniform(5.5, 10, 68),
)) # The input coordinates must be sorted
yerr = np.random.uniform(0.08, 0.22, len(t))
y = 0.2 * (t-5) + np.sin(3*t + 0.1*(t-5)**2) + yerr * np.random.randn(len(t))
true_t = np.linspace(0, 10, 5000)
true_y = 0.2 * (true_t-5) + np.sin(3*true_t + 0.1*(true_t-5)**2)
plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3)
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.xlabel("x")
plt.ylabel("y")
plt.xlim(0, 10)
plt.ylim(-2.5, 2.5);
import celerite
from celerite import terms
# A non-periodic component
Q = 1.0 / np.sqrt(2.0)
w0 = 3.0
S0 = np.var(y) / (w0 * Q)
bounds = dict(log_S0=(-15, 15), log_Q=(-15, 15), log_omega0=(-15, 15))
kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0),
bounds=bounds)
kernel.freeze_parameter("log_Q") # We don't want to fit for "Q" in this term
# A periodic component
Q = 1.0
w0 = 3.0
S0 = np.var(y) / (w0 * Q)
kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0),
bounds=bounds)
gp = celerite.GP(kernel, mean=np.mean(y))
gp.compute(t, yerr) # You always need to call compute once.
print("Initial log likelihood: {0}".format(gp.log_likelihood(y)))
print("parameter_dict:\n{0}\n".format(gp.get_parameter_dict()))
print("parameter_names:\n{0}\n".format(gp.get_parameter_names()))
print("parameter_vector:\n{0}\n".format(gp.get_parameter_vector()))
print("parameter_bounds:\n{0}\n".format(gp.get_parameter_bounds()))
print(gp.get_parameter_names())
gp.freeze_parameter("kernel:terms[0]:log_omega0")
print(gp.get_parameter_names())
gp.thaw_parameter("kernel:terms[0]:log_omega0")
print(gp.get_parameter_names())
from scipy.optimize import minimize
def neg_log_like(params, y, gp):
gp.set_parameter_vector(params)
return -gp.log_likelihood(y)
initial_params = gp.get_parameter_vector()
bounds = gp.get_parameter_bounds()
r = minimize(neg_log_like, initial_params, method="L-BFGS-B", bounds=bounds, args=(y, gp))
gp.set_parameter_vector(r.x)
print(r)
gp.get_parameter_dict()
x = np.linspace(0, 10, 5000)
pred_mean, pred_var = gp.predict(y, x, return_var=True)
pred_std = np.sqrt(pred_var)
color = "#ff7f0e"
plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3)
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.plot(x, pred_mean, color=color)
plt.fill_between(x, pred_mean+pred_std, pred_mean-pred_std, color=color, alpha=0.3,
edgecolor="none")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim(0, 10)
plt.ylim(-2.5, 2.5);
omega = np.exp(np.linspace(np.log(0.1), np.log(20), 5000))
psd = gp.kernel.get_psd(omega)
plt.plot(omega, psd, color=color)
for k in gp.kernel.terms:
plt.plot(omega, k.get_psd(omega), "--", color=color)
plt.yscale("log")
plt.xscale("log")
plt.xlim(omega[0], omega[-1])
plt.xlabel("$\omega$")
plt.ylabel("$S(\omega)$");
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This plot shows the simulated data as black points with error bars and the true function is shown as a gray line.
Step2: Then we wrap this kernel in a GP object that can be used for computing the likelihood function.
Step3: There is a modeling language built into celerite that will come in handy. Other tutorials will go into more detail but here are some of the features that the modeling language exposes
Step4: You already saw that it is possible to freeze and thaw parameters above but here's what you would do if you wanted to freeze another parameter
Step5: Now we'll use the L-BFGS-B non-linear optimization routine from scipy.optimize to find the maximum likelihood parameters for this model.
Step6: With a small dataset like this, this optimization should have only taken a fraction of a second to converge. The maximum likelihood parameters are the following
Step7: Finally, let's see what the model predicts for the underlying function. A GP model can predict the (Gaussian) conditional (on the observed data) distribution for new observations. Let's do that on a fine grid
Step8: Let's plot this prediction and compare it to the true underlying function.
Step9: In this figure, the 1-sigma prediction is shown as an orange band and the mean prediction is indicated by a solid orange line. Comparing this to the true underlying function (shown as a gray line), we see that the prediction is consistent with the truth at all times and the uncertainty in the region of missing data increases as expected.
|
759 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
animals = ["Lion", "Tiger", "Monkey", None]
s = pd.Series(animals)
print(s)
print("The name of this Series: ", s.name)
numbers = [1, 2, 3, None]
pd.Series(numbers)
import numpy as np
np.NaN == None
np.NaN == np.NaN
np.isnan(np.NaN)
sports = {'Cricket': 'India', 'Football': 'America', 'Soccer': 'Brazil'}
s = pd.Series(sports)
s
s.index
s = pd.Series(['Cricket', 'Football', 'Soccer'], index = [ 'India', 'America', 'Brazil'])
s
s.iloc[0]
s.loc['America']
s = pd.Series(np.random.randint(0,1000,10000))
s.head()
%%timeit -n 100
summary = 0
for item in s:
summary += item
%%timeit -n 100
np.sum(s)
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for label, value in s.iteritems():
s.set_value(label, value + 2)
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for label, value in s.iteritems():
s.loc[label] = value + 2
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
s += 2
s = pd.Series([2,1,2])
s.loc['Animal'] = 'Bear'
s
original_sports = pd.Series({'Archery':'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan'})
cricket_loving_countries = pd.Series(['Australia', 'India', 'England'], index=['Cricket','Cricket','Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
all_countries
original_sports
all_countries['Cricket']
purchase_1 = pd.Series({'Name':'Kasi',
'Item purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name':'Pradeep',
'Item purchased': 'Cat Food',
'Cost': 21.50})
purchase_3 = pd.Series({'Name':'Sri',
'Item purchased': 'Bird Food',
'Cost': 5.50})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store1','Store1','Store2'])
df
print(df.loc['Store2'])
type(df.loc['Store2'])
print(df.loc['Store1'])
type(df.loc['Store1'])
df.T # This essentially turns your column names into indices
df.T.loc['Cost'] # We can then use the loc method
print(df['Item purchased'])
type(df['Item purchased'])
df.loc['Store1']['Cost']
df.loc[:, ['Name','Cost']]
df.drop('Store1')
df.drop('Cost',axis=1)
del df['Item purchased']
df
df['Location'] = None
df
costs = df['Cost']
costs
costs += 2
costs
df
!cat olympics.csv
df = pd.read_csv('olympics.csv')
df.head()
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
df.head()
df.columns
df.rename?
for col in df.columns:
if col[:2]=='01': # if the first two letters are '01'
df.rename(columns={col:'Gold'+col[4:]}, inplace=True) #mapping changes labels
if col[:2]=='02':
df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#'+col[1:]}, inplace=True)
df.head()
df['Gold']>0
only_gold = df.where(df['Gold']>0)
only_gold.head()
df['Gold'].count()
only_gold['Gold'].count()
only_gold = only_gold.dropna()
only_gold.head()
only_gold = df[df['Gold']>0]
only_gold.head()
#To get the number of countries who received at least one gold in Summer or Winter Olympics
len(df[(df['Gold']>0) | df['Gold.1']>0])
#Are there any countries which won a gold in winter olympics but never in summer olympics
df[(df['Gold']==0) & (df['Gold.1']>0)]
df['country'] = df.index
df = df.set_index('Gold')
df.head()
df = df.reset_index()
df.head()
df = pd.read_csv('census.csv')
df.head()
df['SUMLEV'].unique() #40 belongs to state level data and 50 belongs to county level data
df = df[df['SUMLEV']==50]
df.head()
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015'
]
df = df[columns_to_keep]
df.head()
df = df.set_index(['STNAME','CTYNAME'])
df.head()
df.loc['Michigan', 'Washtenaw County']
df.loc[[('Michigan','Washtenaw County'),('Michigan','Wayne County')]]
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df
df = df.set_index([df.index, 'Name'])
df.index.names = ['Location', 'Name']
df = df.append(pd.Series(data={'Cost': 3.00, 'Item Purchased': 'Kitty Food'}, name=('Store 2', 'Kevyn')))
df
df.reset_index()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Series is like a cross between a list and a dictionary. The items are stored in an order and there are labels with which you can retrieve them.
Step2: Querying a Series
Step3: iloc and loc are not methods, they are attributes.
Step4: Magic functions begin with a percentage sign. If we type the % sign and then hit the Tab key, we can see a list of the available magic functions. You could write your own magic functions too, but that's a little bit outside of the scope of this course. We're actually going to use what's called a cell magic function. These start with two percentage signs and wrap the code in the current Jupyter cell. The function we're going to use is called timeit. And as you may have guessed from the name, this function will run our code a few times to determine, on average, how long it takes.
Step5: A related feature in Pandas and NumPy is called broadcasting. With broadcasting, you can apply an operation to every value in the series, changing the series. For instance, if we wanted to increase every random variable by 2, we could do so quickly using the += operator directly on the series object.
Step6: But if you find yourself iterating through a series, you should question whether you're doing things in the best possible way. Here's how we would do this using the series set value method.
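As a side note (not from the lecture): Series.set_value was deprecated and later removed in newer pandas releases; the label-based .at accessor is the usual replacement if you really do need an explicit loop, e.g.:
s = pd.Series(np.random.randint(0, 1000, 10000))
for label, value in s.items():
    s.at[label] = value + 2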
Step7: Amazing. Not only is it significantly faster, but it's more concise and maybe even easier to read too. The typical mathematical operations you would expect are vectorized, and the NumPy documentation outlines what it takes to create vectorized functions of your own. One last note on using the indexing operators to access series data. The .loc attribute lets you not only modify data in place, but also add new data as well. If the value you pass in as the index doesn't exist, then a new entry is added. And keep in mind, indices can have mixed types. While it's important to be aware of the typing going on underneath, Pandas will automatically change the underlying NumPy types as appropriate.
Step8: There are a couple of important considerations when using append. First, Pandas is going to take your series and try to infer the best data types to use. In this example, everything is a string, so there's no problems here. Second, the append method doesn't actually change the underlying series. It instead returns a new series which is made up of the two appended together. We can see this by going back and printing the original series of values and seeing that they haven't changed. This is actually a significant issue for new Pandas users who are used to objects being changed in place. So watch out for it, not just with append but with other Pandas functions as well.
Step9: Finally, we see that when we query the appended series for those who have cricket as their national sport, we don't get a single value, but a series itself. This is actually very common, and if you have a relational database background, this is very similar to every table query resulting in a return set which itself is a table.
Step10: What if we want to select a column, for example to get a list of all the costs?
Step11: Since iloc and loc are used for row selection, the Pandas developers reserved the indexing operator directly on the DataFrame for column selection. In a Pandas DataFrame, columns always have a name, so this selection is always label based, and not as confusing as it was when using the square bracket operator on the Series objects. For those familiar with relational databases, this operator is analogous to column projection.
Step12: Finally, since the result of using the indexing operator is the DataFrame or series, you can chain operations together. For instance, we could have rewritten the query for all Store 1 costs as
Step13: This looks pretty reasonable and gets us the result we wanted. But chaining can come with some costs and is best avoided if you can use another approach. In particular, chaining tends to cause Pandas to return a copy of the DataFrame instead of a view on the DataFrame. For selecting data, this is not a big deal, though it might be slower than necessary. If you are changing data though, this is an important distinction and can be a source of error.
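To make the hazard concrete, here is a small illustrative sketch (not from the lecture) contrasting a chained assignment, which may act on a temporary copy and trigger pandas' SettingWithCopyWarning, with a single .loc call that unambiguously targets the original DataFrame:
# Chained indexing: the assignment may land on a temporary copy, not on df
df[df['Cost'] > 3.0]['Cost'] = 99.0
# Single .loc call: modifies df itself
df.loc[df['Cost'] > 3.0, 'Cost'] = 99.0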
Step14: So that's selecting and projecting data from a DataFrame based on row and column labels. The key concepts to remember are that the rows and columns are really just for our benefit. Underneath this is just a two axis labeled array, and transposing the columns is easy. Also, consider the issue of chaining carefully, and try to avoid it; it can cause unpredictable results, where your intent was to obtain a view of the data but instead Pandas returns a copy. In the Pandas world, friends don't let friends chain calls. So if you see it, point it out, and share a less ambiguous solution.
Step15: It's easy to delete data in Series and DataFrames, and we can use the drop function to do so. This function takes a single parameter, which is the index or row label to drop. This is another tricky place for new users of pandas. The drop function doesn't change the DataFrame by default, and instead returns a copy of the DataFrame with the given rows removed. We can see that our original DataFrame is still intact. This is a very typical pattern in Pandas, where in-place changes to a DataFrame are only done if need be, usually on changes involving indices. So it's important to be aware of. Drop has two interesting optional parameters. The first is called inplace, and if it's set to True, the DataFrame will be updated in place, instead of a copy being returned. The second parameter is the axis which should be dropped. By default, this value is 0, indicating the row axis. But you could change it to 1 if you want to drop a column.
Step16: There is a second way to drop a column, however. And that's directly through the use of the indexing operator, using the del keyword. This way of dropping data, however, takes immediate effect on the DataFrame and does not return a view.
Step17: Finally, adding a new column to the DataFrame is as easy as assigning it to some value. For instance, if we wanted to add a new location as a column with default value of none, we could do so by using the assignment operator after the square brackets. This broadcasts the default value to the new column immediately.
Step18: The common workflow is to read your data into a DataFrame, then reduce this DataFrame to the particular columns or rows that you're interested in working with. As you've seen, the Pandas toolkit tries to give you views on a DataFrame. This is much faster than copying data and much more memory efficient too. But it does mean that if you're manipulating the data, you have to be aware that any changes to the DataFrame you're working on may have an impact on the base DataFrame you used originally. Here's an example using our same purchasing DataFrame from earlier. We can create a series based on just the cost category using the square brackets.
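For example, a minimal sketch of taking an explicit copy so that later changes do not propagate back to the original:
costs_copy = df['Cost'].copy()
costs_copy += 2   # df['Cost'] is left unchanged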
Step19: Then we can increase the cost in this series using broadcasting.
Step20: Now if we look at our original DataFrame, we see those costs have risen as well. This is an important consideration to watch out for. If you want to explicitly use a copy, then you should consider calling the copy method on the DataFrame for it first.
Step21: A common workflow is to read the dataset in, usually from some external file. We saw previously how you can do this using Python, and lists, and dictionaries. You can imagine how you might use those dictionaries to create a Pandas DataFrame. Thankfully, Pandas has built-in support for delimited files such as CSV files as well as a variety of other data formats including relational databases, Excel, and HTML tables. I've saved a CSV file called olympics.csv, which has data from Wikipedia that contains a summary list of the medals various countries have won at the Olympics. We can take a look at this file using the shell command cat, which we can invoke directly using the exclamation point. What happens here is that when the Jupyter notebook sees a line beginning with an exclamation mark, it sends the rest of the line to the operating system shell for evaluation. So cat works on Linux and Macs.
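For reference, those other readers follow the same pattern as read_csv; a few illustrative calls are sketched below (the file names and the connection object are placeholders, not files used in this lecture):
# xls_df = pd.read_excel('olympics.xlsx', sheet_name=0)          # Excel workbook
# tables = pd.read_html('medal_table.html')                      # returns a list of DataFrames
# sql_df = pd.read_sql('SELECT * FROM medals', some_connection)  # DB-API / SQLAlchemy connection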
Step22: We see from the cat output that there seems to be a numeric list of columns followed by a bunch of column identifiers. The column identifiers have some odd looking characters in them. This is the unicode numero sign, which means number of. Then we have rows of data, all comma separated. We can read this into a DataFrame by calling the read_csv function of the module. When we look at the DataFrame we see that the first cell has a NaN in it since it's an empty value, and the rows have been automatically indexed for us.
Step23: It seems pretty clear that the first row of data in the DataFrame is what we really want to see as the column names. It also seems like the first column in the data is the country name, which we would like to make an index. read_csv has a number of parameters that we can use to indicate to Pandas how rows and columns should be labeled. For instance, we can use the index_col parameter to indicate which column should be the index, and we can also use the header parameter to indicate which row from the data file should be used as the header.
Step24: Now this data came from the all-time Olympic Games medal table on Wikipedia. If we head to that page, we see that instead of writing out gold, silver and bronze, the table uses nice little icons with a one, a two, and a three in them. In our CSV file these were represented with the strings 01 !, 02 !, and so on. We also see that the column values are repeated, which really isn't good practice. Pandas recognizes this and appends a .1 and .2 to make the names unique. But this labeling isn't really as clear as it could be, so we should clean up the data file. We can of course do this just by editing the CSV file directly, but we can also set the column names from code. Pandas stores a list of all of the columns in the .columns attribute.
Step25: We can change the values of the column names by iterating over this list and calling the rename method of the DataFrame. Here we just iterate through all of the columns, looking to see if they start with 01, 02, 03 or the № character. If they do, we can call rename and set the columns parameter to a dictionary with the key being the column we want to replace and the value being the new name we want. Here we'll keep a slice of each old value, since we don't want to lose the unique appended suffixes. We'll also set the ever-important inplace parameter to True so Pandas knows to update this DataFrame directly.
Step26: Querying a DataFrame
Step27: The resultant series is indexed where the value of each cell is either true or false depending on whether a country has won at least one gold medal, and the index is the country name.
Step28: We see that the resulting DataFrame keeps the original indexed values, and only data from countries that met the condition are retained. All of the countries which did not meet the condition have NaN data instead. This is okay. Most statistical functions built into the DataFrame object ignore values of NaN.
Step29: Often we want to drop those rows which have no data. To do this, we can use the dropna function. You can optionally provide dropna the axis it should be considering. Remember that the axis is just an indicator for the columns or rows and that the default is zero, which means rows.
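As a small illustrative aside (separate from the Olympics data), the same call with axis=1 drops columns containing missing values instead of rows:
demo = pd.DataFrame({'A': [1, 2], 'B': [np.nan, 3]})
demo.dropna()        # drops the first row
demo.dropna(axis=1)  # drops column 'B' instead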
Step30: Extremely important, and often an issue for new users, is to remember that each Boolean mask needs to be encased in parenthesis because of the order of operations. This can cause no end of frustration if you're not used to it, so be careful.
Step31: You'll see that when we create a new index from an existing column it appears that a new first row has been added with empty values. This isn't quite what's happening. And we know this in part because an empty value is actually rendered either as a None or a NaN if the data type of the column is numeric. What's actually happened is that the index has a name, and the Jupyter notebook is simply showing that old column name in the output. We can get rid of the index completely by calling the reset_index function. This promotes the index into a column and creates a default numbered index.
Step32: One nice feature of pandas is that it has the option to do multi-level indexing. This is similar to composite keys in relational database systems. To create a multi-level index, we simply call set index and give it a list of columns that we're interested in promoting to an index.
Step33: I often find that I want to see a list of all the unique values in a given column. In this DataFrame, we can see the possible values for SUMLEV by using the unique function on the DataFrame. This is similar to the SQL distinct operator. Here we run unique on the SUMLEV column of our current DataFrame and see that there are only two different values, 40 and 50.
Step34: Let's get rid of all of the rows that are summaries at the state level and just keep the county data.
Step35: Also while this data set is interesting for a number of different reasons, let's reduce the data that we're going to look at to just the total population estimates and the total number of births. We can do this by creating a list of column names that we want to keep then project those and assign the resulting DataFrame to our df variable.
Step36: The US Census data breaks down estimates of population data by state and county. We can load the data and set the index to be a combination of the state and county values and see how pandas handles it in a DataFrame. We do this by creating a list of the column identifiers we want to have indexed. And then calling set index with this list and assigning the output as appropriate. We see here that we have a dual index, first the state name and then the county name.
Step37: An immediate question which comes up is how we can query this DataFrame. For instance, we saw previously that the loc attribute of the DataFrame can take multiple arguments, and it can query both the rows and the columns. When you use a MultiIndex, you must provide the arguments in order by the level you wish to query. Inside of the index, each column is called a level and the outermost column is level zero. For instance, if we want to see the population results for Washtenaw County, you'd pass the state of Michigan as the first argument.
Step38: You might be interested in just comparing two counties. For instance, Washtenaw and Wayne County which covers Detroit. To do this, we can pass the loc method, a list of tuples which describe the indices we wish to query. Since we have a MultiIndex of two values, the state and the county, we need to provide two values as each element of our filtering list.
Step39: Okay so that's how hierarchical indices work in a nutshell. They're a special part of the pandas library which I think can make management and reasoning about data easier. Of course hierarchical labeling isn't just for rows. For example, you can transpose this matrix and now have hierarchical column labels. And projecting a single column which has these labels works exactly the way you would expect it to.
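A minimal sketch of that idea, assuming df is still the census frame indexed by state and county:
transposed = df.T
transposed[('Michigan', 'Washtenaw County')]   # select one column by its two-level label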
Step40: Reindex the purchase records DataFrame to be indexed hierarchically, first by store, then by the name of the person.
Step41: If we want we can also reset the index as columns as follows
|
760 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from IPython.html.widgets import interact, interactive, fixed
from plotting_function import plotter
f = open('two_star_test_sol+ic.npz','r')
r = np.load('two_star_test_sol+ic.npz')
sol_test = r['arr_0']
ic_test = r['arr_1']
f.close()
interact(plotter,ic=fixed(ic_test),sol=fixed(sol_test),n=(0,len(np.linspace(0,1,80))-1,1));
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading data back from npz file
Step2: I use interact on my plotter function to plot the positions of the stars and galaxies in my system at every time value, with a slider to choose which time value to view
|
761 | <ASSISTANT_TASK:>
Python Code:
from mnist_model import mnist_model
from keras_tqdm import TQDMCallback, TQDMNotebookCallback
mnist_model(0, [TQDMNotebookCallback()])
mnist_model(0, [TQDMCallback()])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TQDM Progress Bar (ipywidget)
Step2: TQDM Progress Bar (text)
|
762 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(randn(5, 4),
index = 'A B C D E'.split(),
columns = 'W X Y Z'.split())
df
df['W']
# Pass a list of column names
df[['W', 'Z']]
# SQL Syntax (NOT RECOMMENDED!)
df.W
type(df['W'])
df['new'] = df['W'] + df['Y']
df
df.drop('new',
axis = 1)
# Not inplace unless specified!
df
df.drop('new',
axis = 1,
inplace = True)
df
df.drop('E',
axis = 0)
df.loc['A']
df.iloc[2]
df.loc['B', 'Y']
df.loc[['A', 'B'],
['W', 'Y']]
df
df > 0
df[df > 0]
df[df['W'] > 0]
df[df['W'] > 0]['Y']
df[df['W'] > 0][['Y', 'X']]
df[(df['W'] > 0) & (df['Y'] > 1)]
df
# Reset to default 0,1...n index
df.reset_index()
newind = 'CA NY WY OR CO'.split()
df['States'] = newind
df
df.set_index('States')
df
df.set_index('States',
inplace = True)
df
# Index Levels
outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
inside = [1, 2 , 3, 1, 2, 3]
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
df = pd.DataFrame(np.random.randn(6, 2),
index=hier_index,columns = ['A', 'B'])
df
df.loc['G1']
df.loc['G1'].loc[1]
df.index.names
df.index.names = ['Group', 'Num']
df
df.xs('G1')
df.xs(['G1', 1])
df.xs(1,
level = 'Num')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Selection and Indexing
Step2: DataFrame Columns are just Series
Step3: Creating a new column
Step4: Removing Columns
Step5: Can also drop rows this way
Step6: Selecting Rows
Step7: Or select based off of position instead of label
Step8: Selecting subset of rows and columns
Step9: Conditional Selection
Step10: For two conditions you can use | and & with parentheses
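For example, an "or" condition follows the same pattern (illustrative):
df[(df['W'] > 0) | (df['Y'] > 1)]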
Step11: More Index Details
Step12: Multi-Index and Index Hierarchy
Step13: Now let's show how to index this! For index hierarchy we use df.loc[]; if this were on the columns axis, you would just use normal bracket notation df[]. Calling one level of the index returns the sub-dataframe.
|
763 | <ASSISTANT_TASK:>
Python Code:
# 1. Input model parameters
parameters = pd.Series()
parameters['rhoa'] = .9
parameters['sigma'] = 0.001
print(parameters)
# 2. Define a function that evaluates the equilibrium conditions
def equilibrium_equations(variables_forward,variables_current,parameters):
# Parameters
p = parameters
# Variables
fwd = variables_forward
cur = variables_current
# Exogenous tfp
tfp_proc = p.rhoa*np.log(cur.a) - np.log(fwd.a)
# Stack equilibrium conditions into a numpy array
return np.array([
tfp_proc
])
# 3. Initialize the model
model = ls.model(equations = equilibrium_equations,
nstates=1,
varNames=['a'],
shockNames=['eA'],
parameters = parameters)
# 4. Have linearsolve compute the steady state numerically
guess = [1]
model.compute_ss(guess)
print(model.ss)
# 5. Find the log-linear approximation around the non-stochastic steady state and solve
model.approximate_and_solve()
# 6 (a) Compute impulse responses
model.impulse(T=41,t0=5,shock=None)
print(model.irs['eA'].head(10))
# 6 (b) Plot impulse responses
model.irs['eA'][['eA','a']].plot(lw='5',alpha=0.5,grid=True).legend(loc='upper right',ncol=2)
# 6(c) Compute stochastic simulation
model.stoch_sim(seed=192,covMat= [[parameters['sigma']]])
print(model.simulated.head(10))
# 6(d) Plot stochastic simulation
model.simulated[['eA','a']].plot(lw='5',alpha=0.5,grid=True).legend(loc='upper right',ncol=2)
# 1. Input model parameters
parameters = pd.Series()
parameters['rhoa'] = .9
parameters['sigma'] = 0.01
parameters['alpha'] = 0.35
parameters['delta'] = 0.025
parameters['s'] = 0.15
print(parameters)
# 2. Define a function that evaluates the equilibrium conditions
def equilibrium_equations(variables_forward,variables_current,parameters):
# Parameters
p = parameters
# Variables
fwd = variables_forward
cur = variables_current
# Production function
prod_fn = cur.a*cur.k**p.alpha - cur.y
# Capital evolution
capital_evolution = p.s*cur.a*cur.k**p.alpha + (1 - p.delta)*cur.k - fwd.k
# Exogenous tfp
tfp_proc = p.rhoa*np.log(cur.a) - np.log(fwd.a)
# Stack equilibrium conditions into a numpy array
return np.array([
prod_fn,
capital_evolution,
tfp_proc
])
# 3. Initialize the model
model = ls.model(equations = equilibrium_equations,
nstates=2,
varNames=['a','k','y'], # Any order as long as the state variables are named first
shockNames=['eA','eK'], # Name a shock for each state variable *even if there is no corresponding shock in the model*
parameters = parameters)
# 4. Have linearsolve compute the steady state numerically
guess = [1,4,1]
model.compute_ss(guess)
# 5. Find the log-linear approximation around the non-stochastic steady state and solve
model.approximate_and_solve()
# Print the coefficient matrix P
print(model.p)
# Print the coefficient matrix F
print(model.f)
# 6 (a) Compute impulse responses and print the computed impulse responses
model.impulse(T=41,t0=5,shock=None)
print(model.irs['eA'].head(10))
# 6(b) Plot the computed impulse responses to a TFP shock
fig = plt.figure(figsize=(12,4))
ax1 = fig.add_subplot(1,2,1)
model.irs['eA'][['a','y','k']].plot(lw='5',alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=2)
ax2 = fig.add_subplot(1,2,2)
model.irs['eA'][['eA','a']].plot(lw='5',alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2)
# 6(c) Compute stochastic simulation and print the simulated values
model.stoch_sim(seed=192,covMat= [[parameters['sigma'],0],[0,0]])
print(model.simulated.head(10))
# 6(d) Plot the computed stochastic simulation
fig = plt.figure(figsize=(12,4))
ax1 = fig.add_subplot(1,2,1)
model.simulated[['a','y','k']].plot(lw='5',alpha=0.5,grid=True,ax = ax1).legend(loc='upper right',ncol=3)
ax2 = fig.add_subplot(1,2,2)
model.simulated[['eA','a']].plot(lw='5',alpha=0.5,grid=True,ax = ax2).legend(loc='upper right',ncol=2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 2
Step2: The previous step constructs a log-linear approximation of the model and then solves for the endogenous variables as functions of the state variables and exogenous shocks only
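To make that concrete (a summary of linearsolve's general convention, not output generated in this notebook): the solution is returned in linear state-space form, with the non-state variables linear in the states, $u_t = F\,x_t$, and the states evolving as $x_{t+1} = P\,x_t + \epsilon_{t+1}$; the matrices printed via model.f and model.p in the code are this F and P.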
|
764 | <ASSISTANT_TASK:>
Python Code:
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test data:
Step2: (3) FeatureUnion
Step3: (4) Find the best result
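Besides the refitted estimator, the selected hyper-parameters and the cross-validation score can be read off directly (illustrative):
print(grid_search.best_params_)
print(grid_search.best_score_)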
|
765 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
np.__version__
author = 'kyubyong. longinglove@nate.com'
x = np.array([[1,4],[3,1]])
out = np.sort(x, axis=1)
x.sort(axis=1)
assert np.array_equal(out, x)
print out
surnames = ('Hertz', 'Galilei', 'Hertz')
first_names = ('Heinrich', 'Galileo', 'Gustav')
print np.lexsort((first_names, surnames))
x = np.array([[1,4],[3,1]])
out = np.argsort(x, axis=1)
print out
x = np.random.permutation(10)
print "x =", x
print "\nCheck the fifth element of this new array is 5, the first four elements are all smaller than 5, and 6th through the end are bigger than 5\n",
out = np.partition(x, 5)
x.partition(5) # in-place equivalent
assert np.array_equal(x, out)
print out
x = np.random.permutation(10)
print "x =", x
partitioned = np.partition(x, 3)
indices = np.argpartition(x, 3)
print "partitioned =", partitioned
print "indices =", partitioned
assert np.array_equiv(x[indices], partitioned)
x = np.random.permutation(10).reshape(2, 5)
print "x =", x
print "maximum values =", np.max(x, 1)
print "max indices =", np.argmax(x, 1)
print "minimum values =", np.min(x, 1)
print "min indices =", np.argmin(x, 1)
x = np.array([[np.nan, 4], [3, 2]])
print "maximum values ignoring NaNs =", np.nanmax(x, 1)
print "max indices =", np.nanargmax(x, 1)
print "minimum values ignoring NaNs =", np.nanmin(x, 1)
print "min indices =", np.nanargmin(x, 1)
x = np.array([[1, 2, 3], [1, 3, 5]])
print "Values bigger than 2 =", x[x>2]
print "Their indices are ", np.nonzero(x > 2)
assert np.array_equiv(x[x>2], x[np.nonzero(x > 2)])
assert np.array_equiv(x[x>2], np.extract(x > 2, x))
x = np.array([[1, 2, 3], [1, 3, 5]])
print np.flatnonzero(x>2)
assert np.array_equiv(np.flatnonzero(x), x.ravel().nonzero())
x = np.arange(-5, 4).reshape(3, 3)
print np.where(x <0, 0, x)
x = [1, 3, 5, 7, 9]
y = [0, 4, 2, 6]
np.searchsorted(x, y)
x = np.array([[0,1,7,0,0],[3,0,0,2,19]])  # use an ndarray so boolean indexing below works
print np.count_nonzero(x)
assert np.count_nonzero(x) == len(x[x!=0])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sorting
Step2: Q2. Sort pairs of surnames and first names and return their indices. (first by surname, then by name).
Step3: Q3. Get the indices that would sort x along the second axis.
Step4: Q4. Create an array such that its fifth element is the same as that of sorted x, with the remaining elements divided into those smaller than it (placed before) and those bigger than it (placed after).
Step5: Q5. Get the indices that would partition x such that its third element is the same as that of sorted x, with the remaining elements divided into those smaller than it (before) and those bigger than it (after).
Step6: Searching
Step7: Q7. Get the maximum and minimum values and their indices of x along the second axis, ignoring NaNs.
Step8: Q8. Get the values and indices of the elements that are bigger than 2 in x.
Step9: Q9. Get the indices of the elements that are bigger than 2 in the flattened x.
Step10: Q10. Check the elements of x and return 0 if it is less than 0, otherwise the element itself.
Step11: Q11. Get the indices where elements of y should be inserted to x to maintain order.
Step12: Counting
|
766 | <ASSISTANT_TASK:>
Python Code:
# To use the latest published `kubeflow-metadata` library, you can run:
!pip install kubeflow-metadata --user
# Install other packages:
!pip install pandas --user
# Then restart the Notebook kernel.
import pandas
from kubeflow.metadata import metadata
from datetime import datetime
from uuid import uuid4
METADATA_STORE_HOST = "metadata-grpc-service.kubeflow" # default DNS of Kubeflow Metadata gRPC serivce.
METADATA_STORE_PORT = 8080
ws1 = metadata.Workspace(
# Connect to metadata service in namespace kubeflow in k8s cluster.
store=metadata.Store(grpc_host=METADATA_STORE_HOST, grpc_port=METADATA_STORE_PORT),
name="xgboost-synthetic",
description="workspace for xgboost-synthetic artifacts and executions",
labels={"n1": "v1"})
r = metadata.Run(
workspace=ws1,
name="xgboost-synthetic-faring-run" + datetime.utcnow().isoformat("T") ,
description="a notebook run",
)
exec = metadata.Execution(
name = "execution" + datetime.utcnow().isoformat("T") ,
workspace=ws1,
run=r,
description="execution for training xgboost-synthetic",
)
print("An execution was created with id %s" % exec.id)
date_set_version = "data_set_version_" + str(uuid4())
data_set = exec.log_input(
metadata.DataSet(
description="xgboost synthetic data",
name="synthetic-data",
owner="someone@kubeflow.org",
uri="file://path/to/dataset",
version="v1.0.0",
query="SELECT * FROM mytable"))
print("Data set id is {0.id} with version '{0.version}'".format(data_set))
model_version = "model_version_" + str(uuid4())
model = exec.log_output(
metadata.Model(
name="MNIST",
description="model to recognize handwritten digits",
owner="someone@kubeflow.org",
uri="gcs://my-bucket/mnist",
model_type="neural network",
training_framework={
"name": "tensorflow",
"version": "v1.0"
},
hyperparameters={
"learning_rate": 0.5,
"layers": [10, 3, 1],
"early_stop": True
},
version=model_version,
labels={"mylabel": "l1"}))
print(model)
print("\nModel id is {0.id} and version is {0.version}".format(model))
metrics = exec.log_output(
metadata.Metrics(
name="MNIST-evaluation",
description="validating the MNIST model to recognize handwritten digits",
owner="someone@kubeflow.org",
uri="gcs://my-bucket/mnist-eval.csv",
data_set_id=str(data_set.id),
model_id=str(model.id),
metrics_type=metadata.Metrics.VALIDATION,
values={"accuracy": 0.95},
labels={"mylabel": "l1"}))
print("Metrics id is %s" % metrics.id)
serving_application = metadata.Execution(
name="serving model",
workspace=ws1,
description="an execution to represent model serving component",
)
# Notice that we use the model name, version, and uri to uniquely identify the existing model.
served_model = metadata.Model(
name="MNIST",
uri="gcs://my-bucket/mnist",
version=model.version,
)
m=serving_application.log_input(served_model)
print("Found the mode with id {0.id} and version '{0.version}'.".format(m))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a new Workspace and Run in a workspace
Step2: Create an execution in a run
Step3: Log a data set and a model
Step4: log_output logs an artifact as an output of this execution. Here exec.log_output accepts an artifact object as an argument; a Model is an artifact. Every artifact has different parameters such as name, uri, and hyperparameters. The way to create a Model artifact is to call the ready-to-use metadata.Model API and provide the arguments.
Step5: Log the evaluation of a model
Step6: Add Metadata for serving the model
|
767 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
768 | <ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
PROJECT = <YOUR PROJECT>
BUCKET = <YOUR PROJECT>
REGION = <YOUR REGION>
import os
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = "2.1"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
!gsutil ls gs://$BUCKET/taxifare/data
ls ./taxifare/trainer/
%%writefile ./taxifare/trainer/model.py
#TODO 1
import datetime
import logging
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow import feature_column as fc
logging.info(tf.version.VERSION)
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
def features_and_labels(row_data):
for unwanted_col in ['key']:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label
def load_dataset(pattern, batch_size, num_repeat):
dataset = tf.data.experimental.make_csv_dataset(
file_pattern=pattern,
batch_size=batch_size,
column_names=CSV_COLUMNS,
column_defaults=DEFAULTS,
num_epochs=num_repeat,
)
return dataset.map(features_and_labels)
def create_train_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=None)
return dataset.prefetch(1)
def create_eval_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=1)
return dataset.prefetch(1)
def parse_datetime(s):
if type(s) is not str:
s = s.numpy().decode('utf-8')
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in
)
@tf.function
def fare_thresh(x):
return 60 * activations.relu(x)
def transform(inputs, NUMERIC_COLS, STRING_COLS, nbuckets):
# Pass-through columns
transformed = inputs.copy()
del transformed['pickup_datetime']
feature_columns = {
colname: fc.numeric_column(colname)
for colname in NUMERIC_COLS
}
# Scaling longitude from range [-70, -78] to [0, 1]
for lon_col in ['pickup_longitude', 'dropoff_longitude']:
transformed[lon_col] = layers.Lambda(
lambda x: (x + 78)/8.0,
name='scale_{}'.format(lon_col)
)(inputs[lon_col])
# Scaling latitude from range [37, 45] to [0, 1]
for lat_col in ['pickup_latitude', 'dropoff_latitude']:
transformed[lat_col] = layers.Lambda(
lambda x: (x - 37)/8.0,
name='scale_{}'.format(lat_col)
)(inputs[lat_col])
# Adding Euclidean dist (no need to be accurate: NN will calibrate it)
transformed['euclidean'] = layers.Lambda(euclidean, name='euclidean')([
inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']
])
feature_columns['euclidean'] = fc.numeric_column('euclidean')
# hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
transformed['hourofday'] = layers.Lambda(
lambda x: tf.strings.to_number(
tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
name='hourofday'
)(inputs['pickup_datetime'])
feature_columns['hourofday'] = fc.indicator_column(
fc.categorical_column_with_identity(
'hourofday', num_buckets=24))
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = fc.bucketized_column(
feature_columns['pickup_latitude'], latbuckets)
b_dlat = fc.bucketized_column(
feature_columns['dropoff_latitude'], latbuckets)
b_plon = fc.bucketized_column(
feature_columns['pickup_longitude'], lonbuckets)
b_dlon = fc.bucketized_column(
feature_columns['dropoff_longitude'], lonbuckets)
ploc = fc.crossed_column(
[b_plat, b_plon], nbuckets * nbuckets)
dloc = fc.crossed_column(
[b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
feature_columns['pickup_and_dropoff'] = fc.embedding_column(
pd_pair, 100)
return transformed, feature_columns
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model(nbuckets, nnsize, lr):
# TODO
# input layer is all float except for pickup_datetime which is a string
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = (
set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
)
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname: layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(
inputs, NUMERIC_COLS, STRING_COLS, nbuckets=nbuckets)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
x = dnn_inputs
for layer, nodes in enumerate(nnsize):
x = layers.Dense(nodes, activation='relu', name='h{}'.format(layer))(x)
output = layers.Dense(1, name='fare')(x)
model = models.Model(inputs, output)
lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=lr_optimizer, loss='mse', metrics=[rmse, 'mse'])
return model
def train_and_evaluate(hparams):
batch_size = hparams['batch_size'] # TODO
nbuckets = hparams['nbuckets'] # TODO
lr = hparams['lr'] # TODO
nnsize = hparams['nnsize']
eval_data_path = hparams['eval_data_path']
num_evals = hparams['num_evals']
num_examples_to_train_on = hparams['num_examples_to_train_on']
output_dir = hparams['output_dir']
train_data_path = hparams['train_data_path']
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
savedmodel_dir = os.path.join(output_dir, 'export/savedmodel')
model_export_path = os.path.join(savedmodel_dir, timestamp)
checkpoint_path = os.path.join(output_dir, 'checkpoints')
tensorboard_path = os.path.join(output_dir, 'tensorboard')
if tf.io.gfile.exists(output_dir):
tf.io.gfile.rmtree(output_dir)
model = build_dnn_model(nbuckets, nnsize, lr)
logging.info(model.summary())
trainds = create_train_dataset(train_data_path, batch_size)
evalds = create_eval_dataset(eval_data_path, batch_size)
steps_per_epoch = num_examples_to_train_on // (batch_size * num_evals)
checkpoint_cb = callbacks.ModelCheckpoint(
checkpoint_path,
save_weights_only=True,
verbose=1
)
tensorboard_cb = callbacks.TensorBoard(tensorboard_path)
history = model.fit(
trainds,
validation_data=evalds,
epochs=num_evals,
steps_per_epoch=max(1, steps_per_epoch),
verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
callbacks=[checkpoint_cb, tensorboard_cb]
)
# Exporting the model with default serving function.
tf.saved_model.save(model, model_export_path)
return history
%%writefile taxifare/trainer/task.py
# TODO 1
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--batch_size",
help="Batch size for training steps",
type=int,
default=32
)
parser.add_argument(
"--eval_data_path",
help="GCS location pattern of eval files",
required=True
)
parser.add_argument(
"--nnsize",
help="Hidden layer sizes (provide space-separated sizes)",
nargs="+",
type=int,
default=[32, 8]
)
parser.add_argument(
"--nbuckets",
help="Number of buckets to divide lat and lon with",
type=int,
default=10
)
parser.add_argument(
"--lr",
help = "learning rate for optimizer",
type = float,
default = 0.001
)
parser.add_argument(
"--num_evals",
help="Number of times to evaluate model on eval data training.",
type=int,
default=5
)
parser.add_argument(
"--num_examples_to_train_on",
help="Number of examples to train on.",
type=int,
default=100
)
parser.add_argument(
"--output_dir",
help="GCS location to write checkpoints and export models",
required=True
)
parser.add_argument(
"--train_data_path",
help="GCS location pattern of train files containing eval URLs",
required=True
)
parser.add_argument(
"--job-dir",
help="this model ignores this field, but it is required by gcloud",
default="junk"
)
args = parser.parse_args()
hparams = args.__dict__
hparams.pop("job_dir", None)  # argparse stores "--job-dir" under the key "job_dir"
model.train_and_evaluate(hparams)
%%bash
EVAL_DATA_PATH=./taxifare/tests/data/taxi-valid*
TRAIN_DATA_PATH=./taxifare/tests/data/taxi-train*
OUTPUT_DIR=./taxifare-model
test ${OUTPUT_DIR} && rm -rf ${OUTPUT_DIR}
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python3 -m trainer.task \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTPUT_DIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size 5 \
--num_examples_to_train_on 100 \
--num_evals 1 \
--nbuckets 10 \
--lr 0.001 \
--nnsize 32 8
%%bash
# TODO 2
# Output directory and jobID
OUTDIR=gs://${BUCKET}/taxifare/trained_model_$(date -u +%y%m%d_%H%M%S)
JOBID=taxifare_$(date -u +%y%m%d_%H%M%S)
echo ${OUTDIR} ${REGION} ${JOBID}
gsutil -m rm -rf ${OUTDIR}
# Model and training hyperparameters
BATCH_SIZE=50
NUM_EXAMPLES_TO_TRAIN_ON=100
NUM_EVALS=100
NBUCKETS=10
LR=0.001
NNSIZE="32 8"
# GCS paths
GCS_PROJECT_PATH=gs://$BUCKET/taxifare
DATA_PATH=$GCS_PROJECT_PATH/data
TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
gcloud ai-platform jobs submit training $JOBID \
--module-name=trainer.task \
--package-path=taxifare/trainer \
--staging-bucket=gs://${BUCKET} \
--python-version=3.7 \
--runtime-version=${TFVERSION} \
--region=${REGION} \
-- \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTDIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size $BATCH_SIZE \
--num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
--num_evals $NUM_EVALS \
--nbuckets $NBUCKETS \
--lr $LR \
--nnsize $NNSIZE
%%writefile ./taxifare/Dockerfile
FROM gcr.io/deeplearning-platform-release/tf2-cpu
COPY . /code
RUN apt-get update && \
apt-get install --yes python3-pip && \
pip3 install /code
RUN python3 -m pip install --upgrade --quiet cloudml-hypertune
ENTRYPOINT ["python3", "/code/trainer/task.py"]
!gcloud auth configure-docker
%%bash
PROJECT_DIR=$(cd ./taxifare && pwd)
PROJECT_ID=$(gcloud config list project --format "value(core.project)")
IMAGE_NAME=taxifare_training_container
DOCKERFILE=$PROJECT_DIR/Dockerfile
IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_NAME
docker build $PROJECT_DIR -f $DOCKERFILE -t $IMAGE_URI
docker push $IMAGE_URI
%%bash
PROJECT_ID=$(gcloud config list project --format "value(core.project)")
BUCKET=$PROJECT_ID
REGION="us-central1"
# Output directory and jobID
OUTDIR=gs://${BUCKET}/taxifare/trained_model
JOBID=taxifare_container_$(date -u +%y%m%d_%H%M%S)
echo ${OUTDIR} ${REGION} ${JOBID}
gsutil -m rm -rf ${OUTDIR}
# Model and training hyperparameters
BATCH_SIZE=50
NUM_EXAMPLES_TO_TRAIN_ON=100
NUM_EVALS=100
NBUCKETS=10
NNSIZE="32 8"
# AI-Platform machines to use for training
MACHINE_TYPE=n1-standard-4
SCALE_TIER=CUSTOM
# GCS paths.
GCS_PROJECT_PATH=gs://$BUCKET/taxifare
DATA_PATH=$GCS_PROJECT_PATH/data
TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
IMAGE_NAME=taxifare_training_container
IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_NAME
gcloud beta ai-platform jobs submit training $JOBID \
--staging-bucket=gs://$BUCKET \
--region=$REGION \
--master-image-uri=$IMAGE_URI \
--master-machine-type=$MACHINE_TYPE \
--scale-tier=$SCALE_TIER \
-- \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTDIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size $BATCH_SIZE \
--num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
--num_evals $NUM_EVALS \
--nbuckets $NBUCKETS \
--nnsize $NNSIZE
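# --- Illustrative extra cell, not part of the original lab ---
# After the local training run earlier in this notebook, the exported SavedModel
# can be loaded back for a quick sanity check. The path below assumes
# OUTPUT_DIR=./taxifare-model from that local run.
import glob
import tensorflow as tf
export_dirs = sorted(glob.glob("./taxifare-model/export/savedmodel/*"))
if export_dirs:
    loaded = tf.saved_model.load(export_dirs[-1])
    print(list(loaded.signatures.keys()))  # expect ['serving_default']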
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make code compatible with AI Platform Training Service
Step2: Move code into a python package
Step3: Paste existing code into model.py
Step4: Modify code to read data from and write checkpoint files to GCS
Step5: Run trainer module package locally
Step6: Run your training package on Cloud AI Platform
Step7: (Optional) Run your training package using Docker container
Step8: Remark
|
769 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'year': [2012, 2012, 2013, 2014, 2014],
'reports': [4, 24, 31, 2, 3],
'coverage': [25, 94, 57, 62, 70]}
df = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])
df
df['name']
df[['name', 'reports']]
df[:2]
df[df['coverage'] > 50]
df[(df['coverage'] > 50) & (df['reports'] < 4)]
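# Illustrative extra example (not in the original): the same kind of selection can
# also be done by label with .loc, e.g. rows 'Cochice' through 'Santa Cruz' and
# only the 'name' and 'coverage' columns (label slices are inclusive).
df.loc['Cochice':'Santa Cruz', ['name', 'coverage']]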
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Dataframe
Step2: View Column
Step3: View Two Columns
Step4: View First Two Rows
Step5: View Rows Where Coverage Is Greater Than 50
Step6: View Rows Where Coverage Is Greater Than 50 And Reports Less Than 4
|
770 | <ASSISTANT_TASK:>
Python Code:
# packages for downloading the data
import os
import urllib
# packages for munging, plotting, machine learning
import pandas as pd
import numpy as np
import warnings
# xgboost uses the deprecated sklearn.cross_validate module, but we don't depend on it
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import xgboost
import matplotlib.pyplot as plt
%matplotlib inline
# package to help with parallelization
from concurrent import futures
# packages for CivisML
import civis
from civis.ml import ModelPipeline
print("Pandas version: {}".format(pd.__version__))
print("Civis version: {}".format(civis.__version__))
print("Xgboost version: {}".format(xgboost.__version__))
%%time
DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
if not os.path.isfile('breast_cancer_data.csv'):
print("Fetching data...")
opener = urllib.request.URLopener()
opener.retrieve(DATA_URL, "breast_cancer_data.csv")
print("File Size: {:.1f} KB".format(os.path.getsize('breast_cancer_data.csv') / (1024*100)))
print("Finished")
else:
print("breast_cancer_data.csv already downloaded")
COL_NAMES = ['sample_id', 'clump_thickness', 'uniformity_cell_size', 'uniformity_cell_shape',
'marginal_adhesion', 'epithelial_cell_size', 'bare_nuclei', 'bland_chromatin',
'normal_nucleoli', 'mitoses', 'is_cancer']
df = pd.read_csv("breast_cancer_data.csv", names=COL_NAMES, na_values='?')
print("Shape: ", df.shape)
df['is_cancer'] = [1 if val == 4 else (0 if val == 2 else np.nan) for val in df['is_cancer'] ]
# df.head()
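# Quick sanity check (illustrative, not in the original notebook): class balance
# and remaining missing values after recoding the label.
print(df['is_cancer'].value_counts(dropna=False))
print("rows with any missing value:", df.isnull().any(axis=1).sum())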
workflows = ['sparse_logistic',
'random_forest_classifier',
'extra_trees_classifier']
models = []
for wf in workflows:
model = ModelPipeline(model=wf,
dependent_variable="is_cancer",
model_name=wf)
models.append(model)
cv_params = {'learning_rate': [0.01, 0.2],
'n_estimators': [50, 200],
'max_depth': [1, 2]}
model = ModelPipeline(model='gradient_boosting_classifier',
dependent_variable="is_cancer",
cross_validation_parameters=cv_params,
model_name='best_gbc_from_hyperparam_search')
models.append(model)
xgb = xgboost.XGBClassifier(n_estimators=100, learning_rate=0.01)
model = ModelPipeline(model=xgb,
dependent_variable='is_cancer',
model_name='xgb_classifier')
models.append(model)
%%time
future_jobs = [model.train(df=df) for model in models]
futures.wait(future_jobs)
all_success = all([model_fut.succeeded() for model_fut in future_jobs])
print("All successful: {}".format(all_success))
def extract_roc(fut_job):
metrics = fut_job.metrics
return metrics['roc_curve']['fpr'], metrics['roc_curve']['tpr'], metrics['roc_auc']
def plot_one_roc_curve(ax, model_name, fpr, tpr, roc_auc):
ax.set_ylabel("True Positive Rate")
ax.set_xlabel("False Positive Rate")
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_title(model_name)
ax.plot(fpr, tpr, color='blue', label="Area: {:.4f}".format(roc_auc))
ax.plot([0,1], [0,1], 'k--')
ax.legend(loc="lower right")
rocs = [extract_roc(fut_job) for fut_job in future_jobs]
model_names = [model.model_name for model in models]
fig, axes = plt.subplots(2, 3, sharex="col", sharey="row", figsize=(12,8))
fig.suptitle("Model Predictions Compared")
fig.subplots_adjust(hspace=0.4)
# Last axis object has no data, hide it
axes[-1, -1].axis('off')
axes = axes.ravel()[:5]
assert len(rocs) == len(model_names)
assert len(axes) == len(model_names)
for ax, model_name, roc in zip(axes, model_names, rocs):
fpr, tpr, roc_auc = roc
plot_one_roc_curve(ax, model_name, fpr, tpr, roc_auc);
test_set = df.sample(frac=0.2, random_state=47)
%%time
future_scores = [model.predict(df=test_set) for model in models]
futures.wait(future_scores)
all_success_scoring = all([score_fut.succeeded() for score_fut in future_scores])
print("All successful scoring: {}".format(all_success_scoring))
score_tables = [score_job.table for score_job in future_scores]
score_tables[0].head()
extra_trees_job_id = future_jobs[2].job_id
extra_trees_run_id = future_jobs[2].run_id
print("Job Id: {}".format(extra_trees_job_id))
print("Run Id: {}".format(extra_trees_run_id))
loaded_model = ModelPipeline.from_existing(extra_trees_job_id, extra_trees_run_id)
loaded_fut = loaded_model.predict(df=test_set)
print(loaded_fut.result()['state'])
loaded_fut.table.head()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading Data
Step2: Munging Data
Step3: Training Models
Step4: Trying different hyperparameters
Step5: Creating your own estimator
Step6: Great! In about fifteen lines of code I've queued up about a dozen models, and can train them all in a loop. On the backend, CivisML will make it really simple to kick off parallel jobs for each of these models. Creating a ModelPipeline is much like creating a scikit-learn model, in that it doesn't cause anything to happen computationally until the user calls "train" or "predict."
Step7: The output of model.train() is a futures object. Calling .succeeded() on it will allow me to check the status of my job.
Step8: Visualizing Results
Step9: Scoring New, Unlabeled Data
Step10: Like training jobs, scoring jobs can also be fired off in parallel for some sweet speedy scoring goodness
Step11: Again, let's make sure that all our scoring jobs completed successfully, and re-run if necessary!
Step12: And now the most important part--getting predictions from my model!
Step13: Model Persistence
Step14: from_existing takes job_id and run_id, and reloads the trained model
|
771 | <ASSISTANT_TASK:>
Python Code:
class RevealAccess(object):
"""A data descriptor that sets and returns values
normally and prints a message logging their access.

Descriptor Example:
https://docs.python.org/3/howto/descriptor.html
"""
def __init__(self, initval=None, name='var'):
self.val = initval
self.name = name
def __get__(self, obj, objtype):
print('Retrieving', self.name)
return self.val
def __set__(self, obj, val):
print('Updating', self.name)
self.val = val
class MyClass(object):
x = RevealAccess(10, 'var "x"')
y = 5
# Let's test...
m1 = MyClass()
m2 = MyClass()
print("m1.x: ", m1.x)
m1.x = 20
m2.x = 10
print("m1.x: ", m1.x)
print("m2.x: ", m2.x)
print("m1.y: ", m1.y)
print(m1.x is m2.x)
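# Illustrative check (not in the original): the descriptor instance itself lives on
# the class, so both MyClass instances share the same RevealAccess object and the
# single value stored on it. Reading it via the class __dict__ bypasses __get__.
print(type(m1).__dict__['x'].val)  # the one shared value (10 after the sets above)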
class Property(object):
"Emulate PyProperty_Type() in Objects/descrobject.c"
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
return self.fget(obj)
def __set__(self, obj, value):
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(obj, value)
def __delete__(self, obj):
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(obj)
def getter(self, fget):
return type(self)(fget, self.fset, self.fdel, self.__doc__)
def setter(self, fset):
return type(self)(self.fget, fset, self.fdel, self.__doc__)
def deleter(self, fdel):
return Property(self.fget, self.fset, fdel, self.__doc__)
class C:
def getx(self):
print("getting...")
return self.__x
def setx(self, value):
print("setting...")
self.__x = value
def delx(self):
print("deleting...")
del self.__x
x = Property(getx, setx, delx, "I'm the 'x' property.")
class Generic:
def __init__(self, a=None, b=None):
self.__dict__['y'] = C()
self.y = a
self.__dict__['z'] = C()
self.z = b
@Property
def y(self):
return self.__dict__['y'].x
@y.setter
def y(self, val):
print("Generic setter for y")
self.__dict__['y'].x = val
@Property
def z(self):
return self.__dict__['z'].x
@z.setter
def z(self, val):
print("Generic setter for z")
self.__dict__['z'].x = val
me = Generic(3, "Hello")
print("me.y:", me.y)
print("me.z:", me.z)
little_me = Generic(4, "World")
print("little_me.y:", little_me.y)
print("little_me.z:", little_me.z)
me.y = 5
me.z = "Ciao"
little_me.y = 6
little_me.z = "Mondo"
print("me.y:", me.y)
print("me.z:", me.z)
print("little_me.y:", little_me.y)
print("little_me.z:", little_me.z)
class Generic:
def __init__(self, a=None, b=None):
self.y = a
self.z = b
@property
def y(self):
return self.__y
@y.setter
def y(self, val):
print("Generic setter for y")
self.__y = val
@property
def z(self):
return self.__z
@z.setter
def z(self, val):
print("Generic setter for z")
self.__z = val
me = Generic(3, "Hello")
print("me.y:", me.y)
print("me.z:", me.z)
little_me = Generic(4, "World")
print("little_me.y:", little_me.y)
print("little_me.z:", little_me.z)
me.y = 5
me.z = "Ciao"
little_me.y = 6
little_me.z = "Mondo"
print("me.y:", me.y)
print("me.z:", me.z)
print("little_me.y:", little_me.y)
print("little_me.z:", little_me.z)
class Generic2(Generic):
def method(this):
return ("this is: " +
("Instance" if isinstance(this, Generic2)
else "Class"))
class Generic3(Generic):
@classmethod
def method(this):
return ("this is: " +
("Instance" if isinstance(this, Generic2)
else "Class"))
me = Generic2(1,2)
print("On an instance: ", me.method())
print("On the class: ", Generic2.method(Generic2))
me = Generic3(1,2)
print("With @classmethod decorator: ", me.method())
import math
class Circle:
def __init__(self, r):
self.radius = r
@property
def area(self):
return self.radius ** 2 * math.pi
def __repr__(self):
return "Circle({})".format(self.radius)
the_circle = Circle(1)
print(the_circle) # triggers __repr__ in the absence of __str__
print("Area of the circle: {:f}".format(the_circle.area))
the_circle.radius = 2
print("Area of the circle: {:f}".format(the_circle.area))
try:
the_circle.area = 90
except AttributeError:
print("Can't set the area directly")
import unittest
class Circle:
"""setting either the radius or area attribute sets the other
as a dependent value. Initialized with radius only, unit
circle by default.
"""
def __init__(self, radius = 1):
self.radius = radius
@property
def area(self):
return self._area
@property
def radius(self):
return self._radius
@area.setter
def area(self, value):
self._area = value
self._radius = math.sqrt(self._area / math.pi)
@radius.setter
def radius(self, value):
self._radius = value
self._area = math.pi * (self._radius ** 2)
def __repr__(self):
return "Circle(radius = {})".format(self.radius)
class TestCircle(unittest.TestCase):
def testArea(self):
the_circle = Circle(1)
self.assertEqual(the_circle.area, math.pi, "Uh oh")
def testRadius(self):
the_circle = Circle(1)
the_circle.area = math.pi * 4 # power rule
self.assertEqual(the_circle.radius, 2, "Uh oh")
a = TestCircle() # the test suite
suite = unittest.TestLoader().loadTestsFromModule(a)
unittest.TextTestRunner().run(suite) # run the test suite
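# Illustrative usage sketch (not in the original): on the two-way Circle above,
# setting either attribute updates the other.
c = Circle(radius=3)
print(c.area)        # 9 * pi
c.area = math.pi     # shrink back to the unit circle
print(c.radius)      # 1.0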
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python for Everyone! Oregon Curriculum Network
Step2: y's value is an ordinary int, equivalently the value of MyClass.__dict__['y'], whereas the x attribute, a descriptor, will police getting and setting through __get__ and __set__ methods, using the name 'x' as a proxy to x.val behind the scenes (think of x.val as "more secret" as in less directly accessible).
Step3: The getter, setter and deleter methods allow swapping out new versions of fset, fget and fdel by keeping whatever is unchanged and making a new instance with a call to type(self) -- remember that types are callables.
Step4: Think of x as an object prepared to delegate to the three methods. Every time a new C instance is created, that instance is bound to a deeply internal "secret" __x. The public or class level x is a proxy to a set of instance methods living inside C.__dict__. Each instance of C talks to its respective self.__x i.e. don't confuse the shared nature of x with the private individuality of each self.__x
Step5: The reason for all the __getitem__ syntax i.e. talking to self.__dict__ in "longhand", is to avoid a recursive situation where a setter or getter calls itself. __getitem__ syntax lets us set and get in a way that bypasses __getattribute__ and its internal mechanisms, which are responsible for triggering the descriptor protocol in the first place.
Step6: Fun though that was, there's more indirection going on than necessary.
Step7: This time, we've cut out the middle man, C.
Step8: By the way, notice that method( ) has a single argument 'this', showing that 'self' is by convention and, furthermore, the value of 'this' will depend on whether method( ) is called
Step9: So that's a lot of fancy theory, but what might be a practical application of the above. Suppose we want a circle to let us modify its radius at will, and to treat area as an ordinary attribute nonetheless...
Step10: In decorating only the area method, we provide the area property with a getter, i.e. fget has been set to this method. No setter proxy (self.fset) has been defined, hence an assignment to the area property, which triggers its __set__ method, raises an AttributeError (see Property.__set__).
Step12: Might we make both radius and area into properties, such that setting either recalculates the other?
|
772 | <ASSISTANT_TASK:>
Python Code:
import os
os.environ['GRASS_OVERWRITE'] = '1'
import grass.script as gscript
gscript.run_command('g.region', n=225200, s=222500, w=637500, e=640000, raster='elevation')
gscript.run_command('v.random', output='input_points', npoints=20, seed=2, quiet=True)
from grass.pygrass.vector.geometry import Point
point = Point(638104, 223048)
elevation = 'elevation'
input_points = 'input_points'
viewshed_name = 'viewshed'
tmp_viewshed_name = 'tmp_viewshed'
viewshed_id = 1
gscript.run_command('r.viewshed', input=elevation, observer_elevation=3,
output=tmp_viewshed_name, coordinates=point.coords())
gscript.mapcalc(exp="{viewshed} = if({tmp}, {vid}, null())".format(viewshed=viewshed_name,
tmp=tmp_viewshed_name,
vid=viewshed_id))
cells = gscript.parse_command('r.univar', map=viewshed_name,
flags='g')['n']
print(cells)
print(gscript.region())
area = float(cells) * gscript.region()['nsres'] * gscript.region()['ewres']  # cell area = nsres * ewres
print(area)
visible_points = 'tmp_points'
gscript.run_command('r.to.vect', input=viewshed_name, output=viewshed_name,
type='area')
gscript.run_command('v.select', ainput=input_points, atype='point',
binput=viewshed_name, btype='area',
operator='overlap', flags='t', output=visible_points)
n_points_visible = gscript.vector_info_topo(visible_points)['points']
print(n_points_visible)
tmp_point = 'tmp_current_point'
if float(n_points_visible) >= 1:
gscript.write_command('v.in.ascii', input='-', stdin='%s|%s' % (point.x, point.y),
output=tmp_point)
distance = gscript.read_command('v.distance', from_=tmp_point, from_type='point', flags='p',
to=visible_points, to_type='point', upload='dist', dmin=1).strip()
distance = float(distance.splitlines()[1].split('|')[1])
else:
distance = 0
print(distance)
from grass.pygrass.vector import Vector
import grass.script as gscript
elevation = 'elevation'
input_points = 'input_points'
tmp_viewshed_name = 'tmp_viewshed'
tmp_visible_points = 'tmp_points'
tmp_point = 'tmp_current_point'
with Vector(input_points, mode='r') as points:
for point in points:
viewshed_id = str(point.cat)
viewshed_name = 'viewshed_' + viewshed_id
gscript.run_command('r.viewshed', input=elevation, observer_elevation=3,
output=tmp_viewshed_name, coordinates=point.coords())
gscript.mapcalc(exp="{viewshed} = if({tmp}, {vid}, null())".format(viewshed=viewshed_name,
tmp=tmp_viewshed_name,
vid=viewshed_id))
# viewshed size
cells = gscript.parse_command('r.univar', map=viewshed_name,
flags='g')['n']
area = float(cells) * gscript.region()['nsres'] * gscript.region()['ewres']
# visible points
gscript.run_command('r.to.vect', input=viewshed_name, output=viewshed_name,
type='area')
gscript.run_command('v.select', ainput=input_points, atype='point',
binput=viewshed_name, btype='area',
operator='overlap', flags='t', output=tmp_visible_points)
n_points_visible = gscript.vector_info_topo(tmp_visible_points)['points'] - 1
# distance to closest visible point
if float(n_points_visible) >= 1:
gscript.write_command('v.in.ascii', input='-', stdin='%s|%s' % (point.x, point.y),
output=tmp_point)
distance = gscript.read_command('v.distance', from_=tmp_point, from_type='point', flags='p',
to=tmp_visible_points, to_type='point', upload='dist', dmin=1).strip()
distance = float(distance.splitlines()[1].split('|')[1])
else:
distance = 0
print "%s, %d, %s, %.2f" % (viewshed_id, area, n_points_visible, distance)
from grass.pygrass.vector import Vector
import grass.script as gscript
elevation = 'elevation'
input_points = 'input_points'
#
# output vector
#
output_points = 'output_points'
tmp_viewshed_name = 'tmp_viewshed'
tmp_visible_points = 'tmp_points'
tmp_point = 'tmp_current_point'
#
# define columns of the attribute table of the output vector
#
columns = [('cat', 'INTEGER'),
('area', 'DOUBLE PRECISION'),
('n_points_visible', 'INTEGER'),
('distance_to_closest', 'DOUBLE PRECISION')]
#
# we can open the input vector and create and open the output vector at once
#
with Vector(input_points, mode='r') as points, \
Vector(output_points, mode='w', tab_cols=columns) as output:
for point in points:
viewshed_id = str(point.cat)
viewshed_name = 'viewshed_' + viewshed_id
gscript.run_command('r.viewshed', input=elevation, observer_elevation=3,
output=tmp_viewshed_name, coordinates=point.coords())
gscript.mapcalc(exp="{viewshed} = if({tmp}, {vid}, null())".format(viewshed=viewshed_name,
tmp=tmp_viewshed_name,
vid=viewshed_id))
# viewshed size
cells = gscript.parse_command('r.univar', map=viewshed_name,
flags='g')['n']
area = float(cells) * gscript.region()['nsres'] * gscript.region()['ewres']
# visible points
gscript.run_command('r.to.vect', input=viewshed_name, output=viewshed_name,
type='area')
gscript.run_command('v.select', ainput=input_points, atype='point',
binput=viewshed_name, btype='area',
operator='overlap', flags='t', output=tmp_visible_points)
n_points_visible = gscript.vector_info_topo(tmp_visible_points)['points'] - 1
# distance to closest visible point
if float(n_points_visible) >= 1:
gscript.write_command('v.in.ascii', input='-', stdin='%s|%s' % (point.x, point.y),
output=tmp_point)
distance = gscript.read_command('v.distance', from_=tmp_point, from_type='point', flags='p',
to=tmp_visible_points, to_type='point', upload='dist', dmin=1).strip()
distance = float(distance.splitlines()[1].split('|')[1])
else:
distance = 0
#
# write each point with its attributes
#
output.write(point, (area, n_points_visible, distance))
output.table.conn.commit()
print "%s, %d, %s, %.2f" % (viewshed_id, area, n_points_visible, distance)
with Vector(output_points, mode='r') as points:
# we can filter/sort the results
points.table.filters.select().order_by(u'area').get_sql()
print(points.table.execute().fetchall())
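# Illustrative follow-up (an assumption, not part of the original workflow): the
# attribute table can also be dumped with the v.db.select module for a quick look.
print(gscript.read_command('v.db.select', map=output_points))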
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We first generate some random input points within a specified region using v.random (we use a fixed seed here for reproducibility)
Step2: Workflow for 1 point
Step3: We now compute the viewshed from this point using r.viewshed and then change all invisible cells to null values (no data) and all visible cells to value 1 using raster algebra r.mapcalc. This is needed for converting it to the vector data model further on (vectorization).
Step4: As a first property of the viewshed, compute its area. The area is computed with r.univar as the number of non-null cells times the raster cell size. The result is in reported map units squared (i.e., square meters in this case).
Step5: The next task is to find and count the number of points contained in the input vector layer which are visible from the current point. One way to do this is to derive the vector layer of visible points by spatially overlapping the input points with the viewshed, for this see v.select. The viewshed must be first converted to the vector data model with r.to.vect.
Step6: We can now get the number of points using vector_info_topo, a wrapper function around v.info.
Step7: The last viewshed property we want to compute is the distance from the current point to the closest visible point.
Step8: Workflow for multiple points
Step9: Instead of printing the resulting properties on standard output, we save them into the attribute table of a new output vector layer. This we create by opening it in write mode and by passing as the parameters the columns of the attribute table.
Step10: Finally we will make sure that the new vector layer was created and attributes properly written
|
773 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow==2.4.1
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
qubit = cirq.GridQubit(0, 0)
# Define some circuits.
circuit1 = cirq.Circuit(cirq.X(qubit))
circuit2 = cirq.Circuit(cirq.H(qubit))
# Convert to a tensor.
input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])
# Define a circuit that we want to append
y_circuit = cirq.Circuit(cirq.Y(qubit))
# Instantiate our layer
y_appender = tfq.layers.AddCircuit()
# Run our circuit tensor through the layer and save the output.
output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)
print(tfq.from_tensor(input_circuit_tensor))
print(tfq.from_tensor(output_circuit_tensor))
def generate_data(qubits):
"""Generate training and testing data."""
n_rounds = 20 # Produces n_rounds * n_qubits datapoints.
excitations = []
labels = []
for n in range(n_rounds):
for bit in qubits:
rng = np.random.uniform(-np.pi, np.pi)
excitations.append(cirq.Circuit(cirq.rx(rng)(bit)))
labels.append(1 if (-np.pi / 2) <= rng <= (np.pi / 2) else -1)
split_ind = int(len(excitations) * 0.7)
train_excitations = excitations[:split_ind]
test_excitations = excitations[split_ind:]
train_labels = labels[:split_ind]
test_labels = labels[split_ind:]
return tfq.convert_to_tensor(train_excitations), np.array(train_labels), \
tfq.convert_to_tensor(test_excitations), np.array(test_labels)
sample_points, sample_labels, _, __ = generate_data(cirq.GridQubit.rect(1, 4))
print('Input:', tfq.from_tensor(sample_points)[0], 'Output:', sample_labels[0])
print('Input:', tfq.from_tensor(sample_points)[1], 'Output:', sample_labels[1])
def cluster_state_circuit(bits):
"""Return a cluster state on the qubits in `bits`."""
circuit = cirq.Circuit()
circuit.append(cirq.H.on_each(bits))
for this_bit, next_bit in zip(bits, bits[1:] + [bits[0]]):
circuit.append(cirq.CZ(this_bit, next_bit))
return circuit
SVGCircuit(cluster_state_circuit(cirq.GridQubit.rect(1, 4)))
def one_qubit_unitary(bit, symbols):
"""Make a Cirq circuit enacting a rotation of the bloch sphere about the X,
Y and Z axis, that depends on the values in `symbols`.
"""
return cirq.Circuit(
cirq.X(bit)**symbols[0],
cirq.Y(bit)**symbols[1],
cirq.Z(bit)**symbols[2])
def two_qubit_unitary(bits, symbols):
"""Make a Cirq circuit that creates an arbitrary two qubit unitary."""
circuit = cirq.Circuit()
circuit += one_qubit_unitary(bits[0], symbols[0:3])
circuit += one_qubit_unitary(bits[1], symbols[3:6])
circuit += [cirq.ZZ(*bits)**symbols[6]]
circuit += [cirq.YY(*bits)**symbols[7]]
circuit += [cirq.XX(*bits)**symbols[8]]
circuit += one_qubit_unitary(bits[0], symbols[9:12])
circuit += one_qubit_unitary(bits[1], symbols[12:])
return circuit
def two_qubit_pool(source_qubit, sink_qubit, symbols):
"""Make a Cirq circuit to do a parameterized 'pooling' operation, which
attempts to reduce entanglement down from two qubits to just one.
"""
pool_circuit = cirq.Circuit()
sink_basis_selector = one_qubit_unitary(sink_qubit, symbols[0:3])
source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])
pool_circuit.append(sink_basis_selector)
pool_circuit.append(source_basis_selector)
pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))
pool_circuit.append(sink_basis_selector**-1)
return pool_circuit
SVGCircuit(one_qubit_unitary(cirq.GridQubit(0, 0), sympy.symbols('x0:3')))
SVGCircuit(two_qubit_unitary(cirq.GridQubit.rect(1, 2), sympy.symbols('x0:15')))
SVGCircuit(two_qubit_pool(*cirq.GridQubit.rect(1, 2), sympy.symbols('x0:6')))
def quantum_conv_circuit(bits, symbols):
"""Quantum Convolution Layer following the above diagram.
Return a Cirq circuit with the cascade of `two_qubit_unitary` applied
to all pairs of qubits in `bits` as in the diagram above.
"""
circuit = cirq.Circuit()
for first, second in zip(bits[0::2], bits[1::2]):
circuit += two_qubit_unitary([first, second], symbols)
for first, second in zip(bits[1::2], bits[2::2] + [bits[0]]):
circuit += two_qubit_unitary([first, second], symbols)
return circuit
SVGCircuit(
quantum_conv_circuit(cirq.GridQubit.rect(1, 8), sympy.symbols('x0:15')))
def quantum_pool_circuit(source_bits, sink_bits, symbols):
"""A layer that specifies a quantum pooling operation.
A Quantum pool tries to learn to pool the relevant information from two
qubits onto 1.
"""
circuit = cirq.Circuit()
for source, sink in zip(source_bits, sink_bits):
circuit += two_qubit_pool(source, sink, symbols)
return circuit
test_bits = cirq.GridQubit.rect(1, 8)
SVGCircuit(
quantum_pool_circuit(test_bits[:4], test_bits[4:], sympy.symbols('x0:6')))
def create_model_circuit(qubits):
"""Create sequence of alternating convolution and pooling operators
which gradually shrink over time.
"""
model_circuit = cirq.Circuit()
symbols = sympy.symbols('qconv0:63')
# Cirq uses sympy.Symbols to map learnable variables. TensorFlow Quantum
# scans incoming circuits and replaces these with TensorFlow variables.
model_circuit += quantum_conv_circuit(qubits, symbols[0:15])
model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],
symbols[15:21])
model_circuit += quantum_conv_circuit(qubits[4:], symbols[21:36])
model_circuit += quantum_pool_circuit(qubits[4:6], qubits[6:],
symbols[36:42])
model_circuit += quantum_conv_circuit(qubits[6:], symbols[42:57])
model_circuit += quantum_pool_circuit([qubits[6]], [qubits[7]],
symbols[57:63])
return model_circuit
# Create our qubits and readout operators in Cirq.
cluster_state_bits = cirq.GridQubit.rect(1, 8)
readout_operators = cirq.Z(cluster_state_bits[-1])
# Build a sequential model enacting the logic in 1.3 of this notebook.
# Here you are making the static cluster state prep as a part of the AddCircuit and the
# "quantum datapoints" are coming in the form of excitation
excitation_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state = tfq.layers.AddCircuit()(
excitation_input, prepend=cluster_state_circuit(cluster_state_bits))
quantum_model = tfq.layers.PQC(create_model_circuit(cluster_state_bits),
readout_operators)(cluster_state)
qcnn_model = tf.keras.Model(inputs=[excitation_input], outputs=[quantum_model])
# Show the keras plot of the model
tf.keras.utils.plot_model(qcnn_model,
show_shapes=True,
show_layer_names=False,
dpi=70)
# Generate some training data.
train_excitations, train_labels, test_excitations, test_labels = generate_data(
cluster_state_bits)
# Custom accuracy metric.
@tf.function
def custom_accuracy(y_true, y_pred):
y_true = tf.squeeze(y_true)
y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)
return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))
qcnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
history = qcnn_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations, test_labels))
plt.plot(history.history['loss'][1:], label='Training')
plt.plot(history.history['val_loss'][1:], label='Validation')
plt.title('Training a Quantum CNN to Detect Excited Cluster States')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# 1-local operators to read out
readouts = [cirq.Z(bit) for bit in cluster_state_bits[4:]]
def multi_readout_model_circuit(qubits):
"""Make a model circuit with less quantum pool and conv operations."""
model_circuit = cirq.Circuit()
symbols = sympy.symbols('qconv0:21')
model_circuit += quantum_conv_circuit(qubits, symbols[0:15])
model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],
symbols[15:21])
return model_circuit
# Build a model enacting the logic in 2.1 of this notebook.
excitation_input_dual = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_dual = tfq.layers.AddCircuit()(
excitation_input_dual, prepend=cluster_state_circuit(cluster_state_bits))
quantum_model_dual = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_dual)
d1_dual = tf.keras.layers.Dense(8)(quantum_model_dual)
d2_dual = tf.keras.layers.Dense(1)(d1_dual)
hybrid_model = tf.keras.Model(inputs=[excitation_input_dual], outputs=[d2_dual])
# Display the model architecture
tf.keras.utils.plot_model(hybrid_model,
show_shapes=True,
show_layer_names=False,
dpi=70)
hybrid_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
hybrid_history = hybrid_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations,
test_labels))
plt.plot(history.history['val_custom_accuracy'], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
excitation_input_multi = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_multi = tfq.layers.AddCircuit()(
excitation_input_multi, prepend=cluster_state_circuit(cluster_state_bits))
# apply 3 different filters and measure expectation values
quantum_model_multi1 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
quantum_model_multi2 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
quantum_model_multi3 = tfq.layers.PQC(
multi_readout_model_circuit(cluster_state_bits),
readouts)(cluster_state_multi)
# concatenate outputs and feed into a small classical NN
concat_out = tf.keras.layers.concatenate(
[quantum_model_multi1, quantum_model_multi2, quantum_model_multi3])
dense_1 = tf.keras.layers.Dense(8)(concat_out)
dense_2 = tf.keras.layers.Dense(1)(dense_1)
multi_qconv_model = tf.keras.Model(inputs=[excitation_input_multi],
outputs=[dense_2])
# Display the model architecture
tf.keras.utils.plot_model(multi_qconv_model,
show_shapes=True,
show_layer_names=True,
dpi=70)
multi_qconv_model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
loss=tf.losses.mse,
metrics=[custom_accuracy])
multi_qconv_history = multi_qconv_model.fit(x=train_excitations,
y=train_labels,
batch_size=16,
epochs=25,
verbose=1,
validation_data=(test_excitations,
test_labels))
plt.plot(history.history['val_custom_accuracy'][:25], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN')
plt.plot(multi_qconv_history.history['val_custom_accuracy'][:25],
label='Hybrid CNN \n Multiple Quantum Filters')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
plt.show()
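# Illustrative comparison (not in the original tutorial): trainable parameter
# counts of the three models, to put the accuracy curves above in context.
for name, m in [('QCNN', qcnn_model), ('Hybrid CNN', hybrid_model),
                ('Multi-filter hybrid', multi_qconv_model)]:
    print(name, m.count_params())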
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantum convolutional neural network
Step2: Install TensorFlow Quantum:
Step3: Now import TensorFlow and the module dependencies:
Step4: 1. Build a QCNN
Step5: Inspect the input tensor:
Step6: Inspect the output tensor:
Step8: Although the example below can be run without tfq.layers.AddCircuit, it is a good opportunity to understand how complex functionality can be embedded into TensorFlow compute graphs.
Step9: You can see that, just as with regular machine learning, you create a training and test set with which to benchmark the model. With the following snippet you can quickly inspect some data points:
Step11: 1.5 Define layers
Step12: Display the <a href="https
Step16: 1.5.2 QCNN layers
Step17: To inspect the objects you created, print out the one-qubit unitary circuit:
Step18: And the two-qubit unitary circuit:
Step19: And the two-qubit pooling circuit:
Step21: 1.5.2.1 Quantum convolution
Step22: Display the (highly horizontal) circuit:
Step24: 1.5.2.2 Quantum pooling
Step25: Inspect the pooling component circuit:
Step27: 1.6 Model definition
Step28: 1.7 Train the model
Step30: 2. Hybrid models
Step31: 2.1.2 Train the model
Step32: As you can see, with the help of a suitable classical model, the hybrid model usually converges faster than the pure quantum version.
Step33: 2.2.2 Train the model
|
774 | <ASSISTANT_TASK:>
Python Code:
# import libraries
import matplotlib
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
import seaborn as sns
import sklearn as sk
%matplotlib inline
# Ignore for now!
x = np.array(np.linspace(0,10,400))
y = 10*x+3
y_obs = y+.3*np.random.random_integers(-100,101,size=x.size) #introducing noise - an artifact of experimentation
# Resize the numpy arrays - ignore for now
x.resize((x.size,1))
y_obs.resize((y_obs.size,1))
y.resize((y.size,1))
# Plot the data we have sampled.
plt.scatter(x,y_obs,c="g",s=6)
# Plot the true relationship between x and y we are trying to guess
plt.plot(x,y,color="red",alpha=0.4)
# Lets make a model using the first 100 data points!
#Pick any random 100 points
ctr = np.random.randint(0,400,100)
x_train1 = x[ctr]
y_train1 = y_obs[ctr]
from sklearn.linear_model import LinearRegression
lin_mod = LinearRegression()
lin_mod.fit(x_train1,y_train1)
# See what the learned coefficients are!
print "The model for y = ax+b gives a = %f, b=%f " % (lin_mod.coef_, lin_mod.intercept_)
# We have taken 3 samples - 1. a sample of 100 points from (x,y) values 2. a sample of 150 points from observed (x,y)
# 3. a sample of 200 points from observed (x,y)
# You can safely ignore the syntax if it is your first time reading or you are very unfamiliar with numpy, python
ctr = np.random.randint(0,200,200)
x_train2 = x[ctr]
y_train2 = y_obs[ctr]
x_train3 = x
y_train3 = y_obs
# Ignore for now !!!
y_1 = lin_mod.predict(x)
lin_mod.fit(x_train2,y_train2)
y_2 = lin_mod.predict(x)
lin_mod.fit(x,y_obs)
y_3 = lin_mod.predict(x)
# Plotting the results of the linear model
# based on 100, 150 and 200 samples
real_line = plt.plot(x,y,color="red",label='actual line')
#raw = plt.scatter(x,y_obs,c="g",s=6,label='sampled data')
l1 = plt.plot(x,y_1,'--',c="blue",label='estimate from 100 samples',alpha=0.4)
l2 = plt.plot(x,y_2,'--',c="green",label='estimate from 150 samples',alpha=0.4)
l3 = plt.plot(x,y_3,'--',c="yellow",label='estimate from 200 samples',alpha=0.8)
plt.xlabel('x')
plt.ylabel('y')
plt.legend(labels =['actual line','estimate from 100 samples','estimate from 200 samples','estimate from 400 samples'],bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
raw = plt.scatter(x,y_obs,c="g",s=6,label='sampled data')
l1 = plt.plot(x,y_1,'--',c="blue",label='estimate from 100 samples')
l2 = plt.plot(x,y_2,'--',c="orange",label='estimate from 150 samples')
l3 = plt.plot(x,y_3,'--',c="black",label='estimate from 200 samples')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(labels =['estimate from 100 points','estimate from 200 points','estimate from 400 points','sampled data'],bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
num = 200
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# make a complicated 25-degree model trained on the first 100 data points
model_crazy = make_pipeline(PolynomialFeatures(5), Ridge())
model_crazy.fit(x[:num],y_obs[:num])
y_4 = model_crazy.predict(x[:num])
# See how it compares to the simple fit made earlier
plt.scatter(x[:num],y_obs[:num],c='red')
plt.plot(x[:num],y_4)
plt.plot(x[:num],y_3[:num])
plt.title('y vs x and two models')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(labels =['degree-25 polynomial model','simple linear model','actual data'],bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)
plt.savefig('woohoo.png')
# Test the model on points 201-250, points it hasnt been trained on!
start = 201
stop = 250
y_5 = model_crazy.predict(x[start:stop])
plt.plot(x[start:stop],y_5)
plt.scatter(x[start:stop],y_obs[start:stop],c='r')
plt.plot(x[start:stop],y_3[start:stop])
plt.legend(labels =['degree-25 polynomial model','simple linear model','actual data'],bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('not_really.png')
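# Illustrative follow-up (not in the original): quantify the overfitting by comparing
# mean squared error on the fitted points vs. the held-out points 201-250.
from sklearn.metrics import mean_squared_error
print("complex model, train MSE:", mean_squared_error(y_obs[:num], model_crazy.predict(x[:num])))
print("complex model, test MSE: ", mean_squared_error(y_obs[start:stop], model_crazy.predict(x[start:stop])))
print("linear model,  test MSE: ", mean_squared_error(y_obs[start:stop], y_3[start:stop]))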
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, as data scientists we don't know this relationship between y and x. Rather we have collected observations of y. These observations are bound to have some error - introduced by measurement, by the environment and so forth. This error is also called noise. Our goal is to be able to learn the relationship between x and y from an experiment in which we have collected a sample of some 200 (x,y) observations and ignore the noise in this collected data.
Step2: Goal
Step3: How much data?
Step4: Now, we've learnt 3 models using 100, 150 and 200 samples of (x,y) observations. Let's try to plot our prediction against the actual prediction line.
Step5: We can see that as the amount of data we take increases, our estimate keeps getting better! This is an important learning - the more data that we have, the better the model performs.
Step6: Which one of the above is a good model for the data?
Step7: The blue, more complicated model fits the data better in the sense of being closer to the data points. If you try and calculate its error vs the simple linear model, you'll find it's lower [Optional
|
775 | <ASSISTANT_TASK:>
Python Code:
import sys, pprint, json
try:
import requests
except ImportError:
!conda install --yes --prefix {sys.prefix} requests
import requests
try:
from genson import SchemaBuilder
except ImportError:
!conda install --yes --prefix {sys.prefix} genson
from genson import SchemaBuilder
pp = pprint.PrettyPrinter(indent=2)
api = "https://qumranica.org/Scrollery/resources/cgi-bin/scrollery-cgi.pl"
r = requests.post(api, json={"transaction": "validateSession", "PASSWORD":"asdf", "USER_NAME":"test"})
session = r.json()['SESSION_ID']
r = requests.post(api, json={"transaction": "getCombs", "SESSION_ID":session})
scrolls = r.json()['results']
def scrollIdByName(name):
sid = None
for scroll in scrolls:
if name == scroll['name']:
sid = scroll['scroll_version_id']
break
return sid
selectedScroll = scrollIdByName('4Q51')
r = requests.post(api, json={"transaction": "getColOfComb", "scroll_version_id": selectedScroll, "SESSION_ID":session})
cols = r.json()['results']
print(json.dumps(cols, indent=2, sort_keys=True))
col2 = cols[1]
r = requests.post(api, json={"transaction": "getSignStreamOfFrag", "scroll_version_id": selectedScroll, "col_id": col2['col_id'], "SESSION_ID":session})
text = r.json()['text']
builder = SchemaBuilder()
builder.add_object(text)
print(json.dumps(builder.to_schema(), indent=2, sort_keys=False))
print(json.dumps(r.json(), indent=2, sort_keys=False))
#The following helpers serialize each element to a list, since they could be either a scalar or list
def serializeChars(sign):
if isinstance(sign['chars'], list):
return sign['chars']
else:
return [sign['chars']]
def serializeCharLetters(char):
if isinstance(char['sign_char'], list):
return char['sign_char']
else:
return [char['sign_char']]
def serializeCharAttributes(char):
try:
if isinstance(char['attributes'], list):
return char['attributes']
else:
return [char['attributes']]
except:
return []
def serializeAttrValues(attr):
if isinstance(attr['values'], list):
#These are ordered so we can easily open and close HTML tags
sortorder={
"SCROLL_START":0,
"COLUMN_START":1,
"LINE_START":2,
"LINE_END":3,
"COLUMN_END":4,
"SCROLL_END":5
}
return sorted(attr['values'], key=lambda k: sortorder[k['attribute_value']])
else:
return [attr['values']]
#This function formats the output
def outputAllText():
#Begin printing the output
print(r.json()['text'][0]['scroll_name'])
# Cycle through the cols/fragments
for fragment in r.json()['text'][0]['fragments']:
print(fragment['fragment_name'], end='')
#Cycle through the lines
for line in fragment['lines']:
print('\n', line['line_name'], '\t', end='')
#Cycle through the signs
for sign in line['signs']:
#Whether there is more than one sign possible, print the first
char = serializeChars(sign)[0]
letter = serializeCharLetters(char)[0]
print(letter, end='')
#Check the attributes (if there are any) to see if we have a space
attrs = serializeCharAttributes(char)
if len(attrs) > 0:
for attr in attrs:
values = serializeAttrValues(attr)
for value in values:
if value['attribute_value'] == 'SPACE':
print(' ', end='')
outputAllText()
def outputMinimalText():
#Begin printing the output
print(r.json()['text'][0]['scroll_name'])
# Cycle through the cols/fragments
for fragment in r.json()['text'][0]['fragments']:
print(fragment['fragment_name'], end='')
#Cycle through the lines
for line in fragment['lines']:
print('\n', line['line_name'], '\t', end='')
#Cycle through the signs
for sign in line['signs']:
#Whether there is more than one sign possible, print the first
char = serializeChars(sign)[0]
letter = serializeCharLetters(char)[0]
#Check the attributes for damage and to see if we have a space
attrs = serializeCharAttributes(char)
damaged = False
space = False
if len(attrs) > 0:
for attr in attrs:
values = serializeAttrValues(attr)
for value in values:
if value['attribute_value'] == 'SPACE':
space = True
if (value['attribute_value'] == 'INCOMPLETE_BUT_CLEAR'
or value['attribute_value'] == 'INCOMPLETE_AND_NOT_CLEAR') or (
attr['attribute_id'] == 6 and value['attribute_value'] == 'TRUE'):
damaged = True
if not damaged:
print(letter, end='')
if space:
print(' ', end='')
outputMinimalText()
def outputHTMLText():
print('<!DOCTYPE html>')
print('<html>')
print('<head>')
print('\t<meta charset="UTF-8">')
print('\t<title>SQE Transcription Output</title>')
print("""
<style>
span.non-rcnst + span.reconstructed:before {
content: '[';
}
span.reconstructed + span.non-rcnst:before {
content: ']';
}
span.reconstructed:first-child:before {
content: '[';
}
span.reconstructed:last-child:after {
content: ']';
}
</style>
""")
print('</head>')
print('\n<body>')
#Begin printing the output
print('\t<h1>', r.json()['text'][0]['scroll_name'], '</h1>')
# Cycle through the cols/fragments
for fragment in r.json()['text'][0]['fragments']:
#Cycle through the lines
for line in fragment['lines']:
#Cycle through the signs
for sign in line['signs']:
#Whether there is more than one sign possible, print the first
char = serializeChars(sign)[0]
letter = serializeCharLetters(char)[0]
#Check the attributes for damage and to see if we have a space
attrs = serializeCharAttributes(char)
damaged = False
space = False
if len(attrs) > 0:
for attr in attrs:
values = serializeAttrValues(attr)
for value in values:
if value['attribute_value'] == 'COLUMN_START':
print('\t<div dir="rtl">')
print('\t\t<h2>', fragment['fragment_name'], '</h2>')
print('\t\t<p>')
if value['attribute_value'] == 'COLUMN_END':
print('\t\t</p>')
print('\t</div>')
if value['attribute_value'] == 'LINE_START':
print('\t\t\t<div>')
print('\t\t\t\t<span class="line-name non-rcnst">', line['line_name'], '</span>')
print('\t\t\t\t<span>', end='')
if value['attribute_value'] == 'LINE_END':
print('</span>')
print('\t\t\t</div>')
if (value['attribute_value'] == 'INCOMPLETE_BUT_CLEAR'
or value['attribute_value'] == 'INCOMPLETE_AND_NOT_CLEAR') or (
attr['attribute_id'] == 6 and value['attribute_value'] == 'TRUE'):
damaged = True
if value['attribute_value'] == 'SPACE':
print(' ', end='')
else:
if value['attribute_value'] == 'INCOMPLETE_BUT_CLEAR':
print(f'<span class="incomplete-but-clear non-rcnst">{letter}ׄ</span>', end='')
elif value['attribute_value'] == 'INCOMPLETE_AND_NOT_CLEAR':
print(f'<span class="incomplete-and-not-clear non-rcnst">{letter}֯</span>', end='')
elif attr['attribute_id'] == 6 and value['attribute_value'] == 'TRUE':
print(f'<span class="reconstructed">{letter}</span>', end='')
elif value['attribute_value'] == 'ABOVE_LINE':
print(f'<span class="non-rcnst"><sup>{letter}</sup></span>', end='')
elif value['attribute_value'] == 'BELOW_LINE':
print(f'<span class="non-rcnst"><sub>{letter}</sub></span>', end='')
else: print(f'<span class="non-rcnst">{letter}</span>', end='')
print('</body>')
print('</html>')
outputHTMLText()
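# Illustrative extra step (not in the original): capture the HTML output to a file
# instead of printing it, using contextlib.redirect_stdout.
import contextlib, io
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    outputHTMLText()
with open('transcription.html', 'w', encoding='utf-8') as f:
    f.write(buf.getvalue())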
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next get the login credentials
Step2: Making requests
Step3: Finding available cols/frags
Step4: transcriptions
Step5: The actual data looks like this
Step6: Since the data already comes in order, you could simply iterate over the lists to quickly see the text (note the helper functions at the beginning of the cell)
Step7: The previous method does not do any advanced checking to see if signs are damaged or reconstructed. It just prints the entirety of the transcribed text.
Step9: You could also serialize this to HTML by reading all of the attribute tags more closely and adding some nice CSS.
|
776 | <ASSISTANT_TASK:>
Python Code:
def range_generator_function(stop):
"""Naive implementation of builtins.range generator."""
# This function runs immediately, since it has no `yield` statements.
# It is a normal function, which happens to return a generator iterator.
print("Running line 1")
if not isinstance(stop, int):
raise TypeError('stop must be an int')
if stop < 0:
raise ValueError('stop must be >= 0')
print("Running line 2")
range_generator = _range_generator_function(stop=stop)
print("Running line 3")
return range_generator
def _range_generator_function(stop):
# This function does not run immediately, since it has `yield` statements.
# It is a generator function, and returns a generator iterator.
index = 0
print("Running line 4")
while index < stop:
print("Running line 5 with index", index)
yield index
print("Running line 6 with index", index)
index += 1
print("Running line 7 with index", index)
range_generator = range_generator_function(2) # Executes all prints in `range_generator_function()`,
range_generator # but none in `_range_generator_function()`.
import collections
isinstance(range_generator, collections.Iterable), isinstance(range_generator, collections.Iterator)
isinstance(range_generator, collections.Generator)
next(range_generator)
next(range_generator)
import traceback
try:
next(range_generator)
except StopIteration:
traceback.print_exc()
next(range_generator, 2) # Generator is exhausted, nothing more will get printed.
range_generator = range_generator_function(4)
for item in range_generator:
print('yielded', item)
def generator_function():
print((yield 0))
print((yield 1))
generator = generator_function()
print(next(generator)) # Advance generator to first `yield` statement.
item = generator.send('print this')
print('yielded', item)
try:
next(generator) # Same as `generator.send(None)`
except StopIteration:
pass
import traceback
class ExpectedError(Exception): pass
def generator_function():
for i in range(2):
try:
yield i
except ExpectedError as exc:
print('Caught exception', repr(exc))
continue
except Exception as exc:
print('Did not catch exception', repr(exc))
raise
return i
generator = generator_function()
next(generator)
item = generator.throw(ExpectedError)
print('yielded', item)
try:
generator.throw(KeyError('key'))
except KeyError:
traceback.print_exc()
generator = generator_function()
next(generator)
item = generator.throw(ExpectedError)
print('yielded', item)
try:
generator.throw(ExpectedError)
except StopIteration as exc:
traceback.print_exc()
print(repr(exc))
def generator_function():
try:
yield
except:
traceback.print_exc()
raise
print('About to yield 1')
yield 1
generator = generator_function()
next(generator)
generator.close()
def generator_function():
try:
yield 0
except:
raise KeyError('key')
generator = generator_function()
next(generator)
try:
generator.close()
except KeyError:
traceback.print_exc()
import collections
class StopAdder(Exception): pass
def adder_function():
total = 0
while True:
print('At start of adder loop, current total is', total)
try:
integers = (yield total)
except (Exception, GeneratorExit) as exc:
print('Adder received exception', repr(exc), 'and is returning with final total', total)
return total
if not isinstance(integers, (list, tuple)):
integers = [integers]
if integers and isinstance(integers[0], collections.Iterable):
integers = integers[0]
print('Adder received', integers)
total += sum(integers)
def send_values_into_adder(adder, *integers):
print('Sending', integers, 'into adder')
current_total = adder.send(integers)
print('Current total in adder is', current_total)
return current_total
adder = adder_function()
next(adder)
send_values_into_adder(adder)
print()
send_values_into_adder(adder, 10)
print()
send_values_into_adder(adder, 1, 2, 3)
print()
send_values_into_adder(adder, range(8))
print()
print('Sending StopAdder into adder')
try:
adder.throw(StopAdder)
except StopIteration as exc:
print('Final total from adder is', exc.value)
def generator_function():
yield 0
return
generator = generator_function()
next(generator)
import traceback
try:
next(generator)
except StopIteration:
traceback.print_exc()
import traceback
def generator_function_that_returns_a_value():
yield 0
return 'return_value'
generator = generator_function_that_returns_a_value()
next(generator)
try:
next(generator)
except StopIteration as exc:
traceback.print_exc()
print(repr(exc))
print(repr(exc.value))
try:
next(generator)
except StopIteration as exc:
traceback.print_exc() # Subsequent calls to `next()` do not use the return value
print(repr(exc)) # when raising `StopIteration`.
print(repr(exc.value))
for item in generator_function_that_returns_a_value():
print('yielded', item)
list(generator_function_that_returns_a_value())
import collections
class Adder(collections.Generator):
def __init__(self):
super().__init__()
self.total = 0
self.stopped = False
def __repr__(self):
return f"<{self.__class__.__name__}: total={self.total!r} stopped={self.stopped!r}>"
def send(self, integers):
if self.stopped:
raise StopIteration
print(f"At start of {self.send}, current total is", self.total)
if not isinstance(integers, (list, tuple)):
integers = [integers]
if integers and isinstance(integers[0], collections.Iterable):
integers = integers[0]
print(f"{self.send} received", integers)
self.total += sum(integers)
print(f"At end of {self.send}, returning current total", self.total)
return self.total
def throw(self, exc_type, exc_value=None, exc_traceback=None):
if self.stopped:
raise StopIteration
exc_info = (exc_type, exc_value, exc_traceback)
print(f"At start of {self.throw}, current total is", self.total)
self.stopped = True
print(f"{self.throw} received exception", exc_info, "and is returning with final total", self.total)
raise StopIteration(self.total)
def send_values_into_adder(adder, *integers):
print('Sending', integers, 'into', adder)
current_total = adder.send(integers)
print('Current total in', adder, 'is', current_total)
return current_total
adder = Adder()
print(adder)
print()
adder.send([])
print()
adder.send(10)
print()
adder.send([1, 2, 3])
print()
adder.send(range(8))
print()
print('Sending StopAdder into adder')
try:
adder.throw(StopAdder)
except StopIteration as exc:
print('Final total from adder is', exc.value)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The basics of generators
Step2: Advanced uses of generators
Step3: Generator.throw
Step4: Generator.close
Step5: Generators as suspendable/resumable program state
Step7: Generators that return values
Step8: It might be thought that generators will only pass values via yield, and will always have empty return statements. This is true up through Python 3.2, but that changed in Python 3.3. Now, just like in normal functions, a generator function can return arbitrary values with return statements (but note that this is illegal in earlier versions of Python, and will cause a SyntaxError).
Step9: Notice that the Python interpreter converted the return 'return_value' into a raise StopIteration('return_value'). Also notice that the exception object has a .value attribute, which holds this value (a short yield from illustration follows this list).
Step10: Custom generator classes
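The return-value behaviour in Steps 8 and 9 is usually consumed with yield from rather than by catching StopIteration by hand. The following short sketch is an editorial addition (not part of the original notebook) and only restates standard Python 3.3+ semantics:
def inner():
    yield 'a'
    yield 'b'
    return 'inner return value'
def outer():
    # yield from forwards inner()'s yields and evaluates to the value
    # carried by its StopIteration, i.e. the return statement above.
    result = yield from inner()
    print('inner() returned', result)
list(outer())  # yields 'a' and 'b', then prints the captured return value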
|
777 | <ASSISTANT_TASK:>
Python Code:
l = []
for i in range(10):
l.append(2*i+1)
l
l = [2*i+1 for i in range(10)]
l
even = [n*n for n in range(20) if n % 2 == 0]
even
even = []
for n in range(20):
if n % 2 == 0:
even.append(n)
even
l = [1, 0, -2, 3, -1, -5, 0]
signum_l = [int(n / abs(n)) if n != 0 else 0 for n in l]
signum_l
l1 = [1, 2, 3]
l2 = [4, 5, 6]
[(i, j) for i in l1 for j in l2]
[(i, j) for j in l2 for i in l1]
matrix = [
[1, 2, 3],
[5, 6, 7]
]
[[e*e for e in row] for row in matrix]
i = (i for i in range(10))
type(i)
%%time
N = 8
s = sum([i*2 for i in range(int(10**N))])
print(s)
%%time
s = sum(i*2 for i in range(int(10**N)))
print(s)
even_numbers = (2*n for n in range(10))
even_numbers
for num in even_numbers:
print(num)
for num in even_numbers:
print(num)
# next(even_numbers) # raises StopIteration
class MyIterator:
def __init__(self):
self.iter_no = 5
def __iter__(self):
return self
def __next__(self):
if self.iter_no <= 0:
raise StopIteration()
self.iter_no -= 1
print("Returning {}".format(self.iter_no))
return self.iter_no
myiter = MyIterator()
for i in myiter:
print(i)
fruit_list = ["apple", "plum", "apple", "pear"]
fruits = {fruit.title() for fruit in fruit_list}
type(fruits), len(fruits), fruits
word_list = ["apple", "plum", "pear"]
word_length = {word: len(word) for word in word_list}
type(word_length), len(word_length), word_length
word_list = ["apple", "plum", "pear", "avocado"]
first_letters = {word[0]: word for word in word_list}
first_letters
def hungarian_vowels():
alphabet = ("a", "á", "e", "é", "i", "í", "o", "ó",
"ö", "ő", "u", "ú", "ü", "ű")
for vowel in alphabet:
yield vowel
type(hungarian_vowels())
for vowel in hungarian_vowels():
print(vowel)
gen = hungarian_vowels()
print("first iteration: {}".format(", ".join(gen)))
print("second iteration: {}".format(", ".join(gen)))
gen = hungarian_vowels()
while True:
try:
print("The next element is {}".format(next(gen)))
except StopIteration:
print("No more elements left :(")
break
numbers = [1, -2, 3, 1]
# print(", ".join(numbers)) # raises TypeError
print(", ".join(str(number) for number in numbers))
shopping_list = ["apple", "plum", "pear"]
shopping_list = ["apple", "plum", "pear"]
shopping_list = ["apple"]
print("The shopping list is:\n{0}".format(
"\n".join(
"item {0}: {1}".format(i+1, item)
for i, item in enumerate(shopping_list)
)
))
shopping_list = ["apple", "plum", "pear"]
for i, item in enumerate(shopping_list):
print("item {} {}".format(i+1, item))
shopping_list = {
"apple": 2,
"pear": 1,
"plum": 5,
}
print("\n".join(
"item {0}: {1}, quantity: {2}".format(i+1, item, quantity)
for i, (item, quantity) in enumerate(shopping_list.items()
)))
shopping_list = {
"apple": 2,
"pear": 1,
"plum": 5,
}
print("\n".join(
"item {0}: {1}, quantity: {2}".format(i+1, item, quantity)
for i, (item, quantity) in
enumerate(
sorted(shopping_list.items(),
key=lambda x: x[1], reverse=True)
)))
students = [
["Joe", "John", "Mary"],
["Tina", "Tony", "Jeff", "Béla"],
["Pete", "Dave"],
]
try:
int("abc")
except ValueError as e:
print(type(e), e)
print(e)
try:
age = int(input())
if age < 0:
raise Exception("Age cannot be negative")
except ValueError as e:
print("ValueError caught")
except Exception as e:
print("Other exception caught: {}".format(type(e)))
def age_printer(age):
next_age = age + 1
print("Next year your age will be " + next_age)
try:
your_age = input()
your_age = int(your_age)
age_printer(your_age)
except ValueError:
print("ValueError caught")
except TypeError:
print("TypeError caught")
def age_printer(age):
next_age = age + 1
print("Next year your age will be " + next_age)
try:
your_age = input()
your_age = int(your_age)
age_printer(your_age)
except (ValueError, TypeError) as e:
print("{} caught".format(type(e).__name__))
try:
age = int(input())
if age < 0:
raise Exception("Age cannot be negative")
except ValueError:
print("ValueError caught")
except:
#except Exception as e:
print("Something else caught")
try:
age = int(input())
if age < 0:
raise Exception("Age cannot be negative")
#except:
#print("Something else caught")
except ValueError:
print("ValueError caught")
try:
age = int(input())
if age < 0:
raise Exception("Age cannot be negative")
except Exception as e:
print("Exception caught: {}".format(type(e)))
except ValueError:
print("ValueError caught")
try:
age = int(input())
except Exception as e:
print(type(e), e)
finally:
print("this always runs")
try:
age = int(input())
except ValueError as e:
print("Exception", e)
else:
print("No exception was raised")
finally:
print("this always runs")
try:
int("not a number")
except Exception:
# raise
pass
class NegativeAgeError(Exception):
pass
try:
age = int(input())
if age < 0:
raise NegativeAgeError("Age cannot be negative. Invalid age: {}".format(age))
except NegativeAgeError as e:
print(e)
except Exception as e:
print("Something else happened. Caught {}, with message {}".format(type(e), e))
try:
int(input())
except ValueError:
print("not an int")
else:
print("looks like an int")
fh = []
while True:
try:
fh.append(open("abc.txt", "w"))
except OSError:
break
len(fh)
for f in fh:
f.close()
s1 = "important text"
fh = open("file.txt", "w")
# fh.write(s2) # raises NameError
fh.close()
from sys import stderr
fh = open("file.txt", "w")
try:
fh.write(important_variable)
except Exception as e:
stderr.write("{0} happened".format(type(e).__name__))
finally:
print("Closing file")
fh.close()
with open("file.txt", "w") as fh:
fh.write("abc\n")
# fh.write(important_variable) # raises NameError
class DummyContextManager:
def __init__(self, value):
self.value = value
def __enter__(self):
print("Dummy resource acquired")
return self.value
def __exit__(self, *args):
print("Dummy resource released")
with DummyContextManager(42) as d:
print("Resource: {}".format(d))
class DummyContextManager:
def __init__(self, value):
self.value = value
def __enter__(self):
print("Dummy resource acquired")
return self.value
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
print("{0} with value {1} caught\nTraceback: {2}".format(exc_type, exc_value, traceback))
print("Dummy resource released")
with DummyContextManager(42) as d:
print(d)
# raise ValueError("just because I can") # __exit__ will be called anyway
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: one-liner equivalent
Step2: The general form of list comprehension is
Step3: which is equivalent to
Step4: since this expression implements a filtering mechanism, there is no else clause
Step5: More than one sequence may be traversed. Is this depth-first or breadth-first traversal?
Step6: List comprehensions may be nested by replacing the first expression with another list comprehension
Step7: What is the type of a (list) comprehension?
Step8: Generator expressions
Step9: Generators do not generate a list in memory
Step10: therefore they can only be traversed once
Step11: the generator is empty after the first run
Step12: calling next() raises a StopIteration exception
Step13: these are actually the defining properties of the iteration protocol
Step14: Set and dict comprehension
Step15: if the expression in the generator is a key-value pair separated by a colon, it instantiates a dictionary
Step16: yield keyword
Step17: this function returns a generator object
Step18: The next function returns the next element of the generator.
Step19: Exercises
Step20: ~~~
Step21: Q. Print the following shopping list with quantities.
Step22: Q. Print the same format in alphabetical order.
Step23: Q. Print the list of students.
Step24: Q. Print one class-per-line and print the size of the class too
Step25: more than one except clause may be defined
Step26: More than one type of exception can be handled in the same except clause
Step27: except without an Exception type
Step28: the empty except must be the last except block since it blocks all others
Step29: Base class' except clauses catch derived classes too
Step30: finally
Step31: else
Step32: raise keyword
Step33: Defining exceptions
Step34: Using exception for trial-and-error is considered Pythonic
Step35: Context managers
Step36: we need to manually close the file
Step37: the file is never closed, the file descriptor is leaked
Step38: Context managers handle this automatically
Step39: Defining context managers
Step40: __exit__ takes 3 extra arguments that describe the exception
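As a companion to Steps 39 and 40, here is a hedged editorial sketch (not from the original material) of the standard-library alternative, contextlib.contextmanager: everything before the yield plays the role of __enter__, and the finally block plays the role of __exit__, so it also runs when the body raises.
from contextlib import contextmanager
@contextmanager
def dummy_context_manager(value):
    print("Dummy resource acquired")       # __enter__-like setup
    try:
        yield value                        # bound by `as` in the with statement
    finally:
        print("Dummy resource released")   # __exit__-like teardown, runs even on errors
with dummy_context_manager(42) as d:
    print("Resource:", d)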
|
778 | <ASSISTANT_TASK:>
Python Code:
from polyglot.downloader import downloader
print(downloader.supported_languages_table("ner2", 3))
%%bash
polyglot download embeddings2.en ner2.en
from polyglot.text import Text
blob = """The Israeli Prime Minister Benjamin Netanyahu has warned that Iran poses a "threat to the entire world"."""
text = Text(blob)
text.entities
for sent in text.sentences:
print(sent, "\n")
for entity in sent.entities:
print(entity.tag, entity)
benjamin = sent.entities[1]
sent.words[benjamin.start: benjamin.end]
!polyglot --lang en tokenize --input testdata/cricket.txt | polyglot --lang en ner | tail -n 20
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Necessary Models
Step3: Example
Step4: We can query all entities mentioned in a text.
Step5: Or, we can query entities per sentence
Step6: By doing more careful inspection of the second entity Benjamin Netanyahu, we can locate the position of the entity within the sentence.
Step7: Command Line Interface
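A small editorial sketch (not part of the original walkthrough) that reuses only the attributes already shown above (entity.tag, entity.start, entity.end, and the fact that an entity iterates over its words) to collect every entity into a plain list for further processing; converting each word with str() is an assumption made here to stay independent of polyglot's word type.
rows = []
for sent in text.sentences:
    for entity in sent.entities:
        rows.append({
            'tag': entity.tag,
            'text': ' '.join(str(word) for word in entity),
            'span': (entity.start, entity.end),  # word offsets within the sentence
        })
print(rows)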
|
779 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q -U pip==20.2
!pip install -q fairness-indicators
!pip install -q witwidget
import os
import tempfile
import apache_beam as beam
import numpy as np
import pandas as pd
from datetime import datetime
import pprint
from google.protobuf import text_format
import tensorflow_hub as hub
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
from fairness_indicators.tutorial_utils import util
from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget
from tensorflow_metadata.proto.v0 import schema_pb2
download_original_data = False #@param {type:"boolean"}
if download_original_data:
train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')
# The identity terms list will be grouped together by their categories
# (see 'IDENTITY_COLUMNS') on threshould 0.5. Only the identity term column,
# text column and label column will be kept after processing.
train_tf_file = util.convert_comments_data(train_tf_file)
validate_tf_file = util.convert_comments_data(validate_tf_file)
else:
train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')
validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',
'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')
stats = tfdv.generate_statistics_from_tfrecord(data_location=train_tf_file)
tfdv.visualize_statistics(stats)
BASE_DIR = tempfile.gettempdir()
TEXT_FEATURE = 'comment_text'
LABEL = 'toxicity'
FEATURE_MAP = {
# Label:
LABEL: tf.io.FixedLenFeature([], tf.float32),
# Text:
TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),
# Identities:
'sexual_orientation':tf.io.VarLenFeature(tf.string),
'gender':tf.io.VarLenFeature(tf.string),
'religion':tf.io.VarLenFeature(tf.string),
'race':tf.io.VarLenFeature(tf.string),
'disability':tf.io.VarLenFeature(tf.string),
}
def train_input_fn():
def parse_function(serialized):
parsed_example = tf.io.parse_single_example(
serialized=serialized, features=FEATURE_MAP)
# Adds a weight column to deal with unbalanced classes.
parsed_example['weight'] = tf.add(parsed_example[LABEL], 0.1)
return (parsed_example,
parsed_example[LABEL])
train_dataset = tf.data.TFRecordDataset(
filenames=[train_tf_file]).map(parse_function).batch(512)
return train_dataset
model_dir = os.path.join(BASE_DIR, 'train', datetime.now().strftime(
"%Y%m%d-%H%M%S"))
embedded_text_feature_column = hub.text_embedding_column(
key=TEXT_FEATURE,
module_spec='https://tfhub.dev/google/nnlm-en-dim128/1')
classifier = tf.estimator.DNNClassifier(
hidden_units=[500, 100],
weight_column='weight',
feature_columns=[embedded_text_feature_column],
optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003),
loss_reduction=tf.losses.Reduction.SUM,
n_classes=2,
model_dir=model_dir)
classifier.train(input_fn=train_input_fn, steps=1000)
def eval_input_receiver_fn():
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_placeholder')
# This *must* be a dictionary containing a single key 'examples', which
# points to the input placeholder.
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(serialized_tf_example, FEATURE_MAP)
features['weight'] = tf.ones_like(features[LABEL])
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features[LABEL])
tfma_export_dir = tfma.export.export_eval_savedmodel(
estimator=classifier,
export_dir_base=os.path.join(BASE_DIR, 'tfma_eval_model'),
eval_input_receiver_fn=eval_input_receiver_fn)
#@title Fairness Indicators Computation Options
tfma_eval_result_path = os.path.join(BASE_DIR, 'tfma_eval_result')
#@markdown Modify the slice_selection for experiments on other identities.
slice_selection = 'sexual_orientation' #@param ["sexual_orientation", "gender", "religion", "race", "disability"]
print(f'Slice selection: {slice_selection}')
#@markdown Confidence Intervals can help you make better decisions regarding your data, but as it requires computing multiple resamples, is slower particularly in the colab environment that cannot take advantage of parallelization.
compute_confidence_intervals = False #@param {type:"boolean"}
print(f'Compute confidence intervals: {compute_confidence_intervals}')
# Define slices that you want the evaluation to run on.
eval_config_pbtxt = """
model_specs {
label_key: "%s"
}
metrics_specs {
metrics {
class_name: "FairnessIndicators"
config: '{ "thresholds": [0.1, 0.3, 0.5, 0.7, 0.9] }'
}
}
slicing_specs {} # overall slice
slicing_specs {
feature_keys: ["%s"]
}
options {
compute_confidence_intervals { value: %s }
disabled_outputs { values: "analysis" }
}
""" % (LABEL, slice_selection, compute_confidence_intervals)
eval_config = text_format.Parse(eval_config_pbtxt, tfma.EvalConfig())
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=tfma_export_dir)
schema = text_format.Parse("""
tensor_representation_group {
key: ""
value {
tensor_representation {
key: "comment_text"
value {
dense_tensor {
column_name: "comment_text"
shape {}
}
}
}
}
}
feature {
name: "comment_text"
type: BYTES
}
feature {
name: "toxicity"
type: FLOAT
}
feature {
name: "sexual_orientation"
type: BYTES
}
feature {
name: "gender"
type: BYTES
}
feature {
name: "religion"
type: BYTES
}
feature {
name: "race"
type: BYTES
}
feature {
name: "disability"
type: BYTES
}
""", schema_pb2.Schema())
tfxio = tf_example_record.TFExampleRecord(
file_pattern=validate_tf_file,
schema=schema,
raw_record_column_name=tfma.ARROW_INPUT_COLUMN)
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfxio.ArrowSchema(),
tensor_representations=tfxio.TensorRepresentations())
with beam.Pipeline() as pipeline:
(pipeline
| 'ReadFromTFRecordToArrow' >> tfxio.BeamSource()
| 'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
output_path=tfma_eval_result_path,
tensor_adapter_config=tensor_adapter_config))
eval_result = tfma.load_eval_result(output_path=tfma_eval_result_path)
DEFAULT_MAX_EXAMPLES = 1000
# Load 100000 examples in memory. When first rendered,
# What-If Tool should only display 1000 of these due to browser constraints.
def wit_dataset(file, num_examples=100000):
dataset = tf.data.TFRecordDataset(
filenames=[file]).take(num_examples)
return [tf.train.Example.FromString(d.numpy()) for d in dataset]
wit_data = wit_dataset(train_tf_file)
config_builder = WitConfigBuilder(wit_data[:DEFAULT_MAX_EXAMPLES]).set_estimator_and_feature_spec(
classifier, FEATURE_MAP).set_label_vocab(['non-toxicity', LABEL]).set_target_feature(LABEL)
wit = WitWidget(config_builder)
event_handlers={'slice-selected':
wit.create_selection_callback(wit_data, DEFAULT_MAX_EXAMPLES)}
widget_view.render_fairness_indicator(eval_result=eval_result,
slicing_column=slice_selection,
event_handlers=event_handlers
)
pp = pprint.PrettyPrinter()
print("Slices:")
pp.pprint(eval_result.get_slice_names())
print("\nMetrics:")
pp.pprint(eval_result.get_metric_names())
baseline_slice = ()
heterosexual_slice = (('sexual_orientation', 'heterosexual'),)
print("Baseline metric values:")
pp.pprint(eval_result.get_metrics_for_slice(baseline_slice))
print("\nHeterosexual metric values:")
pp.pprint(eval_result.get_metrics_for_slice(heterosexual_slice))
pp.pprint(eval_result.get_metrics_for_all_slices())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to Fairness Indicators
Step2: You must restart the Colab runtime after installing. Select Runtime > Restart runtime from the Colab menu.
Step3: Download and analyze the data
Step4: Use TFDV to analyze the data and find potential problems in it, such as missing values and data imbalances, that can lead to fairness disparities.
Step5: TFDV shows that there are some significant imbalances in the data which could lead to biased model outcomes.
Step6: Next, set up an input function to feed data into the model. Add a weight column to each example and upweight the toxic examples to account for the class imbalance identified by the TFDV. Use only identity features during the evaluation phase, as only the comments are fed into the model during training.
Step7: Train the model
Step8: Analyze the model
Step11: Compute Fairness Metrics
Step12: Visualize data using the What-if Tool
Step13: Render Fairness Indicators
Step14: With this particular dataset and task, systematically higher false positive and false negative rates for certain identities can lead to negative consequences. For example, in a content moderation system, a higher-than-overall false positive rate for a certain group can lead to those voices being silenced. Thus, it is important to regularly evaluate these types of criteria as you develop and improve models, and utilize tools such as Fairness Indicators, TFDV, and WIT to help illuminate potential problems. Once you've identified fairness issues, you can experiment with new data sources, data balancing, or other techniques to improve performance on underperforming groups.
Step15: Use get_metrics_for_slice() to get the metrics for a particular slice as a dictionary mapping metric names to metric values.
Step16: Use get_metrics_for_all_slices() to get the metrics for all slices as a dictionary mapping each slice to the corresponding metrics dictionary you obtain from running get_metrics_for_slice() on it.
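As a hedged editorial sketch of how these two dictionaries can be post-processed: the overall slice is keyed by the empty tuple (as in baseline_slice above), while the exact metric key below is only an assumed example, so confirm the real names with eval_result.get_metric_names().
metric_key = 'fairness_indicators_metrics/false_positive_rate@0.5'  # assumed key name, check get_metric_names()
all_slice_metrics = eval_result.get_metrics_for_all_slices()
print('overall:', all_slice_metrics.get((), {}).get(metric_key))
for slice_key, metrics in all_slice_metrics.items():
    if slice_key:  # skip the overall slice, printed above
        print(slice_key, '->', metrics.get(metric_key))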
|
780 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
from pprint import pprint
import shutil
import subprocess
import urllib.request
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Rectangle
import openmc.data
openmc.data.atomic_mass('Fe54')
openmc.data.NATURAL_ABUNDANCE['H2']
openmc.data.atomic_weight('C')
url = 'https://anl.box.com/shared/static/kxm7s57z3xgfbeq29h54n7q6js8rd11c.ace'
filename, headers = urllib.request.urlretrieve(url, 'gd157.ace')
# Load ACE data into object
gd157 = openmc.data.IncidentNeutron.from_ace('gd157.ace')
gd157
total = gd157[1]
total
total.xs
total.xs['294K'](1.0)
total.xs['294K']([1.0, 2.0, 3.0])
gd157.energy
energies = gd157.energy['294K']
total_xs = total.xs['294K'](energies)
plt.loglog(energies, total_xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
pprint(list(gd157.reactions.values())[:10])
n2n = gd157[16]
print('Threshold = {} eV'.format(n2n.xs['294K'].x[0]))
n2n.xs
xs = n2n.xs['294K']
plt.plot(xs.x, xs.y)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
plt.xlim((xs.x[0], xs.x[-1]))
n2n.products
neutron = n2n.products[0]
neutron.distribution
dist = neutron.distribution[0]
dist.energy_out
for e_in, e_out_dist in zip(dist.energy[::5], dist.energy_out[::5]):
plt.semilogy(e_out_dist.x, e_out_dist.p, label='E={:.2f} MeV'.format(e_in/1e6))
plt.ylim(ymax=1e-6)
plt.legend()
plt.xlabel('Outgoing energy (eV)')
plt.ylabel('Probability/eV')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cm = matplotlib.cm.Spectral_r
# Determine size of probability tables
urr = gd157.urr['294K']
n_energy = urr.table.shape[0]
n_band = urr.table.shape[2]
for i in range(n_energy):
# Get bounds on energy
if i > 0:
e_left = urr.energy[i] - 0.5*(urr.energy[i] - urr.energy[i-1])
else:
e_left = urr.energy[i] - 0.5*(urr.energy[i+1] - urr.energy[i])
if i < n_energy - 1:
e_right = urr.energy[i] + 0.5*(urr.energy[i+1] - urr.energy[i])
else:
e_right = urr.energy[i] + 0.5*(urr.energy[i] - urr.energy[i-1])
for j in range(n_band):
# Determine maximum probability for a single band
max_prob = np.diff(urr.table[i,0,:]).max()
# Determine bottom of band
if j > 0:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
value = (urr.table[i,0,j] - urr.table[i,0,j-1])/max_prob
else:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
value = urr.table[i,0,j]/max_prob
# Determine top of band
if j < n_band - 1:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
else:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
# Draw rectangle with appropriate color
ax.add_patch(Rectangle((e_left, xs_bottom), e_right - e_left, xs_top - xs_bottom,
color=cm(value)))
# Overlay total cross section
ax.plot(gd157.energy['294K'], total.xs['294K'](gd157.energy['294K']), 'k')
# Make plot pretty and labeled
ax.set_xlim(1.0, 1.0e5)
ax.set_ylim(1e-1, 1e4)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Energy (eV)')
ax.set_ylabel('Cross section(b)')
gd157.export_to_hdf5('gd157.h5', 'w')
gd157_reconstructed = openmc.data.IncidentNeutron.from_hdf5('gd157.h5')
np.all(gd157[16].xs['294K'].y == gd157_reconstructed[16].xs['294K'].y)
h5file = h5py.File('gd157.h5', 'r')
main_group = h5file['Gd157/reactions']
for name, obj in sorted(list(main_group.items()))[:10]:
if 'reaction_' in name:
print('{}, {}'.format(name, obj.attrs['label'].decode()))
n2n_group = main_group['reaction_016']
pprint(list(n2n_group.values()))
n2n_group['294K/xs'].value
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/Gd/157'
filename, headers = urllib.request.urlretrieve(url, 'gd157.endf')
# Load into memory
gd157_endf = openmc.data.IncidentNeutron.from_endf(filename)
gd157_endf
elastic = gd157_endf[2]
elastic.xs
elastic.xs['0K'](0.0253)
gd157_endf.resonances.ranges
[(r.energy_min, r.energy_max) for r in gd157_endf.resonances.ranges]
# Create log-spaced array of energies
resolved = gd157_endf.resonances.resolved
energies = np.logspace(np.log10(resolved.energy_min),
np.log10(resolved.energy_max), 1000)
# Evaluate elastic scattering xs at energies
xs = elastic.xs['0K'](energies)
# Plot cross section vs energies
plt.loglog(energies, xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
resolved.parameters.head(10)
gd157.add_elastic_0K_from_endf('gd157.endf')
gd157[2].xs
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/H/2'
filename, headers = urllib.request.urlretrieve(url, 'h2.endf')
# Run NJOY to create deuterium data
h2 = openmc.data.IncidentNeutron.from_njoy('h2.endf', temperatures=[300., 400., 500.], stdout=True)
h2[2].xs
url = 'https://github.com/mit-crpg/WMP_Library/releases/download/v1.1/092238.h5'
filename, headers = urllib.request.urlretrieve(url, '092238.h5')
u238_multipole = openmc.data.WindowedMultipole.from_hdf5('092238.h5')
u238_multipole(1.0, 294)
E = np.linspace(5, 25, 1000)
plt.semilogy(E, u238_multipole(E, 293.606)[1])
E = np.linspace(6.1, 7.1, 1000)
plt.semilogy(E, u238_multipole(E, 0)[1])
plt.semilogy(E, u238_multipole(E, 900)[1])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Physical Data
Step2: The IncidentNeutron class
Step3: Cross sections
Step4: Cross sections for each reaction can be stored at multiple temperatures. To see what temperatures are available, we can look at the reaction's xs attribute.
Step5: To find the cross section at a particular energy, 1 eV for example, simply get the cross section at the appropriate temperature and then call it as a function. Note that our nuclear data uses eV as the unit of energy.
Step6: The xs attribute can also be called on an array of energies.
Step7: A quick way to plot cross sections is to use the energy attribute of IncidentNeutron. This gives an array of all the energy values used in cross section interpolation for each temperature present.
Step8: Reaction Data
Step9: Let's suppose we want to look more closely at the (n,2n) reaction. This reaction has an energy threshold
Step10: The (n,2n) cross section, like all basic cross sections, is represented by the Tabulated1D class. The energy and cross section values in the table can be directly accessed with the x and y attributes. Using the x and y has the nice benefit of automatically accounting for reaction thresholds.
Step11: To get information on the energy and angle distribution of the neutrons emitted in the reaction, we need to look at the products attribute.
Step12: We see that the neutrons emitted have a correlated angle-energy distribution. Let's look at the energy_out attribute to see what the outgoing energy distributions are.
Step13: Here we see we have a tabulated outgoing energy distribution for each incoming energy. Note that the same probability distribution classes that we could use to create a source definition are also used within the openmc.data package. Let's plot every fifth distribution to get an idea of what they look like.
Step14: Unresolved resonance probability tables
Step15: Exporting HDF5 data
Step16: With few exceptions, the HDF5 file encodes the same data as the ACE file.
Step17: And one of the best parts of using HDF5 is that it is a widely used format with lots of third-party support. You can use h5py, for example, to inspect the data.
Step18: So we see that the hierarchy of data within the HDF5 mirrors the hierarchy of Python objects that we manipulated before.
Step19: Working with ENDF files
Step20: Just as before, we can get a reaction by indexing the object directly
Step21: However, if we look at the cross section now, we see that it isn't represented as tabulated data anymore.
Step22: If you had Cython installed when you built/installed OpenMC, you should be able to evaluate resonant cross sections from ENDF data directly, i.e., OpenMC will reconstruct resonances behind the scenes for you.
Step23: When data is loaded from an ENDF file, there is also a special resonances attribute that contains resolved and unresolved resonance region data (from MF=2 in an ENDF file).
Step24: We see that $^{157}$Gd has a resolved resonance region represented in the Reich-Moore format as well as an unresolved resonance region. We can look at the min/max energy of each region by doing the following
Step25: With knowledge of the energy bounds, let's create an array of energies over the entire resolved resonance range and plot the elastic scattering cross section.
Step26: Resonance ranges also have a useful parameters attribute that shows the energies and widths for resonances.
Step27: Heavy-nuclide resonance scattering
Step28: Let's check to make sure that we have both the room temperature elastic scattering cross section as well as a 0K cross section.
Step29: Generating data from NJOY
Step30: Now we can use our h2 object just as we did before.
Step31: Note that 0 K elastic scattering data is automatically added when using from_njoy() so that resonance elastic scattering treatments can be used.
Step32: The WindowedMultipole object can be called with energy and temperature values. Calling the object gives a tuple of 3 cross sections
Step33: An array can be passed for the energy argument.
Step34: The real advantage to multipole is that it can be used to generate cross sections at any temperature. For example, this plot shows the Doppler broadening of the 6.67 eV resonance between 0 K and 900 K.
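A short editorial sketch (not in the original notebook) that reuses only the call pattern already shown, namely that u238_multipole(E, T) returns a tuple and element [1] is the curve plotted above, to overlay several temperatures on the same resonance:
E = np.linspace(6.1, 7.1, 1000)
for T in [0.0, 300.0, 600.0, 900.0]:
    plt.semilogy(E, u238_multipole(E, T)[1], label='{:.0f} K'.format(T))
plt.legend()
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
plt.show()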
|
781 | <ASSISTANT_TASK:>
Python Code:
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
B = [15, 15, 15]
C = [
[ 6, 10, 1],
[12, 12, 5],
[15, 4, 3],
[10, 3, 9],
[8, 9, 5]
]
A = [
[ 5, 7, 2],
[14, 8, 7],
[10, 6, 12],
[ 8, 4, 15],
[ 6, 12, 5]
]
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
from docplex.mp.model import Model
mdl = Model("GAP per Wolsey")
print("#As={}, #Bs={}, #Cs={}".format(len(A), len(B), len(C)))
number_of_cs = len(C)
# variables
x_vars = [mdl.binary_var_list(c, name=None) for c in C]
# constraints
cts = mdl.add_constraints(mdl.sum(xv) <= 1 for xv in x_vars)
mdl.add_constraints(mdl.sum(x_vars[ii][j] * A[ii][j] for ii in range(number_of_cs)) <= bs for j, bs in enumerate(B))
# objective
total_profit = mdl.sum(mdl.scal_prod(x_i, c_i) for c_i, x_i in zip(C, x_vars))
mdl.maximize(total_profit)
mdl.print_information()
s = mdl.solve()
assert s is not None
obj = s.objective_value
print("* GAP with no relaxation run OK, best objective is: {:g}".format(obj))
for ct in cts:
mdl.remove_constraint(ct)
#p_vars are the penalties attached to violating the constraints
p_vars = mdl.continuous_var_list(C, name='p') # new for relaxation
# new version of the approximated constraint where we apply the penalties
mdl.add_constraints(mdl.sum(xv) == 1 - pv for xv, pv in zip(x_vars, p_vars));  # trailing ; suppresses the cell's output
#Define the maximum number of iterations
max_iters = 10
number_of_cs = len(C)
c_range = range(number_of_cs)
# Langrangian relaxation loop
eps = 1e-6
loop_count = 0
best = 0
initial_multiplier = 1
multipliers = [initial_multiplier] * len(C)
# Objective function
# I'd write the key performance indicator (kpi) as
# total_profit = mdl.sum(mdl.sum(x_vars[task][worker] * C[task][worker]) for task, worker in zip(tasks, workers))
total_profit = mdl.sum(mdl.scal_prod(x_i, c_i) for c_i, x_i in zip(C, x_vars))
mdl.add_kpi(total_profit, "Total profit")
print("starting the loop")
while loop_count <= max_iters:
loop_count += 1
# Rebuilt at each loop iteration
total_penalty = mdl.scal_prod(p_vars, multipliers)
mdl.maximize(total_profit + total_penalty)
s = mdl.solve()
if not s:
print("*** solve fails, stopping at iteration: %d" % loop_count)
break
best = s.objective_value
penalties = [pv.solution_value for pv in p_vars]
print('%d> new lagrangian iteration:\n\t obj=%g, m=%s, p=%s' % (loop_count, best, str(multipliers), str(penalties)))
do_stop = True
justifier = 0
for k in c_range:
penalized_violation = penalties[k] * multipliers[k]
if penalized_violation >= eps:
do_stop = False
justifier = penalized_violation
break
if do_stop:
print("* Lagrangian relaxation succeeds, best={:g}, penalty={:g}, #iterations={}"
.format(best, total_penalty.solution_value, loop_count))
break
else:
# Update multipliers and start the loop again.
scale_factor = 1.0 / float(loop_count)
multipliers = [max(multipliers[i] - scale_factor * penalties[i], 0.) for i in c_range]
print('{0}> -- loop continues, m={1!s}, justifier={2:g}'.format(loop_count, multipliers, justifier))
print(best)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If CPLEX is not installed, you can install CPLEX Community edition.
Step2: 2. Model the data
Step3: 3. Set up the prescriptive model
Step4: We will first create an optimization problem, composed of 2 basic constraint blocks, then we will resolve it using Lagrangian Relaxation on one of the constraint blocks.
Step5: 3.2 Define the decision variables
Step6: 3.3 Define the business constraints
Step7: 3.4. Solve the model
Step8: 3.5. Solve the model with Lagrangian Relaxation method
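For reference (an editorial note, not part of the original tutorial), the multiplier update coded in the loop above is the projected subgradient step with step size 1/k:
\lambda_i^{(k+1)} = \max\left(\lambda_i^{(k)} - \frac{1}{k}\, p_i^{(k)},\ 0\right)
where p_i^{(k)} is the violation (penalty) of constraint block i at iteration k; the loop stops once every product \lambda_i^{(k)} p_i^{(k)} drops below the tolerance eps.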
|
782 | <ASSISTANT_TASK:>
Python Code:
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Nicolas Barascud <nicolas.barascud@ens.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
path = mne.datasets.mtrf.data_path()
decim = 2
data = loadmat(join(path, 'speech_data.mat'))
raw = data['EEG'].T
speech = data['envelope'].T
sfreq = float(data['Fs'])
sfreq /= decim
speech = mne.filter.resample(speech, down=decim, npad='auto')
raw = mne.filter.resample(raw, down=decim, npad='auto')
# Read in channel positions and create our MNE objects from the raw data
montage = mne.channels.make_standard_montage('biosemi128')
info = mne.create_info(montage.ch_names, sfreq, 'eeg', montage=montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1)
ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False)
ax.set(title="Sample activity", xlabel="Time (s)")
mne.viz.tight_layout()
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'],
estimator=1., scoring='corrcoef')
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Outputs for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_scores = scores.mean(axis=0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls='--', color='r')
ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)")
mne.viz.tight_layout()
# Print mean coefficients across all time delays / channels (see Fig 1 in [1])
time_plot = 0.180 # For highlighting a specific time.
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r',
vmin=-max_coef, vmax=max_coef, shading='gouraud')
ax.axvline(time_plot, ls='--', color='k', lw=2)
ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients",
xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0],
xticks=np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation=45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay (see Fig 2C in [1])
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False,
vmin=-max_coef, vmax=max_coef)
ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
# We use the same lags as in [1]. Negative lags now index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting (cf. next section). We'll use a ridge regression estimator with an
# alpha value similar to [1].
sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names,
estimator=1e4, scoring='corrcoef', patterns=True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_patterns = patterns.mean(axis=0)
mean_scores = scores.mean(axis=0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
y_pred = sr.predict(Y[test])
time = np.linspace(0, 2., 5 * int(sfreq))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
color='grey', lw=2, ls='--')
ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False)
ax.set(title="Stimulus reconstruction")
ax.set_xlabel('Time (s)')
mne.viz.tight_layout()
time_plot = (-.140, -.125) # To average between two timepoints.
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1),
pos=info, axes=ax[0], show=False,
vmin=-max_coef, vmax=max_coef)
ax[0].set(title="Model coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1),
pos=info, axes=ax[1],
show=False, vmin=-max_patterns, vmax=max_patterns)
ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data from the publication
Step2: Create and fit a receptive field model
Step3: Investigate model coefficients
Step4: Create and fit a stimulus reconstruction model
Step5: Visualize stimulus reconstruction
Step6: Investigate model coefficients
|
783 | <ASSISTANT_TASK:>
Python Code:
#pandas is commonly imported as pd
import pandas as pd
#We'll import the other libraries as needed
print("Split on comma as strings")
csv_row = '1,2.0,Three point five,True'
print(csv_row.split(','))
print("\nSplit on comma and converted to ints")
csv_row = '1,2,3,4,5,6,7,8,9'
print([int(c) for c in csv_row.split(',')])
import csv
with open('data/volcanoes.csv') as fin:
csv_data = csv.reader(fin)
#csv.reader is a generator
#for row in csv_data: do stuff
print(csv_data.__next__())
print()
print(csv_data.__next__())
with open('data/volcanoes.csv') as fin:
csv_dict_data = csv.DictReader(fin)
for row in csv_dict_data:
print(row)
print()
print('Name:', row['Name'], '\nType:', row['Type'])
break
csv_df = pd.read_csv(open('data/volcanoes.csv'))
csv_df.head()
import openpyxl
# You can also use xlrd, xlsxwriter, and a host of others.
# See http://www.python-excel.org/ for all your options.
wb = openpyxl.Workbook()
ws = wb.create_sheet("NewSheet")
ws['A1'] = "Name"
ws['A2'] = "Michael"
ws['B1'] = "fav_color"
ws['B2'] = "Purple"
wb.save("data/ExcelData.xlsx")
excel_row = 'data/ExcelData.xlsx'
data = openpyxl.load_workbook(excel_row)
for sheet in data:
for row in sheet:
print(row)
print(*[cell.value for cell in row])
file = "data/volcanoes.xlsx"
# Pandas excel loader is built off of several other excel readers,
# such as openXLRD and xlsxwriter
# this is reflected in how many ways there are to read in an excel file.
basicLoad = pd.read_excel(file)
alternateLoad = pd.ExcelFile(file)
basicLoad.head()
# read_excel defaults to read the first sheet in an excel book
# For a comprehensive list of parameters for read_excel, see: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_excel.html
# you can specify the sheets you want by name
sheetByName = pd.read_excel(file, sheetname="volcanoes")
# by index
sheetsByIndex = pd.read_excel(file, sheetname=[0])
# if you don't know which sheets you want, you can specify header=None and
# all sheets will be loaded in a nested structure:
allSheetsByHeader = pd.read_excel(file, header=None)
allSheetsBySheets = pd.read_excel(file, sheetname=0)
# You can skip rows or columns
cols = ['Number', 'Name', 'Country', 'Region', 'Type', 'Activity Evidence',
'Last Known Eruption', 'Latitude', 'Longitude', 'Elevation (Meters)',
'Dominant Rock Type', 'Tectonic Setting']
subset = pd.read_excel(file, skip_footer=5, skiprows=2, names=cols)
import json
color_data = [
{"name": "Michael", "fav_color": "purple"},
{"name": "Casey", "fav_color": "turquoise"}
]
#load, loads, dump, and dumps are common library APIs
#load/dump tkae file objects while loads/dumps take strings
print(json.dumps(color_data))
json_data = json.load(open('data/volcanoes.json'))
print('Rows:', len(json_data))
json_df = pd.read_json('data/volcanoes.json')
json_df.head()
import xml.etree.ElementTree as ET
tree = ET.parse('data/volcano.xml')
#ET.fromstring(some_xml_string)
root = tree.getroot()
for item in root:
print(item.tag, ' -- ', item.text)
import yaml
yaml_data = yaml.load(open('data/volcanoes.yml'))
print('Rows:', len(yaml_data))
yaml_df = pd.io.json.json_normalize(yaml.load(open('data/volcanoes.yml')))
yaml_df.head()
import pickle
print('Raw hex format:\n', pickle.dumps(color_data))
pickle.dump(color_data, open('data/colors.pkl', 'wb'))
new_data = pickle.load(open('data/colors.pkl', 'rb'))
print("\nSame? :", color_data == new_data)
color_df = pd.DataFrame(pd.read_pickle('data/colors.pkl'))
color_df
color_df.to_pickle('data/color_df.pkl')
#Create full JSON from CSV
with open('data/volcanoes.csv') as fin:
data = [dict(row) for row in csv.DictReader(fin)]
json.dump(data, open('data/volcanoes.json', 'w'))
#Create single element XML from the JSON data
#This library is not included
from dicttoxml import dicttoxml
print(dicttoxml(json_data[0]), file=open('data/volcano.xml', 'w'))
#Create full YAML from the JSON data
yaml.dump(json_data, open('data/volcanoes.yml', 'w'))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: .csv — Comma-Separated Values
Step2: But there are more than just strings or rows with a single data type. We can use the csv library to handle edge cases and memory management for us.
Step3: Pandas
Step4: .xls .xlsx — Excel Spreadsheet
Step5: Pandas
Step6: .json — JavaScript Object Notation
Step7: Pandas
Step8: .xml — eXtensible Markup Language
Step9: Pandas
Step10: Pandas
Step11: .p .pkl — Pickle
Step12: Pandas
Step13: Bonus
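One more hedged sketch (an editorial addition): because every format in this record also loads into a pandas DataFrame, the DataFrame writers give a compact way to do Bonus-style conversions; the output paths below are illustrative only.
df = pd.read_csv('data/volcanoes.csv')
df.to_json('data/volcanoes_from_df.json', orient='records')
df.to_excel('data/volcanoes_from_df.xlsx', index=False)
df.to_pickle('data/volcanoes_from_df.pkl')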
|
784 | <ASSISTANT_TASK:>
Python Code:
import pypsa
import pandas as pd
import os
n = pypsa.examples.ac_dc_meshed(from_master=True)
n.generators.loc[n.generators.carrier == "gas", "p_nom_extendable"] = False
n.generators.loc[n.generators.carrier == "gas", "ramp_limit_down"] = 0.2
n.generators.loc[n.generators.carrier == "gas", "ramp_limit_up"] = 0.2
n.add(
"StorageUnit",
"su",
bus="Manchester",
marginal_cost=10,
inflow=50,
p_nom_extendable=True,
capital_cost=10,
p_nom=2000,
efficiency_dispatch=0.5,
cyclic_state_of_charge=True,
state_of_charge_initial=1000,
)
n.add(
"StorageUnit",
"su2",
bus="Manchester",
marginal_cost=10,
p_nom_extendable=True,
capital_cost=50,
p_nom=2000,
efficiency_dispatch=0.5,
carrier="gas",
cyclic_state_of_charge=False,
state_of_charge_initial=1000,
)
n.storage_units_t.state_of_charge_set.loc[n.snapshots[7], "su"] = 100
n.add("Bus", "storebus", carrier="hydro", x=-5, y=55)
n.madd(
"Link",
["battery_power", "battery_discharge"],
"",
bus0=["Manchester", "storebus"],
bus1=["storebus", "Manchester"],
p_nom=100,
efficiency=0.9,
p_nom_extendable=True,
p_nom_max=1000,
)
n.madd(
"Store",
["store"],
bus="storebus",
e_nom=2000,
e_nom_extendable=True,
marginal_cost=10,
capital_cost=10,
e_nom_max=5000,
e_initial=100,
e_cyclic=True,
);
from pypsa.linopt import get_var, linexpr, join_exprs, define_constraints
def minimal_state_of_charge(n, snapshots):
vars_soc = get_var(n, "StorageUnit", "state_of_charge")
lhs = linexpr((1, vars_soc))
define_constraints(n, lhs, ">", 50, "StorageUnit", "soc_lower_bound")
def fix_link_cap_ratio(n, snapshots):
vars_link = get_var(n, "Link", "p_nom")
eff = n.links.at["battery_power", "efficiency"]
lhs = linexpr(
(1, vars_link["battery_power"]), (-eff, vars_link["battery_discharge"])
)
define_constraints(n, lhs, "=", 0, "battery_discharge", attr="fixratio")
def fix_bus_production(n, snapshots):
total_demand = n.loads_t.p_set.sum().sum()
prod_per_bus = (
linexpr((1, get_var(n, "Generator", "p")))
.groupby(n.generators.bus, axis=1)
.apply(join_exprs)
)
define_constraints(
n, prod_per_bus, ">=", total_demand / 5, "Bus", "production_share"
)
def extra_functionalities(n, snapshots):
minimal_state_of_charge(n, snapshots)
fix_link_cap_ratio(n, snapshots)
fix_bus_production(n, snapshots)
n.lopf(
pyomo=False,
extra_functionality=extra_functionalities,
keep_shadowprices=["Bus", "battery_discharge", "StorageUnit"],
)
n.constraints
n.links.loc[["battery_power", "battery_discharge"], ["p_nom_opt"]]
n.storage_units_t.state_of_charge
n.generators_t.p.groupby(n.generators.bus, axis=1).sum().sum() / n.loads_t.p.sum().sum()
n.dualvalues
from pypsa.linopt import get_dual
get_dual(n, "StorageUnit", "soc_lower_bound")
get_dual(n, "battery_discharge", "fixratio")
get_dual(n, "Bus", "production_share")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modify the network a bit
Step2: Add ramp limit
Step3: Add additional storage units (cyclic and non-cyclic) and fix one state_of_charge
Step4: Add an additional store
Step5: Extra functionalities
Step6: One of the most important functions is linexpr, which takes one or more tuples of coefficient and variable pairs which should go into the left hand side (lhs) of the constraint (a further editorial example appears after this list).
Step7: Fix the ratio between ingoing and outgoing capacity of the Store
Step8: Every bus must in total produce at least 20% of the total demand
Step9: Combine them ...
Step10: ...and run the lopf with pyomo=False
Step11: The keep_shadowprices argument in the lopf now decides which shadow prices (SP) should be retrieved. It can either be set to True, then all SP are kept. It also can be a list of names of the constraints. Therefore the name argument in define_constraints is necessary, in our case 'battery_discharge', 'StorageUnit' and 'Bus'.
Step12: The last three entries show our constraints. As 'soc_lower_bound' is time-dependent, the pnl value is set to True.
Step13: Looks good! Now, let's see which dual values were parsed. Therefore we have a look into n.dualvalues
Step14: Again we see the last two entries reflect our constraints (the values in the columns play only a minor role). Having a look what the values are
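To make the pattern above concrete once more, here is a hedged editorial sketch of an extra elementwise constraint built only from the helpers already imported in this example (get_var, linexpr, define_constraints); the 3000 MW bound is an arbitrary illustrative number, and the function would be called from extra_functionalities alongside the three constraints defined earlier.
def limit_storage_p_nom(n, snapshots):
    # mirror minimal_state_of_charge: one inequality per extendable storage unit
    p_nom = get_var(n, "StorageUnit", "p_nom")
    lhs = linexpr((1, p_nom))
    define_constraints(n, lhs, "<=", 3000, "StorageUnit", "p_nom_upper_bound")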
|
785 | <ASSISTANT_TASK:>
Python Code:
import sys
path_to_foxi = '/Users/Rob/work/foxi' # Give your path to foxi here.
sys.path.append(path_to_foxi + '/foxisource/')
from foxi import foxi
# These imports aren't stricly necessary to run foxi but they will be useful in our examples.
import numpy as np
from scipy.stats import multivariate_normal
# Choose a state of low information to start with.
mock_posterior_best_fit = np.asarray([np.random.uniform(-1.0,1.0),
np.random.uniform(-1.0,1.0),
np.random.uniform(-1.0,1.0),
np.random.uniform(-1.0,1.0),
np.random.uniform(-1.0,1.0)])
# Set the posterior best fit to be a unit vector.
mock_posterior_best_fit = mock_posterior_best_fit/np.sqrt(np.sum(mock_posterior_best_fit**2.0))
# Set the posterior Fisher matrix to be diagonal for simplicity.
mock_posterior_fisher_matrix = np.asarray([[ np.random.uniform(0.01,1.0), 0.0, 0.0, 0.0, 0.0],
[ 0.0, np.random.uniform(0.01,1.0), 0.0, 0.0, 0.0], # This obviously assumes that the
[ 0.0, 0.0, np.random.uniform(0.01,1.0), 0.0, 0.0], # covariance matrix is constant
[ 0.0, 0.0, 0.0, np.random.uniform(0.01,1.0), 0.0], # with respect to the parameters.
[ 0.0, 0.0, 0.0, 0.0, np.random.uniform(0.01,1.0)]])
# Give the posterior a unit-determinant Fisher matrix so that other realisations are comparable.
mock_posterior_fisher_matrix = mock_posterior_fisher_matrix/(np.linalg.det(mock_posterior_fisher_matrix)**(1.0/5.0))
# Quick inversion to generate the samples and mimic some weights too.
number_of_posterior_samples = 10**3
mock_posterior_covariance_matrix = np.linalg.inv(mock_posterior_fisher_matrix)
mock_posterior_samples = np.random.multivariate_normal(mock_posterior_best_fit,
mock_posterior_covariance_matrix,
number_of_posterior_samples)
mock_posterior_sample_weights = multivariate_normal.pdf(mock_posterior_samples,
mean=mock_posterior_best_fit,
cov=mock_posterior_covariance_matrix)
mock_posterior_samples_output = np.insert(mock_posterior_samples,0,mock_posterior_sample_weights,axis=1)
# Let's output this data to a file in the '/foxichains' directory which mimics a real MCMC output.
np.savetxt(path_to_foxi + '/foxichains/mock_posterior_samples.txt', mock_posterior_samples_output, delimiter='\t')
# Feel free to vary these (though consider the number of model pairs to compare grows like Nm*(Nm-1)/2).
Nm = 10
Rm = 0.0001
number_of_prior_samples = 5*10**2
# Making the model spread relatively small - not strictly necessary but improves convergence properties.
hyperprior_covariance_matrix = 0.1*mock_posterior_covariance_matrix
# Generate the positions.
mock_prior_positions = np.random.multivariate_normal(mock_posterior_best_fit,hyperprior_covariance_matrix,Nm)
# Generate the 5D hypersphere samples and output to text files in the '/foxipriors' directory.
for im in range(0,Nm):
R1 = np.random.uniform(0.0,Rm,size=number_of_prior_samples)
R2 = np.random.uniform(0.0,Rm,size=number_of_prior_samples)
R3 = np.random.uniform(0.0,Rm,size=number_of_prior_samples)
R4 = np.random.uniform(0.0,Rm,size=number_of_prior_samples)
R5 = np.random.uniform(0.0,Rm,size=number_of_prior_samples)
angle1 = np.random.uniform(0.0,np.pi,size=number_of_prior_samples)
angle2 = np.random.uniform(0.0,np.pi,size=number_of_prior_samples)
angle3 = np.random.uniform(0.0,np.pi,size=number_of_prior_samples)
angle4 = np.random.uniform(0.0,2.0*np.pi,size=number_of_prior_samples)
parameter1 = R1*np.cos(angle1) + mock_prior_positions[im][0]
parameter2 = R2*np.sin(angle1)*np.cos(angle2) + mock_prior_positions[im][1]
parameter3 = R3*np.sin(angle1)*np.sin(angle2)*np.cos(angle3) + mock_prior_positions[im][2]
parameter4 = R4*np.sin(angle1)*np.sin(angle2)*np.sin(angle3)*np.cos(angle4) + mock_prior_positions[im][3]
parameter5 = R5*np.sin(angle1)*np.sin(angle2)*np.sin(angle3)*np.sin(angle4) + mock_prior_positions[im][4]
mock_prior_samples = np.asarray([parameter1,parameter2,parameter3,parameter4,parameter5]).T
np.savetxt(path_to_foxi + '/foxipriors/mock_prior' + str(im+1) + '_samples.txt', mock_prior_samples, delimiter='\t')
# Specify the polynomial behaviour of the forecast Fisher matrix with respect to the fiducial points.
def fisher_matrix(fiducial_point):
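    # Start from the mock posterior Fisher matrix and inflate each diagonal element
    # quadratically with the distance of the fiducial point from the posterior best fit.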
mock_forecast_fisher_matrix = np.zeros((5,5))
mock_forecast_fisher_matrix += mock_posterior_fisher_matrix
mock_forecast_fisher_matrix[0][0] += mock_posterior_fisher_matrix[0][0]*((fiducial_point[0]-mock_posterior_best_fit[0])**2.0)
mock_forecast_fisher_matrix[1][1] += mock_posterior_fisher_matrix[1][1]*((fiducial_point[1]-mock_posterior_best_fit[1])**2.0)
mock_forecast_fisher_matrix[2][2] += mock_posterior_fisher_matrix[2][2]*((fiducial_point[2]-mock_posterior_best_fit[2])**2.0)
mock_forecast_fisher_matrix[3][3] += mock_posterior_fisher_matrix[3][3]*((fiducial_point[3]-mock_posterior_best_fit[3])**2.0)
mock_forecast_fisher_matrix[4][4] += mock_posterior_fisher_matrix[4][4]*((fiducial_point[4]-mock_posterior_best_fit[4])**2.0)
return mock_forecast_fisher_matrix
foxi_instance = foxi(path_to_foxi)
chains_filename = 'mock_posterior_samples.txt'
# Note that the column numbers start from 0...
parameter_column_numbers = [1,2,3,4,5]
weights_column_number = 0
# Simply set this to the number of samples generated - this can be useful to get results out as a sanity check.
number_of_samples_to_read_in = number_of_posterior_samples
foxi_instance.set_chains(chains_filename,
parameter_column_numbers,
number_of_samples_to_read_in,
weights_column=weights_column_number, # All points are given weight 1 if this is ignored.
column_types=None) # No transformations needed here.
# One could have ['flat','log10','log'] specified for each column.
# List the model file names to compute the expected utilities for.
model_name_list = ['mock_prior' + str(im+1) + '_samples.txt' for im in range(0,Nm)]
# List the column numbers to use for each file of prior samples.
prior_column_numbers = [[0,1,2,3,4] for im in range(0,Nm)]
# Once again, simply set this to the number of samples we made earlier for each prior.
number_of_prior_points_to_read_in = number_of_prior_samples
foxi_instance.set_model_name_list(model_name_list,
prior_column_numbers,
number_of_prior_points_to_read_in,
prior_column_types=None) # Once again, no transformations needed here.
foxi_instance.set_fisher_matrix(fisher_matrix)
mix_models = True # Set this to 'True' so that the expected utilities for all possible model pairs are calculated.
# The default is 'False' which calculates the utilities all with respect to the reference model
# here the 0-element in 'model_name_list'.
foxi_instance.run_foxifish(mix_models=mix_models)
foxiplot_file_name = 'foxiplots_data_mix_models.txt' # This is the generic name that 'run_foxifish' will set, change
# this to whatever you like as long as the file is in '/foxioutput'.
# If 'mix_models = False' then remove the 'mix_models' tag.
# Set this to the number of samples generated - useful to vary to check convergence though we will make plots later.
number_of_foxiplot_samples_to_read_in = number_of_posterior_samples
# We must set this feature to 'flat' in each column to perform no post-processing transformation that reweights the chains.
# This can be a little redundant as it makes more numerical sense to simply generate new chains.
post_chains_column_types = ['flat','flat','flat','flat','flat']
# Set this to 'True' for the output to include fully-formatted LaTeX tables!
TeX_output = True
# For the truly lazy - you can set the TeX name for each model in the table output too.
model_TeX_names = [r'${\cal M}_' + str(i) + r'$' for i in range(0,Nm)]
foxi_instance.rerun_foxi(foxiplot_file_name,
number_of_foxiplot_samples_to_read_in,
post_chains_column_types,
model_name_TeX_input=model_TeX_names,
TeX_output=TeX_output)
print(open(path_to_foxi + '/foxioutput/foxiplots_data_summary.txt', 'r').read())
print(open(path_to_foxi + '/foxioutput/foxiplots_data_summary_TeX.txt', 'r').read())
# Simply specify the number of bins in which to re-calculate the expected utilty with more samples.
number_of_bins = 50
# We have already specified the other inputs here so let's simply generate the plots!
foxi_instance.plot_convergence(foxiplot_file_name,
number_of_bins,
number_of_foxiplot_samples_to_read_in,
post_chains_column_types)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup of the current experiment
Step2: Setup of the model priors
Step3: Setup of the forecast Fisher matrix
Step4: Running the main foxi algorithm
Step5: The next step is to specify where our posterior samples of the current data are, how many samples to read in, what weights they carry, and whether any transformations need to be applied to each column (none are needed in this case).
Step6: Now that the posterior chains have been set, we can do the same for our list of prior samples.
Step7: Notice that the output here is from a quick KDE of each set of prior samples. How these are used depends on the specific numerical situation, which is once again covered in more detail in Hardwick, Vennin & Wands (2018). Our final step before running foxi is to give it a python function which returns the forecast Fisher matrix at each fiducial point (i.e. evaluated at each point of the current data chains). We do this like so
Step8: All of the necessary settings have now been applied for a full run of the foxi code. Depending on how many points were introduced at the level of the priors and chains, as well as the length of time it takes to evaluate the forecast Fisher matrix at each fiducial point, we may wish to continue on a compute cluster. For this simple example we have chosen, it should be enough to simply run locally on a laptop in less than 10 minutes. Our main run command is the following
Step9: Don't be too alarmed by a few error messages about dividing by zero
Step10: Once again, don't be too alarmed by a few value error messages. If there are any error messages related to converting strings to floats, this is likely to do with a delimiter problem in the text files that were read in. Tab delimited data should work fine (or at least give a number of spaces). Note also that the number of model pairs quoted here is actually the number of unique pairs + the auto-pairs (i.e. Model_i - Model_j as well as Model_i - Model_i) so this number is an additional +N_m larger than the number of unique pairs. A summary of the main results can be read from this file
Step11: One thing to avoid in these results is if the quoted expected Kullback-Leibler divergence <DKL> is of roughly the same value as the natural log of the number of points in the posterior chains - which in this example is $\ln (10^3) \simeq 6.9$. More chain points must be used for larger values since otherwise the value of <DKL> is only a lower bound.
Step12: Another useful tool provided by foxi is to generate plots illustrating the numerical convergence (or lack thereof) of the calculated expected utilities.
|
786 | <ASSISTANT_TASK:>
Python Code:
from __future__ import division
import re
import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
%matplotlib inline
#%qtconsole
!rm train_ect.vw.cache
!rm mnist_train_ect.model
!vw -d data/mnist_train.vw -b 19 --ect 10 -f mnist_train_ect.model -q ii --passes 100 -l 0.4 --early_terminate 3 --cache_file train_ect.vw.cache --power_t 0.6
!rm predict_ect.txt
!vw -t data/mnist_test.vw -i mnist_train_ect.model -p predict_ect.txt
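# In both VW-format files the class label (1-10) is the leading integer on each line,
# so a regex anchored at the start of the line recovers the true and predicted labels.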
y_true=[]
with open("data/mnist_test.vw", 'rb') as f:
for line in f:
m = re.search('^\d+', line)
if m:
found = m.group()
y_true.append(int(found))
y_pred = []
with open("predict_ect.txt", 'rb') as f:
for line in f:
m = re.search('^\d+', line)
if m:
found = m.group()
y_pred.append(int(found))
target_names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"] # NOTE: plus one
def plot_confusion_matrix(cm,
target_names,
title='Proportional Confusion matrix: VW ect on 784 pixels',
cmap=plt.cm.Paired):
    """
    given a confusion matrix (cm), make a nice plot
    see the scikit-learn documentation for the original done for the iris dataset
    """
plt.figure(figsize=(8, 6))
plt.imshow((cm/cm.sum(axis=1)), interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cm = confusion_matrix(y_true, y_pred)
print(cm)
model_accuracy = sum(cm.diagonal())/len(y_pred)
model_misclass = 1 - model_accuracy
print("\nModel accuracy: {0}, model misclass rate: {1}".format(model_accuracy, model_misclass))
plot_confusion_matrix(cm, target_names)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train
Step2: Predict
Step4: Analyze
|
787 | <ASSISTANT_TASK:>
Python Code:
# If you'd like to download it through the command line...
!curl -O http://www.cs.cornell.edu/home/llee/data/convote/convote_v1.1.tar.gz
# And then extract it through the command line...
!tar -zxf convote_v1.1.tar.gz
# glob finds files matching a certain filename pattern
import glob
import re
import pandas as pd
from nltk.stem import PorterStemmer
# Give me all the text files
paths = glob.glob('convote_v1.1/data_stage_one/development_set/*')
paths[:5]
len(paths)
speeches = []
for path in paths:
with open(path) as speech_file:
speech = {
'pathname': path,
'filename': path.split('/')[-1],
'content': speech_file.read()
}
speeches.append(speech)
speeches_df = pd.DataFrame(speeches)
speeches_df.head()
speeches_df['content'].head(5)
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer = CountVectorizer(stop_words='english')
X = count_vectorizer.fit_transform(speeches_df['content'])
X.toarray()
pd.DataFrame(X.toarray())
pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer = CountVectorizer(stop_words='english',max_features=100)
X = count_vectorizer.fit_transform(speeches_df['content'])
X.toarray()
pd.DataFrame(X.toarray())
pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
df=pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
df['chairman'].value_counts().head(5) #Chairman is NOT mentioned in 250 speeches.
len(df[df['chairman']==0])
len(df[(df['chairman']==0) & (df['mr']==0)])
df['thank'].max()
df[df['thank']==9] #Speech No 9
ctdf=df[(df['china']!=0) & (df['trade']!=0)]
nctdf=pd.DataFrame([ctdf['china'], ctdf['trade'], ctdf['china'] + ctdf['trade']], index=["china", "trade", "china+trade"]).T
nctdf.sort_values(by='china+trade',ascending=False).head(3)
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
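    # Replace non-alphanumeric characters with spaces, lowercase, split on whitespace,
    # then Porter-stem each token.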
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(stop_words='english', tokenizer=stemming_tokenizer, use_idf=False, norm='l1')
X = tfidf_vectorizer.fit_transform(speeches_df['content'])
newdf=pd.DataFrame(X.toarray(), columns=tfidf_vectorizer.get_feature_names())
newdf.head(5)
# index 0 is the first speech, which was the first one imported.
paths[0]
# Pass that into 'cat' using { } which lets you put variables in shell commands
# that way you can pass the path to cat
!type "convote_v1.1\data_stage_one\development_set\052_400011_0327014_DON.txt"
#!type "{paths[0].replace("/","\\")}"
!type "{paths[0].replace("/","\\")}"
ecdf=pd.DataFrame([newdf['elect'], newdf['chao'], newdf['elect'] + newdf['chao']], index=["elections", "chaos", "elections+chaos"]).T
ecdf.sort_values(by='elections+chaos',ascending=False).head(5)
#SIMPLE COUNTING VECTORIZER
count_vectorizer = CountVectorizer(stop_words='english',max_features=100)
X = count_vectorizer.fit_transform(speeches_df['content'])
from sklearn.cluster import KMeans
number_of_clusters = 8
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = count_vectorizer.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
#SIMPLE TERM FREQUENCY VECTORIZER
vectorizer = TfidfVectorizer(use_idf=True, tokenizer=stemming_tokenizer, stop_words='english')
X = vectorizer.fit_transform(speeches_df['content'])
from sklearn.cluster import KMeans
number_of_clusters = 8
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
# SIMPLE TFIDF VECTORIZER
vectorizer = TfidfVectorizer(tokenizer=stemming_tokenizer,use_idf=False,stop_words='english')
X = vectorizer.fit_transform(speeches_df['content'])
from sklearn.cluster import KMeans
number_of_clusters = 8
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
paths = glob.glob('hp/*')
paths[0]
len(paths)
speeches = []
for path in paths:
with open(path) as speech_file:
speech = {
'pathname': path,
'filename': path.split('/')[-1],
'content': speech_file.read()
}
speeches.append(speech)
hpfanfic_df = pd.DataFrame(speeches)
hpfanfic_df.head()
hpfanfic_df['content'].head(5)
vectorizer = TfidfVectorizer(use_idf=True, max_features=10000, stop_words='english')
X = vectorizer.fit_transform(hpfanfic_df['content'])
print(vectorizer.get_feature_names()[:10])
df = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df.head(5)
# Fit KMeans on the fan-fiction TF-IDF matrix before inspecting its clusters
number_of_clusters = 8
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
hpfanfic_df['category'] = km.labels_
hpfanfic_df.head()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can explore the files if you'd like, but we're going to get the ones from convote_v1.1/data_stage_one/development_set/. It's a bunch of text files.
Step2: So great, we have 702 of them. Now let's import them.
Step3: In class we had the texts variable. For the homework you can just do speeches_df['content'] to get the same sort of list of stuff.
Step4: Doing our analysis
Step5: Okay, it's far too big to even look at. Let's try to get a list of features from a new CountVectorizer that only takes the top 100 words.
Step6: Now let's push all of that into a dataframe with nicely named columns.
Step7: Everyone seems to start their speeches with "mr chairman" - how many speeches are there in total, how many don't mention "chairman", and how many mention neither "mr" nor "chairman"?
Step8: What is the index of the most thankful speech, a.k.a. the one that includes the word 'thank' the most times?
Step9: If I'm searching for China and trade, what are the top 3 speeches to read according to the CountVectorizer?
Step10: Now what if I'm using a TfidfVectorizer?
Step11: What's the content of the speeches? Here's a way to get them
Step12: Now search for something else! Another two terms that might show up. elections and chaos? Whatever you think might be interesting.
Step13: Enough of this garbage, let's cluster
Step14: Which one do you think works the best?
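One quick, optional way to compare them (not part of the original assignment) is to inspect the cluster sizes produced by the most recent fit, for example:
    import pandas as pd
    print(pd.Series(km.labels_).value_counts())   # number of documents assigned to each cluster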
|
788 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow-gpu==2.0.0-rc1
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
test_images = test_images.reshape((10000, 28, 28, 1))
# Normalize the pixel values to the range [0, 1].
train_images, test_images = train_images / 255.0, test_images / 255.0
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convolutional neural network
Step2: Download and prepare the MNIST dataset
Step3: Create the convolutional base
Step4: Let's display the architecture of the model so far.
Step5: Above, the output of every Conv2D and MaxPooling2D layer is a 3D tensor of shape (height, width, channels). The height and width dimensions tend to shrink as the network gets deeper. The number of output channels of each Conv2D layer is determined by its first argument (e.g. 32 or 64). Typically, as the height and width shrink, you can afford (computationally) to add more output channels to each Conv2D layer.
Step6: Let's check the final architecture of the model.
Step7: As you can see, the (4, 4, 64) output is flattened into a vector of length 1024 before passing through the two Dense layers.
Step8: Evaluate the model
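As an optional usage sketch (assuming the cells above have been run), a single prediction could be obtained like this:
    import numpy as np
    probs = model.predict(test_images[:1])   # class probabilities for one test digit
    print(np.argmax(probs, axis=1))          # predicted class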
|
789 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split, KFold
ratings = pd.read_csv('../raw-data/BX-Book-Ratings.csv', encoding='iso-8859-1', sep = ';')
ratings.columns = ['user_id', 'isbn', 'book_rating']
print(ratings.dtypes)
print()
print(ratings.head())
print()
print("Data Points :", ratings.shape[0])
books = pd.read_csv('../raw-data/BX-Books.csv', sep=';', encoding = 'iso-8859-1', dtype =str)
del books['Image-URL-L']
del books['Image-URL-M']
del books['Image-URL-S']
del books['Book-Author']
del books['Publisher']
print('Number of Books == Number of ISBN ? ', books["Book-Title"].nunique() == books["ISBN"].nunique())
book_dict = books[["Book-Title","ISBN"]].set_index("Book-Title").to_dict()["ISBN"]
books['new_isbn'] = books["Book-Title"].apply(lambda x: book_dict[x])
print('Number of Books == Number of ISBN ? ', books["Book-Title"].nunique() == books["new_isbn"].nunique())
books['isbn'] = books['new_isbn']
del books['ISBN']
del books['new_isbn']
newdf = ratings[ratings.book_rating>0]
joined = books.merge(newdf, on ='isbn')
print(newdf.shape)
datasets = []
for j in [100, 150, 200, 300, 500]:
df = joined.groupby('isbn').count().sort_values('user_id', ascending =False)[0:j].index.values
test = joined.groupby('user_id').count().sort_values('isbn', ascending = False)[:20000].index.values
newdf = joined[joined.user_id.isin(test) & joined.isbn.isin(df)]
data = newdf[newdf['user_id'].isin(newdf['user_id'].value_counts()[newdf['user_id'].value_counts()>1].index)]
print("users books")
print(data.user_id.nunique(), data.isbn.nunique())
print()
print('Sparsity :', data.shape[0]/(data.user_id.nunique() * data.isbn.nunique()))
print()
print(data.shape)
print()
print(data.groupby('user_id').count().sort_values('isbn', ascending = False).mean())
print()
datasets.append(data)
data = datasets[0]
rows = data.user_id.unique()
cols = data['Book-Title'].unique()
print(data.user_id.nunique(), data.isbn.nunique())
data = data[['user_id', 'Book-Title', 'book_rating']]
print("Sparsity :", 100 - (data.shape[0]/(len(cols)*len(rows)) * 100))
idict = dict(zip(cols, range(len(cols))))
udict = dict(zip(rows, range(len(rows))))
data.user_id = [
udict[i] for i in data.user_id
]
data['Book-Title'] = [
idict[i] for i in data['Book-Title']
]
nmat = data.as_matrix()
nmat
def rmse(ypred, ytrue):
ypred = ypred[ytrue.nonzero()].flatten()
ytrue = ytrue[ytrue.nonzero()].flatten()
return np.sqrt(mean_squared_error(ypred, ytrue))
def mae(ypred, ytrue):
ypred = ypred[ytrue.nonzero()].flatten()
ytrue = ytrue[ytrue.nonzero()].flatten()
return mean_absolute_error(ypred, ytrue)
def predict_naive(user, item):
prediction = imean1[item] + umean1[user] - amean1
return prediction
x1, x2 = train_test_split(nmat, test_size = 0.2, random_state =42)
naive = np.zeros((len(rows),len(cols)))
for row in x1:
naive[row[0], row[1]] = row[2]
predictions = []
targets = []
amean1 = np.mean(naive[naive!=0])
umean1 = sum(naive.T) / sum((naive!=0).T)
imean1 = sum(naive) / sum((naive!=0))
umean1 = np.where(np.isnan(umean1), amean1, umean1)
imean1 = np.where(np.isnan(imean1), amean1, imean1)
print('Naive---')
for row in x2:
user, item, actual = row[0], row[1], row[2]
predictions.append(predict_naive(user, item))
targets.append(actual)
print('rmse %.4f' % rmse(np.array(predictions), np.array(targets)))
print('mae %.4f' % mae(np.array(predictions), np.array(targets)))
print()
def cos(mat, a, b):
if a == b:
return 1
aval = mat.T[a].nonzero()
bval = mat.T[b].nonzero()
corated = np.intersect1d(aval, bval)
if len(corated) == 0:
return 0
avec = np.take(mat.T[a], corated)
bvec = np.take(mat.T[b], corated)
val = 1 - cosine(avec, bvec)
if np.isnan(val):
return 0
return val
def adjcos(mat, a, b, umean):
if a == b:
return 1
aval = mat.T[a].nonzero()
bval = mat.T[b].nonzero()
corated = np.intersect1d(aval, bval)
if len(corated) == 0:
return 0
avec = np.take(mat.T[a], corated)
bvec = np.take(mat.T[b], corated)
avec1 = avec - umean[corated]
bvec1 = bvec - umean[corated]
val = 1 - cosine(avec1, bvec1)
if np.isnan(val):
return 0
return val
def pr(mat, a, b, imean):
if a == b:
return 1
aval = mat.T[a].nonzero()
bval = mat.T[b].nonzero()
corated = np.intersect1d(aval, bval)
if len(corated) < 2:
return 0
avec = np.take(mat.T[a], corated)
bvec = np.take(mat.T[b], corated)
avec1 = avec - imean[a]
bvec1 = bvec - imean[b]
val = 1 - cosine(avec1, bvec1)
if np.isnan(val):
return 0
return val
def euc(mat, a, b):
if a == b:
return 1
aval = mat.T[a].nonzero()
bval = mat.T[b].nonzero()
corated = np.intersect1d(aval, bval)
if len(corated) == 0:
return 0
avec = np.take(mat.T[a], corated)
bvec = np.take(mat.T[b], corated)
dist = np.sqrt(np.sum(a-b)**2)
val = 1/(1+dist)
if np.isnan(val):
return 0
return val
def itemsimilar(mat, option):
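    # Build the full item-item similarity matrix with the chosen metric ('cos', 'adjcos',
    # 'pr', 'euc'; anything else falls back to sklearn's cosine_similarity), and also
    # return the global (amean), per-user (umean) and per-item (imean) mean ratings.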
amean = np.mean(mat[mat!=0])
umean = sum(mat.T) / sum((mat!=0).T)
imean = sum(mat) / sum((mat!=0))
umean = np.where(np.isnan(umean), amean, umean)
imean = np.where(np.isnan(imean), amean, imean)
n = mat.shape[1]
sim_mat = np.zeros((n, n))
if option == 'pr':
#print("PR")
for i in range(n):
for j in range(n):
sim_mat[i][j] = pr(mat, i, j, imean)
sim_mat = (sim_mat + 1)/2
elif option == 'cos':
#print("COS")
for i in range(n):
for j in range(n):
sim_mat[i][j] = cos(mat, i, j)
elif option == 'adjcos':
#print("ADJCOS")
for i in range(n):
for j in range(n):
sim_mat[i][j] = adjcos(mat, i, j, umean)
sim_mat = (sim_mat + 1)/2
elif option == 'euc':
#print("EUCLIDEAN")
for i in range(n):
for j in range(n):
sim_mat[i][j] = euc(mat, i, j)
else:
#print("Hello")
sim_mat = cosine_similarity(mat.T)
return sim_mat, amean, umean, imean
def predict(user, item, mat, item_similarity, amean, umean, imean, k=20):
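    # Item-based k-NN prediction: start from the baseline imean[item] + umean[user] - amean,
    # then add the similarity-weighted average deviation of the k most similar items the
    # user has already rated, and finally clip the result to the 1-10 rating scale.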
nzero = mat[user].nonzero()[0]
if len(nzero) == 0:
return amean
baseline = imean + umean[user] - amean
choice = nzero[item_similarity[item, nzero].argsort()[::-1][:k]]
prediction = ((mat[user, choice] - baseline[choice]).dot(item_similarity[item, choice])/ sum(item_similarity[item, choice])) + baseline[item]
if np.isnan(prediction):
prediction = amean
if prediction > 10:
prediction = 10
if prediction < 1:
prediction = 1
return prediction
def get_results(X, option, rows, cols, folds, k, timing = False):
kf = KFold(n_splits=folds, shuffle = True, random_state=42)
count = 1
rmse_list = []
mae_list = []
trmse_list = []
tmae_list = []
for train_index, test_index in kf.split(X):
print("---------- Fold ", count, "---------------")
train_data, test_data = X[train_index], X[test_index]
full_mat = np.zeros((rows, cols))
for row in train_data:
full_mat[row[0], row[1]] = row[2]
if timing:
start = time.time()
item_similarity, amean, umean, imean = itemsimilar(full_mat, option)
if timing:
end = time.time()
train_time = end - start
print("Training Time : ", train_time)
preds = []
real = []
for row in train_data:
user_id, isbn, rating = row[0], row[1], row[2]
preds.append(predict(user_id, isbn, full_mat, item_similarity, amean, umean, imean, k))
real.append(rating)
err1 = rmse(np.array(preds), np.array(real))
err2 = mae(np.array(preds), np.array(real))
trmse_list.append(err1)
tmae_list.append(err2)
print('Train Errors')
print('RMSE : %.4f' % err1)
print('MAE : %.4f' % err2)
preds = []
real = []
if timing:
start = time.time()
for row in test_data:
user_id, isbn, rating = row[0], row[1], row[2]
preds.append(predict(user_id, isbn, full_mat, item_similarity, amean, umean, imean, k))
real.append(rating)
if timing:
end = time.time()
test_time = end - start
print("Prediction Time : ", test_time)
err1 = rmse(np.array(preds), np.array(real))
err2 = mae(np.array(preds), np.array(real))
rmse_list.append(err1)
mae_list.append(err2)
print('Test Errors')
print('RMSE : %.4f' % err1)
print('MAE : %.4f' % err2)
count+=1
if timing:
return train_time, test_time
print("-------------------------------------")
print("Training Avg Error:")
print("AVG RMSE :", str(np.mean(trmse_list)))
print("AVG MAE :", str(np.mean(tmae_list)))
print()
print("Testing Avg Error:")
print("AVG RMSE :", str(np.mean(rmse_list)))
print("AVG MAE :", str(np.mean(mae_list)))
print(" ")
return np.mean(mae_list), np.mean(rmse_list)
sims = []
sims_rmse = []
for arg in ['euc','cos','','pr','adjcos']:
each_sims = []
each_sims_rmse = []
for k in [2, 3, 4, 5, 10, 15, 20, 25]:
print(arg, k)
ans1, ans2 = get_results(nmat, arg, len(rows), len(cols), 5 ,k)
each_sims.append(ans1)
each_sims_rmse.append(ans2)
print()
print("Best K Value for ", arg)
print()
print("Min MAE")
print(np.min(each_sims), np.argmin(each_sims))
print("Min RMSE")
print(np.min(each_sims_rmse), np.argmin(each_sims_rmse))
print()
sims.append(each_sims)
sims_rmse.append(each_sims_rmse)
cos_res = sims[1]
euc_res = sims[0]
pr_res = sims[3]
adjcos_res = sims[4]
k = [2, 3, 4, 5, 10, 15, 20, 25]
results_df1 = pd.DataFrame({'K': k, 'COS': cos_res, 'EUC': euc_res, 'Pearson': pr_res, 'Adjusted Cosine': adjcos_res})
plot1 = results_df1.plot(x='K', y=['COS', 'EUC', 'Pearson', 'Adjusted Cosine'], ylim=(0.95, 1.1), title = 'Item-Item CF: MAE for different similarity metrics at different Ks')
fig = plot1.get_figure()
fig.savefig('../figures/Kmae_item.png')
cos_res = sims_rmse[1]
euc_res = sims_rmse[0]
pr_res = sims_rmse[3]
adjcos_res = sims_rmse[4]
k = [2, 3, 4, 5, 10, 15, 20, 25]
results_df1 = pd.DataFrame({'K': k, 'COS': cos_res, 'EUC': euc_res, 'Pearson': pr_res, 'Adjusted Cosine': adjcos_res})
plot1 = results_df1.plot(x='K', y=['COS', 'EUC', 'Pearson', 'Adjusted Cosine'], ylim=(1.5, 1.6), title = 'Item-Item CF: RMSE for different similarity metrics at different Ks')
fig = plot1.get_figure()
fig.savefig('../figures/Krmse_item.png')
import time
trtimer = []
tetimer = []
for data1 in datasets:
rows1 = data1.user_id.unique()
cols1 = data1['Book-Title'].unique()
print(data1.user_id.nunique(), data1.isbn.nunique())
data1 = data1[['user_id', 'Book-Title', 'book_rating']]
idict = dict(zip(cols1, range(len(cols1))))
udict = dict(zip(rows1, range(len(rows1))))
data1.user_id = [
udict[i] for i in data1.user_id
]
data1['Book-Title'] = [
idict[i] for i in data1['Book-Title']
]
nmat1 = data1.as_matrix()
trt, tet = get_results(nmat1, 'euc', len(rows1), len(cols1), 5, 5, True)
trtimer.append(trt)
tetimer.append(tet)
print()
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': trtimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 80), title = 'Item-Item CF: Time to train over dataset with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/traintime.png')
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': tetimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 1), title = 'Item-Item CF: Time to Predict over Test Set with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/testtime.png')
full_mat = np.zeros((len(rows),len(cols)))
for row in nmat:
full_mat[row[0], row[1]] = row[2]
item_similarity, amean, umean, imean = itemsimilar(full_mat, 'euc')
def getmrec(full_mat, user_id, item_similarity, k, m, idict, cov = False):
n = item_similarity.shape[0]
nzero = full_mat[user_id].nonzero()[0]
preds = {}
for row in range(n):
preds[row] = predict(user_id, row, full_mat, item_similarity, amean, umean, imean, k)
flipped_dict = dict(zip(idict.values(), idict.keys()))
if not cov:
print("Books Read -----")
for i in nzero:
print(flipped_dict[i])
del preds[i]
res = sorted(preds.items(), key=lambda x: x[1], reverse = True)
ans = [flipped_dict[i[0]] for i in res[:m]]
return ans
for m in [5, 8, 10, 15]:
cov = []
for i in range(len(rows)):
cov.extend(getmrec(full_mat, i, item_similarity, 5, m, idict, True))
print("Coverage with", m, "recs:", len(set(cov)), "%")
getmrec(full_mat, 313, item_similarity, 5, 10, idict)
from surprise import evaluate, Reader, Dataset, SVD, NMF, GridSearch, KNNWithMeans
reader = Reader(rating_scale=(1, 10))
data2 = Dataset.load_from_df(data[['user_id', 'Book-Title', 'book_rating']], reader)
data2.split(5)
param_grid = {'n_factors': [30, 40, 50, 60, 70], 'n_epochs': [40, 50, 60], 'reg_pu': [0.001, 0.1, 1],
'reg_qi': [ 0.1, 1, 3, 5]}
grid_search = GridSearch(NMF, param_grid, measures=['RMSE', 'MAE'])
grid_search.evaluate(data2)
results_df = pd.DataFrame.from_dict(grid_search.cv_results)
print(results_df)
print(grid_search.best_score['RMSE'])
print(grid_search.best_params['RMSE'])
print(grid_search.best_score['MAE'])
print(grid_search.best_params['MAE'])
maelist = []
rmselist = []
factors = [20, 30, 40 ,50 ,60, 70, 80]
for i in factors:
algo = NMF(n_factors = i, reg_pu = 0.001, reg_qi = 3)
perf = evaluate(algo, data2)
maelist.append(np.mean(perf['mae']))
rmselist.append(np.mean(perf['rmse']))
results_df = pd.DataFrame({'Factors': factors, 'MAE': maelist, 'RMSE': rmselist})
plot1 = results_df.plot(x='Factors', y=['MAE', 'RMSE'], ylim=(0.9, 1.7), title = 'NMF: evaluation metrics vs number of latent factors')
fig = plot1.get_figure()
fig.savefig('../figures/NMFfactor.png')
from collections import defaultdict
def get_top_n(predictions, n=10):
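    # Group the Surprise predictions by user and keep the n items with the highest
    # estimated rating for each user.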
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
trainset = data2.build_full_trainset()
algo = NMF(n_epochs = 60, n_factors = 50, reg_pu = 0.001, reg_qi = 3)
algo.train(trainset)
# Than predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
top_n = get_top_n(predictions, n=10)
# Print the recommended items for each user
def recbooks(mat, user_id, idict, cov = False):
full_mat = np.zeros((len(rows),len(cols)))
for row in mat:
full_mat[row[0], row[1]] = row[2]
nzero = full_mat[user_id].nonzero()[0]
flipped_dict = dict(zip(idict.values(), idict.keys()))
ans = [flipped_dict[i[0]] for i in top_n[user_id]]
if not cov:
print("Books Read -----")
for i in nzero:
print(flipped_dict[i])
print()
print("Recs -----")
for i in ans:
print(i)
return ans
recbooks(nmat, 1,idict)
for m in [5, 8, 10, 15]:
cov = []
top_n = get_top_n(predictions, m)
for i in range(len(rows)):
cov.extend(recbooks(nmat, i,idict, True))
print("Coverage with", m, "recs:", len(set(cov)), "%")
trtimer = []
tetimer = []
for data4 in datasets:
rows4 = data4.user_id.unique()
cols4 = data4['Book-Title'].unique()
print(data4.user_id.nunique(), data4.isbn.nunique())
data4 = data4[['user_id', 'Book-Title', 'book_rating']]
idict = dict(zip(cols4, range(len(cols4))))
udict = dict(zip(rows4, range(len(rows4))))
data4.user_id = [
udict[i] for i in data4.user_id
]
data4['Book-Title'] = [
idict[i] for i in data4['Book-Title']
]
start = time.time()
reader = Reader(rating_scale=(1, 10))
data4 = Dataset.load_from_df(data4[['user_id', 'Book-Title', 'book_rating']], reader)
data4.split(5)
trainset = data4.build_full_trainset()
algo = NMF(n_epochs = 60, n_factors = 70, reg_pu = 0.001, reg_qi = 5)
algo.train(trainset)
end = time.time()
trt = end - start
print(trt)
testset = trainset.build_testset()
start = time.time()
predictions = algo.test(testset)
end = time.time()
tet = end - start
print(tet)
trtimer.append(trt)
tetimer.append(tet)
print()
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': trtimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 25), title = 'NMF Scaling: Time to train the dataset with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/traintimeNMF.png')
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': tetimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 1), title = 'NMF Scaling: Time to Predict over Test Set with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/testtimeNMF.png')
sim_options = {
'name': 'MSD',
'user_based' : False
}
algo = KNNWithMeans(sim_options = sim_options, k = 5, min_k =2)
perf = evaluate(algo, data2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the Books Dataset
Step2: Some books don't have a unique ISBN, so we create a 1:1 mapping from each book title to a single ISBN
Step3: Data Preparation/ Cleaning <br>
Step4: Sampling <br>
Step5: Taking Dataset with 100 items
Step6: Function for Evaluation Metrics
Step7: Our naive baseline prediction for any user i and item j is umean[i] + imean[j] - amean, i.e. the mean rating given by user i plus the mean rating received by item j, minus the average rating over the entire dataset.
Step8: Following are the functions to calculate pairwise similarity between two items
Step9: The itemsimilar function returns the matrix of pairwise similarities between all items for the chosen option. It also returns amean (global mean rating), umean (average rating of each user) and imean (average rating of each item).
Step10: The predict function returns the predicted rating of item j by user i.
Step11: The get_results function sets up the cross-validation; changing its parameters lets us tune the hyperparameter k (number of nearest neighbours).
Step12: Grid search for the best k for item-item CF using all the similarity metrics implemented.
Step13: Plot of MAE
Step14: Plot of RMSE
Step15: We observe no significant change in the RMSE and MAE values beyond k = 5; a simple explanation is that the average number of books rated per user is around 3.3.
Step16: The getmrec function returns the top m recommendations for a user_id, based on the chosen similarity matrix (option) and k neighbours.
Step17: Algo 2
Step18: 60 latent factors seem to be optimal.
Step19: Plot of varying evaluation metrics vs number of latent factors for NMF
Step20: The recbooks function is used to recommend books to a user.
Step21: Coverage
Step22: NMF Scaling with items
Step23: Comparing our implementation with Surprise
|
790 | <ASSISTANT_TASK:>
Python Code:
# %load selectors_echo_server.py
import selectors
import socket
mysel = selectors.DefaultSelector()
keep_running = True
def read(connection, mask):
"Callback for read events"
global keep_running
client_address = connection.getpeername()
print('read({})'.format(client_address))
data = connection.recv(1024)
if data:
# A readable client socket has data
print(' received {!r}'.format(data))
connection.sendall(data)
else:
# Interpret empty result as closed connection
print(' closing')
mysel.unregister(connection)
connection.close()
# Tell the main loop to stop
keep_running = False
def accept(sock, mask):
"Callback for new connections"
new_connection, addr = sock.accept()
print('accept({})'.format(addr))
new_connection.setblocking(False)
mysel.register(new_connection, selectors.EVENT_READ, read)
server_address = ('localhost', 10000)
print('starting up on {} port {}'.format(*server_address))
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)
server.bind(server_address)
server.listen(5)
mysel.register(server, selectors.EVENT_READ, accept)
while keep_running:
print('waiting for I/O')
for key, mask in mysel.select(timeout=1):
callback = key.data
callback(key.fileobj, mask)
print('shutting down')
mysel.close()
# %load selectors_echo_client.py
import selectors
import socket
mysel = selectors.DefaultSelector()
keep_running = True
outgoing = [
b'It will be repeated.',
b'This is the message. ',
]
bytes_sent = 0
bytes_received = 0
# Connecting is a blocking operation, so call setblocking()
# after it returns.
server_address = ('localhost', 10000)
print('connecting to {} port {}'.format(*server_address))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(server_address)
sock.setblocking(False)
# Set up the selector to watch for when the socket is ready
# to send data as well as when there is data to read.
mysel.register(
sock,
selectors.EVENT_READ | selectors.EVENT_WRITE,
)
while keep_running:
print('waiting for I/O')
for key, mask in mysel.select(timeout=1):
connection = key.fileobj
client_address = connection.getpeername()
print('client({})'.format(client_address))
if mask & selectors.EVENT_READ:
print(' ready to read')
data = connection.recv(1024)
if data:
# A readable client socket has data
print(' received {!r}'.format(data))
bytes_received += len(data)
# Interpret empty result as closed connection,
# and also close when we have received a copy
# of all of the data sent.
keep_running = not (
data or
(bytes_received and
(bytes_received == bytes_sent))
)
if mask & selectors.EVENT_WRITE:
print(' ready to write')
if not outgoing:
# We are out of messages, so we no longer need to
# write anything. Change our registration to let
# us keep reading responses from the server.
print(' switching to read-only')
mysel.modify(sock, selectors.EVENT_READ)
else:
# Send the next message.
next_msg = outgoing.pop()
print(' sending {!r}'.format(next_msg))
sock.sendall(next_msg)
bytes_sent += len(next_msg)
print('shutting down')
mysel.unregister(connection)
connection.close()
mysel.close()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When read() receives no data from the socket, it interprets the read event as the other side of the connection being closed instead of sending data. It removes the socket from the selector and closes it. In order to avoid an infinite loop, this server also shuts itself down after it has finished communicating with a single client.
|
791 | <ASSISTANT_TASK:>
Python Code:
%mkdir hello
%cd hello
%%file hello.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
cout << "Hello, world" << endl;
}
! g++ hello.cpp -o hello
! ./hello
%cd ..
%mkdir add1
%cd add1
%%file add.cpp
#include <iostream>
using std::cout;
using std::endl;
double add(double a, double b) {
return a + b;
}
int main() {
double a = 1.0, b= 2.0;
double c = add(a, b);
cout << a << " + " << b << " = " << c << endl;
}
! g++ add.cpp -o add
! ./add
%cd ..
%mkdir add2
%cd add2
%%file add.hpp
#pragma once
double add(double a, double b);
%%file add.cpp
double add(double a, double b) {
return a + b;
}
%%file add_driver.cpp
#include "add.hpp"
#include <iostream>
using std::cout;
using std::endl;
int main() {
double a = 1.0, b = 2.0;
double c = add(a, b);
cout << a << " + " << b << " = " << c << endl;
}
%%bash
g++ add_driver.cpp add.cpp -o add_driver
./add_driver
%cd ..
%mkdir add3
%cp add2/add.cpp add2/add.hpp add2/add_driver.cpp add3/
%cd add3
%%file Makefile
add_driver: add_driver.o add.o
g++ add_driver.o add.o -o add_driver
add_driver.o: add_driver.cpp add.hpp
g++ -c add_driver.cpp
add.o: add.cpp
g++ -c add.cpp
.PHONY: clean
clean:
rm add_driver *.o
! make
! make clean
! make
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = add_driver
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./add_driver
%cd ..
%mkdir linker
%cd linker
%%file test_linker.cpp
#include <cmath>
#include <iostream>
using std::cout;
using std::endl;
int main() {
cout << "2^10 = " << pow(2, 10) << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = test_linker
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
! make
! ./test_linker
%cd ..
%mkdir arrays
%cd arrays
%%file arrays.cpp
#include <cmath>
#include <iostream>
using std::cout;
using std::endl;
int main() {
    // pointers and address-of operator
int a = 1, b = 2;
int *p = &a, *q = &b;
cout << a << ", " << b << endl;
cout << *p << ", " << *q << endl;
cout << p << ", " << q << endl;
// An array name is just a pointer
int ms[] = {1,2,3,4};
// using indexing
cout << ms[0] << ", " << ms[1] << endl;
// using pointer arithmetic
cout << *(ms) << ", " << *(ms + 0) << ", " << *(ms + 2) << endl;
cout << 2[ms] << ", " << 3[ms] << endl; // wait, what??
// size of an array
cout << sizeof(ms)/sizeof(*ms) << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = arrays
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make
./arrays
%cd ..
%mkdir loops
%cd loops
%%file loops.cpp
#include <cmath>
#include <iostream>
using std::cout;
using std::endl;
int main() {
double xs[] = {0,1,2,3,4,5,6,7,8,9};
// looping with an index
for (int i=0; i<sizeof(xs)/sizeof(*xs); i++) {
cout << pow(xs[i], 2) << " ";
}
cout << endl;
// looping with an iterator
for (auto it=std::begin(xs); it!=std::end(xs); it++) {
cout << pow(*it, 2) << " ";
}
cout << endl;
// ranged for loop
for (auto x : xs) {
cout << pow(x, 2) << " ";
}
cout << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = loops
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./loops
%cd ..
%mkdir funcs1
%cd funcs1
%%file funcs1.cpp
#include <iostream>
using std::cout;
using std::endl;
double sum(double *xs, int n) {
double s = 0.0;
for (int i=0; i<n; i++) {
s += xs[i];
}
return s;
}
void triple(double *xs, double * ys, int n) {
for (int i=0; i<n; i++) {
ys[i] = 3 * xs[i];
}
}
int main() {
double xs[] = {1,2,3,4};
int n = sizeof(xs)/sizeof(*xs);
cout << sum(xs, n) << endl;
double ys[n];
triple(xs, ys, n);
for (auto y : ys) {
cout << y << " ";
}
cout << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = funcs1
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make
./funcs1
%cd ..
%mkdir funcs2
%cd funcs2
%%file funcs2.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
double k = 5.0;
double a = 1.0, b = 2.0;
auto add1 = [](int a, int b) { return a + b; };
auto add2 = [k](int a, int b) { return a + b + k; };
auto add3 = [&k](int a, int b) { return a + b + k; };
k *= 2;
cout << "Lambda: " << add1(a, b) << endl;
cout << "Lambda with capture by value: " << add2(a, b) << endl;
    cout << "Lambda with capture by reference: " << add3(a, b) << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = funcs2
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./funcs2
%cd ..
%mkdir templates
%cd templates
%%file templates.cpp
#include <iostream>
#include <vector>
#include <list>
#include <numeric>
using std::cout;
using std::endl;
using std::list;
using std::vector;
template<typename T>
T sum(vector<T> xs) {
T s = 0.0;
for (auto x : xs) {
s += x;
}
return s;
}
int main(){
vector<int> ns = {1,2,3};
vector<double> xs = {4.5, 6.4, 7.8};
// sum works with integers
cout << "Sum of ints: " << sum(ns) << endl;
// sum works with doubles
cout << "Sum of doubles: " << sum(xs) << endl;
// iota from the numeric library behaves like range
list<int> ys(10);
std::iota(ys.begin(), ys.end(), -3);
    // accumulate from the numeric library behaves like reduce with default operation of addition
cout << "Sum from iota: " << std::accumulate(ys.begin(), ys.end(), 6.0) << endl;
// Note that the initial value determines the template type
cout << "Sum of doubles using accumulate: " << std::accumulate(xs.begin(), xs.end(), 0.0) << endl;
    cout << "Surprise: " << std::accumulate(xs.begin(), xs.end(), 0) << endl;
// The binary operation can be user-defined
auto op = std::multiplies<int>();
cout << "Product of ints: " << std::accumulate(ns.begin(), ns.end(), 1, op) << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = templates
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./templates
%cd ..
%mkdir func_ptrs
%cd func_ptrs
%%file func_ptrs.cpp
#include <numeric>
#include <functional>
#include <iostream>
#include <vector>
using std::cout;
using std::endl;
using std::vector;
double sum(vector<double> xs) {
return std::accumulate(xs.begin(), xs.end(), 0.0);
}
double prod(vector<double> xs) {
return std::accumulate(xs.begin(), xs.end(), 1.0, std::multiplies<double>());
}
// function pointers in C++ are easy
using func = std::function<double(double)>;
// now you can pass in a function as an argument
double mystery(double x, func f) {
return f(x);
}
double foo(double x) {
return 2*x + 1;
}
double bar(double x) {
return 42*x;
}
int main() {
vector<double> xs = {1.2, 2.3};
cout << sum(xs) << endl;
cout << prod(xs) << endl;
    // auto can create iterables of functions!
auto funcs = {sum, prod};
for (auto f: funcs) {
cout << f(xs) << endl;
}
int x = 2;
cout << mystery(x, foo) << endl;
cout << mystery(x, bar) << endl;
cout << mystery(x, [](double x) {return x*x;}) << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
# File names
EXEC = func_ptrs
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./func_ptrs
%cd ..
%mkdir numeric
%cd numeric
%%file numeric.cpp
#include <iostream>
#include <fstream>
#include <armadillo>
using std::cout;
using std::ofstream;
int main()
{
using namespace arma;
vec u = linspace<vec>(0,1,5);
vec v = ones<vec>(5);
mat A = randu<mat>(4,5); // uniform random deviates
mat B = randn<mat>(4,5); // normal random deviates
    cout << "\nVectors in Armadillo\n";
cout << u << endl;
cout << v << endl;
cout << u.t() * v << endl;
cout << "\nRandom matrices in Armadillo\n";
cout << A << endl;
cout << B << endl;
cout << A * B.t() << endl;
cout << A * v << endl;
cout << "\nQR in Armadillo\n";
mat Q, R;
qr(Q, R, A.t() * A);
cout << Q << endl;
cout << R << endl;
}
%%file Makefile
# Declaration of variables
CC = g++
CC_FLAGS = -Wall -std=c++11
LD_FLAGS = -larmadillo # Add library for linking
# File names
EXEC = numeric
SOURCES = $(wildcard *.cpp)
OBJECTS = $(SOURCES:.cpp=.o)
# Main target
$(EXEC): $(OBJECTS)
$(CC) $(LD_FLAGS) $(OBJECTS) -o $(EXEC)
# To obtain object files
%.o: %.cpp
$(CC) -c $(CC_FLAGS) $< -o $@
# To remove generated files
clean:
rm -f $(EXEC) $(OBJECTS)
%%bash
make clean
make
./numeric
%cd ..
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple function
Step2: Using header files
Step3: Notes
Step4: A reusable Makefile
Step5: Linking to a library
Step6: Arrays, pointers and dereferencing
Step7: Loops in C++
Step8: Functions in C++
Step9: Anonymous functions
Step10: Templates
Step11: Function pointers
Step12: Using a numeric library
|
792 | <ASSISTANT_TASK:>
Python Code:
# imports
from importlib import reload
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units as u
from frb.halos import ModifiedNFW
from frb import halos as frb_halos
from frb import igm as frb_igm
from frb.figures import utils as ff_utils
from matplotlib import pyplot as plt
plt.rcParams['font.size'] = 17
help(frb_igm.f_diffuse)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True)
# Plot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
ax1.plot(zvals, f_diffuse, lw=2)
ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$')
ax2 = axs[1]
ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2)
ax2.set_yscale("log")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$')
plt.show()
help(frb_igm.ne_cosmic)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
avg_ne = frb_igm.ne_cosmic(zvals)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2)
plt.yscale("log")
plt.legend(loc = "upper left")
plt.xlabel('z')
plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]')
plt.show()
help(frb_igm.average_DM)
DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, DM_cosmic, lw=2)
plt.xlabel('z')
plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$')
plt.show()
help(frb_igm.average_DMhalos)
# evaluation
frb_igm.average_DMhalos(0.1)
# get cumulative DM_halos
dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True)
dm
zvals
fhot_array = [0.2, 0.5, 0.75]
rmax_array = [0.5, 1.0 , 2.0]
# <DM_halos> for different f_hot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
for f_hot in fhot_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True)
ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot))
ax1.legend(title="f_hot")
ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
# <DM_halos> for different rmax
ax2 = axs[1]
for rmax in rmax_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True)
ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax))
ax2.legend(title="rmax")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
plt.show()
# Limits of calculation
frb_igm.average_DMhalos(3.1)
# Failure above redshift 5
frb_igm.average_DMhalos(5.1)
help(frb_igm.average_DMIGM)
# Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0
dm, zvals = frb_igm.average_DM(0.1, cumul= True)
dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True)
dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True)
plt.plot(zvals, dm - dm_halos - dm_igm)
plt.ylabel(r"DM $pc~cm^{-3}$")
plt.xlabel("z")
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $\langle \rho_{diffuse, cosmic}\rangle$
Step2: $\langle n_{e,cosmic}\rangle$
Step3: $\langle DM_{cosmic}\rangle$
Step4: $\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$
|
793 | <ASSISTANT_TASK:>
Python Code:
# to generate gifs
!pip install imageio
from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.10 and enable eager execution
import tensorflow as tf
tf.enable_eager_execution()
import os
import time
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from IPython import display
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
# We are normalizing the images to the range of [-1, 1]
train_images = (train_images - 127.5) / 127.5
BUFFER_SIZE = 60000
BATCH_SIZE = 256
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
class Generator(tf.keras.Model):
def __init__(self):
super(Generator, self).__init__()
self.fc1 = tf.keras.layers.Dense(7*7*64, use_bias=False)
self.batchnorm1 = tf.keras.layers.BatchNormalization()
self.conv1 = tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)
self.batchnorm2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False)
self.batchnorm3 = tf.keras.layers.BatchNormalization()
self.conv3 = tf.keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False)
def call(self, x, training=True):
x = self.fc1(x)
x = self.batchnorm1(x, training=training)
x = tf.nn.relu(x)
x = tf.reshape(x, shape=(-1, 7, 7, 64))
x = self.conv1(x)
x = self.batchnorm2(x, training=training)
x = tf.nn.relu(x)
x = self.conv2(x)
x = self.batchnorm3(x, training=training)
x = tf.nn.relu(x)
x = tf.nn.tanh(self.conv3(x))
return x
class Discriminator(tf.keras.Model):
def __init__(self):
super(Discriminator, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')
self.conv2 = tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')
self.dropout = tf.keras.layers.Dropout(0.3)
self.flatten = tf.keras.layers.Flatten()
self.fc1 = tf.keras.layers.Dense(1)
def call(self, x, training=True):
x = tf.nn.leaky_relu(self.conv1(x))
x = self.dropout(x, training=training)
x = tf.nn.leaky_relu(self.conv2(x))
x = self.dropout(x, training=training)
x = self.flatten(x)
x = self.fc1(x)
return x
generator = Generator()
discriminator = Discriminator()
# Defun gives 10 secs/epoch performance boost
generator.call = tf.contrib.eager.defun(generator.call)
discriminator.call = tf.contrib.eager.defun(discriminator.call)
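# (Optional sanity check, not part of the original tutorial): before training,
# verify that the generator maps a noise batch to 28x28x1 images and that the
# discriminator returns one logit per image. The literal 100 matches the
# noise_dim defined further below.
sample_noise = tf.random_normal([4, 100])
sample_images = generator(sample_noise, training=False)
sample_logits = discriminator(sample_images, training=False)
print(sample_images.shape)  # expected: (4, 28, 28, 1)
print(sample_logits.shape)  # expected: (4, 1)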
def discriminator_loss(real_output, generated_output):
# [1,1,...,1] with real output since it is true and we want
# our generated examples to look like it
real_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.ones_like(real_output), logits=real_output)
# [0,0,...,0] with generated images since they are fake
generated_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.zeros_like(generated_output), logits=generated_output)
total_loss = real_loss + generated_loss
return total_loss
def generator_loss(generated_output):
return tf.losses.sigmoid_cross_entropy(tf.ones_like(generated_output), generated_output)
discriminator_optimizer = tf.train.AdamOptimizer(1e-4)
generator_optimizer = tf.train.AdamOptimizer(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 150
noise_dim = 100
num_examples_to_generate = 16
# keeping the random vector constant for generation (prediction) so
# it will be easier to see the improvement of the gan.
random_vector_for_generation = tf.random_normal([num_examples_to_generate,
noise_dim])
def generate_and_save_images(model, epoch, test_input):
# make sure the training parameter is set to False because we
# don't want to train the batchnorm layer when doing inference.
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
def train(dataset, epochs, noise_dim):
for epoch in range(epochs):
start = time.time()
for images in dataset:
# generating noise from a uniform distribution
noise = tf.random_normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
generated_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(generated_output)
disc_loss = discriminator_loss(real_output, generated_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.variables))
if epoch % 1 == 0:
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
random_vector_for_generation)
# saving (checkpoint) the model every 15 epochs
if epoch % 15 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time taken for epoch {} is {} sec'.format(epoch + 1,
time.time()-start))
# generating after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
random_vector_for_generation)
train(train_dataset, EPOCHS, noise_dim)
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
def display_image(epoch_no):
return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))
display_image(EPOCHS)
with imageio.get_writer('dcgan.gif', mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
# this is a hack to display the gif inside the notebook
os.system('cp dcgan.gif dcgan.gif.png')
display.Image(filename="dcgan.gif.png")
#from google.colab import files
#files.download('dcgan.gif')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import TensorFlow and enable eager execution
Step2: Load the dataset
Step3: Use tf.data to create batches and shuffle the dataset
Step4: Write the generator and discriminator models
Step5: Define the loss functions and the optimizer
Step6: Checkpoints (Object-based saving)
Step7: Training
Step8: Restore the latest checkpoint
Step9: Display an image using the epoch number
Step10: Generate a GIF of all the saved images.
Step11: To downlod the animation from Colab uncomment the code below
|
794 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy
import csv
data = open('../data/data.csv', 'r').readlines()
fieldnames = ['x', 'y', 'z', 'unmasked', 'synapses']
reader = csv.reader(data)
next(reader)  # skip the CSV header row
rows = [[int(col) for col in row] for row in reader]
sorted_x = sorted(list(set([r[0] for r in rows])))
sorted_y = sorted(list(set([r[1] for r in rows])))
sorted_z = sorted(list(set([r[2] for r in rows])))
total = sum([r[4] for r in rows])
volume = numpy.zeros((len(sorted_x), len(sorted_y), len(sorted_z)))
for r in range(total):
volume[numpy.random.randint(len(sorted_x)), numpy.random.randint(len(sorted_y)), numpy.random.randint(len(sorted_z))] += 1
real_volume = numpy.zeros((len(sorted_x), len(sorted_y), len(sorted_z)))
for r in rows:
real_volume[sorted_x.index(r[0]), sorted_y.index(r[1]), sorted_z.index(r[2])] = r[-1]
plt.imshow(numpy.amax(volume, axis=2), interpolation='nearest')
plt.show()
plt.imshow(numpy.amax(real_volume, axis=2), interpolation='nearest')
plt.show()
data = open('../data/data.csv', 'r').readlines()
fieldnames = ['x', 'y', 'z', 'unmasked', 'synapses']
reader = csv.reader(data)
next(reader)  # skip the CSV header row
rows = [[int(col) for col in row] for row in reader]
unmaskedSynapses = ([r[-1] for r in rows if r[-2] != 0])
unmaskedSynapsesNoZero = ([r[-1] for r in rows if r[-2] != 0 if r[-1] !=0])
#including zeros
plt.hist(unmaskedSynapses, bins=50)
plt.show()
#throwing away zeros
plt.hist(unmaskedSynapsesNoZero, bins=50)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Emulate the Distribution
Step2: The diagram below is what our volume's max-intensity projection would look like if it were perfectly uniform. Qualitatively, it's clear that our data do not follow this distribution in space in any dimension
Step3: We make the following assumptions about our data
|
795 | <ASSISTANT_TASK:>
Python Code:
a = 1
b = 3
a + b
a.__add__(b)
type(a)
isinstance(a, int)
class Animal(object):
mammal = True # class variable
def __init__(self, name, voice, color="black"):
self.name = name
self.__voice = voice # a "private" attribute (name-mangled by Python)
self._color = color # a "private by convention" attribute
def make_sound(self):
print('{0} {1} says "{2}"'.format(self._color, self.name, self.__voice))
@classmethod
def description(cls):
print("Some animal")
Animal.mammal
Animal.description()
a = Animal("dog", "bark")
a.mammal
a.__voice # raises AttributeError: the mangled name is not directly accessible
a._color # single-underscore privacy is only a convention, access still works
dir(a) # the mangled name _Animal__voice shows up in the listing
class Cat(Animal):
def __init__(self, color):
super().__init__(name="cat", voice="meow", color=color)
c = Cat(color="white")
isinstance(c, Animal)
c1 = Cat(color="white")
c2 = Cat(color="black")
print(c1.mammal)
c1.mammal = False
print(c1.mammal)
print(c2.mammal)
c1 = Cat(color="white")
c2 = Cat(color="black")
print(c1.mammal)
Cat.mammal = False
print(c1.mammal)
print(c2.mammal)
c._color = "green"
c.make_sound()
class Cat(Animal):
def __init__(self, color):
super().__init__(name="cat", voice="meow", color=color)
@property
def color(self):
return self._color
@color.setter
def color(self, val):
if val not in ("black", "white", "grey", "mixed"):
raise Exception("Cat can't be {0}!".format(val))
self._color = val
c = Cat("white")
c.color
c.color = "green"
c.color
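# (Illustrative addition using the Cat class defined above): the setter's
# validation error can be handled explicitly, leaving the previous color intact.
c_demo = Cat("grey")
try:
    c_demo.color = "green"
except Exception as err:
    print(err)          # Cat can't be green!
print(c_demo.color)     # still "grey"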
class A(object):
def __init__(self):
self.sandbox = {}
def __enter__(self):
return self.sandbox
def __exit__(self, exc_type, exc_value, traceback):
self.sandbox = {}
a = A()
with a as sbox:
sbox["foo"] = "bar"
print(sbox)
print(a.sandbox)
from contextlib import contextmanager
@contextmanager
def contextgen():
print("enter")
yield 1
print("exit")
with contextgen() as a:
print(a)
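# (Supplementary sketch): in practice the cleanup step of a generator-based
# context manager is wrapped in try/finally so that it still runs when the
# body of the with-statement raises an exception.
@contextmanager
def managed_resource():
    resource = {"open": True}
    try:
        yield resource
    finally:
        resource["open"] = False
        print("cleaned up")

with managed_resource() as r:
    print(r)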
import os
import requests
from threading import Thread
class DownloadThread(Thread):
def __init__(self, url, name):
super().__init__()
self.url = url
self.name = name
def run(self):
res = requests.get(self.url, stream=True)
res.raise_for_status()
fname = os.path.basename(self.url)
with open(fname, "wb") as savefile:
for chunk in res.iter_content(1024):
savefile.write(chunk)
print(f"{self.name} закончил загрузку {self.url} !")
def main(urls):
for item, url in enumerate(urls):
thread = DownloadThread(url, f"Thread {item + 1}")
thread.start()
main([
"http://www.irs.gov/pub/irs-pdf/f1040.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040a.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040ez.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040es.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040sb.pdf"
])
# In this case the interpreter waits for all child threads
# to finish before exiting.
# Other languages may behave differently!
import queue
class DownloadThread2(Thread):
def __init__(self, queue, name):
super().__init__()
self.queue = queue
self.name = name
def run(self):
while True:
url = self.queue.get()
fname = os.path.basename(url)
res = requests.get(url, stream=True)
res.raise_for_status()
with open(fname, "wb") as savefile:
for chunk in res.iter_content(1024):
savefile.write(chunk)
self.queue.task_done()
print(f"{self.name} закончил загрузку {url} !")
def main(urls):
q = queue.Queue()
threads = [DownloadThread2(q, f"Thread {i + 1}") for i in range(2)]
for t in threads:
# make the interpreter NOT wait for these daemon threads to finish
t.setDaemon(True)
t.start()
for url in urls:
q.put(url)
q.join() # everything has been processed - exit
main([
"http://www.irs.gov/pub/irs-pdf/f1040.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040a.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040ez.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040es.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040sb.pdf"
])
from multiprocessing import Process
from multiprocessing import Queue
import time
from concurrent.futures import ThreadPoolExecutor
# аналогично с ProcessPoolExecutor
def hold_my_beer_5_sec(beer):
time.sleep(5)
return beer
pool = ThreadPoolExecutor(3)
future = pool.submit(hold_my_beer_5_sec, ("Балтика"))
print(future.done())
time.sleep(5)
print(future.done())
print(future.result())
import concurrent.futures
import requests
def load_url(url):
fname = os.path.basename(url)
res = requests.get(url, stream=True)
res.raise_for_status()
with open(fname, "wb") as savefile:
for chunk in res.iter_content(1024):
savefile.write(chunk)
return fname
URLS = [
"http://www.irs.gov/pub/irs-pdf/f1040.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040a.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040ez.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040es.pdf",
"http://www.irs.gov/pub/irs-pdf/f1040sb.pdf"
]
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {
executor.submit(load_url, url): url
for url in URLS
}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
print(f"URL '{future_to_url[future]}' is saved to '{future.result()}'")
import threading
m = threading.Lock() # a mutex: only one thread can hold it at a time
m.acquire() # block until the lock is free, then take ownership
m.release() # give the lock back so another thread can acquire it
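# (Illustrative sketch, not part of the original notes): a typical use of a mutex
# is to protect a shared counter that several threads increment concurrently.
counter = 0
lock = threading.Lock()

def increment(n):
    global counter
    for _ in range(n):
        with lock:  # acquire/release handled by the context manager
            counter += 1

workers = [threading.Thread(target=increment, args=(100000,)) for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()
print(counter)  # expected: 400000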
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step3: What are threads?
Step4: Exercise
Step5: Ipyparallel
Step6: Synchronization primitives - the mutex
|
796 | <ASSISTANT_TASK:>
Python Code:
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import string
import time
import gc
start_time = time.time()
#returns: string
#arg: string
#remove punctuations and make the string lowercase
def removePunctuations(sen):
#split the string into word tokens
temp_l = sen.split()
i = 0
#changes the word to lowercase and removes punctuations from it
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
#splitting is done here because of sentences like "here---so": after punctuation removal it should
#become "here so"
content = " ".join(temp_l)
return content
#returns : int (total number of word tokens)
#arg: string,dict,dict,dict,dict
#loads the corpus for the dataset and makes the frequency count of quadgram and trigram strings
def loadCorpus(file_path,bi_dict,tri_dict,quad_dict,vocab_dict):
w1 = '' #for storing the 3rd last word to be used for next token set
w2 = '' #for storing the 2nd last word to be used for next token set
w3 = '' #for storing the last word to be used for next token set
token = []
word_len = 0
#open the corpus file and read it line by line
with open(file_path,'r') as file:
for line in file:
#split the line into tokens
token = line.split()
i = 0
#for each word in the token list ,remove pucntuations and change to lowercase
for word in token :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
token[i] = word.lower()
i=i+1
#make the token list into a string
content = " ".join(token)
token = content.split()
word_len = word_len + len(token)
if not token:
continue
#add the last word from previous line
if w3!= '':
token.insert(0,w3)
temp0 = list(ngrams(token,2))
#since we are reading line by line some combinations of word might get missed for pairing
#for trigram
#first add the previous words
if w2!= '':
token.insert(0,w2)
#tokens for trigrams
temp1 = list(ngrams(token,3))
#insert the 3rd last word from previous line for quadgram pairing
if w1!= '':
token.insert(0,w1)
#add new unique words to the vocaulary set if available
for word in token:
if word not in vocab_dict:
vocab_dict[word] = 1
else:
vocab_dict[word]+= 1
#tokens for quadgrams
temp2 = list(ngrams(token,4))
#count the frequency of the bigram sentences
for t in temp0:
sen = ' '.join(t)
bi_dict[sen] += 1
#count the frequency of the trigram sentences
for t in temp1:
sen = ' '.join(t)
tri_dict[sen] += 1
#count the frequency of the quadgram sentences
for t in temp2:
sen = ' '.join(t)
quad_dict[sen] += 1
#then take out the last 3 words
n = len(token)
#store the last few words for the next sentence pairing
w1 = token[n -3]
w2 = token[n -2]
w3 = token[n -1]
return word_len
#returns: void
#arg: dict,dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a trigram sentence
def createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len):
for quad_sen in quad_dict:
prob = 0.0
quad_token = quad_sen.split()
tri_sen = ' '.join(quad_token[:3])
tri_count = tri_dict[tri_sen]
if tri_count != 0:
prob = interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25)
if tri_sen not in prob_dict:
prob_dict[tri_sen] = []
prob_dict[tri_sen].append([prob,quad_token[-1]])
else:
prob_dict[tri_sen].append([prob,quad_token[-1]])
prob = None
tri_count = None
quad_token = None
tri_sen = None
#returns: void
#arg: dict
#for sorting the probable word acc. to their probabilities
def sortProbWordDict(prob_dict):
for key in prob_dict:
if len(prob_dict[key])>1:
prob_dict[key].sort(reverse = True) #sort in place; a bare sorted() call would discard the result
#returns: string
#arg: string,dict,int
#does prediction for the the sentence
def doPrediction(sen,prob_dict,rank = 1):
if sen in prob_dict:
if rank <= len(prob_dict[sen]):
return prob_dict[sen][rank-1][1]
else:
return prob_dict[sen][0][1]
else:
return "Can't predict"
#returns: float
#arg: list,int,dict,dict,dict,dict,float,float,float,float
#for calculating the interpolated probability
def interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25):
sen = ' '.join(quad_token)
prob =(
l1*(quad_dict[sen] / tri_dict[' '.join(quad_token[0:3])])
+ l2*(tri_dict[' '.join(quad_token[1:4])] / bi_dict[' '.join(quad_token[1:3])])
+ l3*(bi_dict[' '.join(quad_token[2:4])] / vocab_dict[quad_token[2]])
+ l4*(vocab_dict[quad_token[3]] / token_len)
)
return prob
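# (Toy illustration with hand-built counts, not part of the original pipeline):
# the interpolated probability is a weighted sum of the quadgram, trigram, bigram
# and unigram maximum-likelihood estimates, with the default weights
# l1 = l2 = l3 = l4 = 0.25 summing to one.
toy_vocab = {'the': 3, 'cat': 2, 'sat': 2, 'down': 1}
toy_bi = {'the cat': 2, 'cat sat': 2, 'sat down': 1}
toy_tri = {'the cat sat': 2, 'cat sat down': 1}
toy_quad = {'the cat sat down': 1}
p = interpolatedProbability(['the', 'cat', 'sat', 'down'], 8,
                            toy_vocab, toy_bi, toy_tri, toy_quad)
print(p)  # 0.25*(1/2) + 0.25*(1/2) + 0.25*(1/2) + 0.25*(1/8) = 0.40625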
#returns: string
#arg: void
#for taking input from user
def takeInput():
cond = False
#take input
while(cond == False):
sen = input('Enter the string\n')
sen = removePunctuations(sen)
temp = sen.split()
if len(temp) < 3:
print("Please enter atleast 3 words !")
else:
cond = True
temp = temp[-3:]
sen = " ".join(temp)
return sen
def main():
#variable declaration
tri_dict = defaultdict(int) #for keeping count of sentences of three words
quad_dict = defaultdict(int) #for keeping count of sentences of four words
vocab_dict = defaultdict(int) #for storing the different words with their frequencies
prob_dict = OrderedDict() #for storing the probabilities of probable words for a sentence
bi_dict = defaultdict(int)
#load the corpus for the dataset
token_len = loadCorpus('corpusfile.txt',bi_dict,tri_dict,quad_dict,vocab_dict)
print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time))
start_time1 = time.time()
#creates a dictionary of probable words
createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len)
#sort the dictionary of probable words
sortProbWordDict(prob_dict)
# writeProbWords(prob_dict)
gc.collect()
print("---Preprocessing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1))
"
if __name__ == '__main__':
main()
#variable declaration
tri_dict = defaultdict(int) #for keeping count of sentences of three words
quad_dict = defaultdict(int) #for keeping count of sentences of four words
vocab_dict = defaultdict(int) #for storing the different words with their frequencies
prob_dict = OrderedDict() #for storing the probabilities of probable words for a sentence
bi_dict = defaultdict(int)
#load the corpus for the dataset
token_len = loadCorpus('corpusfile.txt',bi_dict,tri_dict,quad_dict,vocab_dict)
print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time))
start_time1 = time.time()
#creates a dictionary of probable words
createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len)
#sort the dictionary of probable words
sortProbWordDict(prob_dict)
# writeProbWords(prob_dict)
gc.collect()
print("---Preprocessing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1))
sen = takeInput()
start_time2 = time.time()
prediction = doPrediction(sen,prob_dict)
print("Word Prediction:",prediction)
print("---Time for Prediction Operation: %s seconds ---" % (time.time() - start_time2))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <u>Do preprocessing</u>
Step2: Tokenize and load the corpus data
Step3: Create a Hash Table for Probable words for Trigram sentences
Step4: Sort the probable words
Step5: <u>Driver function for doing the prediction</u>
Step6: <u> For Computing Interpolated Probability</u>
Step7: <u>For Taking input from the User</u>
Step10: <u>main function</u>
Step11: <i><u>For Debugging Purpose Only</u></i>
|
797 | <ASSISTANT_TASK:>
Python Code:
# from pgmpy.factors.continuous import LinearGaussianCPD
import sys
import numpy as np
import pgmpy
sys.path.insert(0, "../pgmpy/")
from pgmpy.factors.continuous import LinearGaussianCPD
mu = np.array([7, 13])
sigma = np.array([[4 , 3],
[3 , 6]])
cpd = LinearGaussianCPD('Y', evidence_mean = mu, evidence_variance=sigma, evidence=['X1', 'X2'])
cpd.variable, cpd.evidence
%matplotlib inline
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import multivariate_normal
from matplotlib import pyplot
# Obtain the X and Y which are jointly gaussian from the distribution
mu_x = np.array([7, 13])
sigma_x = np.array([[4 , 3],
[3 , 6]])
# Variables
states = ['X1', 'X2']
# Generate samples from the distribution
X_Norm = multivariate_normal(mean=mu_x, cov=sigma_x)
X_samples = X_Norm.rvs(size=10000)
X_df = pd.DataFrame(X_samples, columns=states)
# Generate
X_df['P_X'] = X_df.apply(X_Norm.pdf, axis=1)
X_df.head()
g = sns.jointplot(X_df['X1'], X_df['X2'], kind="kde", height=10, space=0)
beta_vec = np.array([.7, .3])
beta_0 = 2
sigma_c = 4
def genYX(x):
x = [x['X1'], x['X2']]
var_mean = np.dot(beta_vec.transpose(), x) + beta_0
Yx_sample = np.random.normal(var_mean, sigma_c, 1)
return Yx_sample[0]
X_df['(Y|X)'] = X_df.apply(genYX, axis=1)
X_df.head()
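# (Sanity check, not in the original walkthrough): under this linear Gaussian model
# the marginal mean of Y is beta_0 + beta_vec . mu_x = 2 + 0.7*7 + 0.3*13 = 10.8,
# so the sample mean of the generated (Y|X) column should land close to that value.
print(X_df['(Y|X)'].mean())  # expected to be near 10.8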
sns.distplot(X_df['(Y|X)'])
# X_df.to_csv('gbn_values.csv', index=False)
cpd.fit(X_df, states=['(Y|X)', 'X1', 'X2'], estimator='MLE')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear Gaussian Models - The Process
|
798 | <ASSISTANT_TASK:>
Python Code:
from pynq import Overlay, PL
from pynq.board import LED, Switch, Button
Overlay('base.bit').download()
buttons = [Button(i) for i in range(4)]
leds = [LED(i) for i in range(4)]
switches = [Switch(i) for i in range(2)]
import asyncio
@asyncio.coroutine
def flash_led(num):
while True:
yield from buttons[num].wait_for_value_async(1)
while buttons[num].read():
leds[num].toggle()
yield from asyncio.sleep(0.1)
leds[num].off()
tasks = [asyncio.ensure_future(flash_led(i)) for i in range(4)]
import psutil
@asyncio.coroutine
def print_cpu_usage():
# Calculate the CPU utilisation by the amount of idle time
# each CPU has had in three second intervals
last_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
while True:
yield from asyncio.sleep(3)
next_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
usage = [(1-(c2-c1)/3) * 100 for c1,c2 in zip(last_idle, next_idle)]
print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage))
last_idle = next_idle
tasks.append(asyncio.ensure_future(print_cpu_usage()))
if switches[0].read():
print("Please set switch 0 low before running")
else:
switches[0].wait_for_value(1)
[t.cancel() for t in tasks]
switches[0].wait_for_value(0)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the flash LED task
Step2: Create the task
Step3: Monitoring the CPU Usage
Step4: Run the event loop
Step5: Clean up
Step6: Now if we re-run the event loop, nothing will happen when we press the buttons. The process will block until the switch is set back down to the low position.
|
799 | <ASSISTANT_TASK:>
Python Code:
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../style/custom.css'
HTML(open(css_file, "r").read())
# Import Libraries
# ----------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from pylab import rcParams
# Ignore Warning Messages
# -----------------------
import warnings
warnings.filterwarnings("ignore")
# Definition of modelling parameters
# ----------------------------------
xmax = 500.0 # maximum spatial extension of the 2D model in x-direction (m)
zmax = xmax # maximum spatial extension of the 2D model in z-direction (m)
dx = 1.0 # grid point distance in x-direction
dz = dx # grid point distance in z-direction
tmax = 0.502 # maximum recording time of the seismogram (s)
dt = 0.0010 # time step
vp0 = 580. # P-wave speed in medium (m/s)
# acquisition geometry
xr = 330.0 # x-receiver position (m)
zr = xr # z-receiver position (m)
xsrc = 250.0 # x-source position (m)
zsrc = 250.0 # z-source position (m)
f0 = 40. # dominant frequency of the source (Hz)
t0 = 4. / f0 # source time shift (s)
# 2D Wave Propagation (Finite Difference Solution)
# ------------------------------------------------
def FD_2D_acoustic(dt,dx,dz):
nx = (int)(xmax/dx) # number of grid points in x-direction
print('nx = ',nx)
nz = (int)(zmax/dz) # number of grid points in z-direction
print('nz = ',nz)
nt = (int)(tmax/dt) # maximum number of time steps
print('nt = ',nt)
ir = (int)(xr/dx) # receiver location in grid in x-direction
jr = (int)(zr/dz) # receiver location in grid in z-direction
isrc = (int)(xsrc/dx) # source location in grid in x-direction
jsrc = (int)(zsrc/dz) # source location in grid in z-direction
# Source time function (Gaussian)
# -------------------------------
src = np.zeros(nt + 1)
time = np.linspace(0 * dt, nt * dt, nt)
# 1st derivative of a Gaussian
src = -2. * (time - t0) * (f0 ** 2) * (np.exp(- (f0 ** 2) * (time - t0) ** 2))
# Analytical solution
# -------------------
G = time * 0.
# Initialize coordinates
# ----------------------
x = np.arange(nx)
x = x * dx # coordinates in x-direction (m)
z = np.arange(nz)
z = z * dz # coordinates in z-direction (m)
# calculate source-receiver distance
r = np.sqrt((x[ir] - x[isrc])**2 + (z[jr] - z[jsrc])**2)
for it in range(nt): # Calculate Green's function (Heaviside function)
if (time[it] - r / vp0) >= 0:
G[it] = 1. / (2 * np.pi * vp0**2) * (1. / np.sqrt(time[it]**2 - (r/vp0)**2))
Gc = np.convolve(G, src * dt)
Gc = Gc[0:nt]
lim = Gc.max() # get limit value from the maximum amplitude
# Initialize empty pressure arrays
# --------------------------------
p = np.zeros((nx,nz)) # p at time n (now)
pold = np.zeros((nx,nz)) # p at time n-1 (past)
pnew = np.zeros((nx,nz)) # p at time n+1 (present)
d2px = np.zeros((nx,nz)) # 2nd spatial x-derivative of p
d2pz = np.zeros((nx,nz)) # 2nd spatial z-derivative of p
# Initialize model (assume homogeneous model)
# -------------------------------------------
vp = np.zeros((nx,nz))
vp = vp + vp0 # initialize wave velocity in model
# Initialize empty seismogram
# ---------------------------
seis = np.zeros(nt)
# Calculate Partial Derivatives
# -----------------------------
for it in range(nt):
# FD approximation of spatial derivative by 3 point operator
for i in range(1, nx - 1):
for j in range(1, nz - 1):
d2px[i,j] = (p[i + 1,j] - 2 * p[i,j] + p[i - 1,j]) / dx ** 2
d2pz[i,j] = (p[i,j + 1] - 2 * p[i,j] + p[i,j - 1]) / dz ** 2
# Time Extrapolation
# ------------------
pnew = 2 * p - pold + vp ** 2 * dt ** 2 * (d2px + d2pz)
# Add Source Term at isrc
# -----------------------
# Absolute pressure w.r.t analytical solution
pnew[isrc,jsrc] = pnew[isrc,jsrc] + src[it] / (dx * dz) * dt ** 2
# Remap Time Levels
# -----------------
pold, p = p, pnew
# Output of Seismogram
# -----------------
seis[it] = p[ir,jr]
# Compare FD Seismogram with analytical solution
# ----------------------------------------------
# Define figure size
rcParams['figure.figsize'] = 12, 5
plt.plot(time, seis, 'b-',lw=3,label="FD solution") # plot FD seismogram
Analy_seis = plt.plot(time,Gc,'r--',lw=3,label="Analytical solution") # plot analytical solution
plt.xlim(time[0], time[-1])
plt.ylim(-lim, lim)
plt.title('Seismogram')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.legend()
plt.grid()
plt.show()
%%time
dx = 1.0 # grid point distance in x-direction (m)
dz = dx # grid point distance in z-direction (m)
dt = 0.0010 # time step (s)
FD_2D_acoustic(dt,dx,dz)
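# (Supplementary check, not in the original notebook): the explicit 2D scheme above
# is stable only if the Courant number vp0*dt/dx stays below 1/sqrt(2). With
# vp0 = 580 m/s, dt = 1 ms and dx = 1 m the Courant number is 0.58, so the chosen
# discretization satisfies the CFL condition.
courant = vp0 * dt / dx
print(courant, courant <= 1 / np.sqrt(2))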
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From 1D to 2D acoustic finite difference modelling
Step2: Comparison of 2D finite difference with analytical solution
|