Unnamed: 0 | cleaned_code | cleaned_prompt
---|---|---|
400 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import sys
# Add a new path with needed .py files.
sys.path.insert(0, r'C:\Users\Dominik\Documents\GitRep\kt-2015-DSPHandsOn\MedianFilter\Python')
import functions
import gitInformation
%matplotlib inline
gitInformation.printInformation()
fig = plt.figure()
for y in range (0, 40):
if y % 2 == 1:
functions.ErrorPlotWindow(5, y)
plt.savefig('WLError.png')
plt.figure(figsize=(30,20))
for x in range(1, 5):
for y in range(1, 6):
# Creates different subplots in one figure, with x and y.
        # the window length is calculated from x and y.
plt.subplot(5, 5, x + (y-1)*4)
windowLength = ((x-1)*2 + (y-1)*8)*8 +1
functions.medianSinPlot(16, windowLength, 16*128,128*5+1,-128*5-1)
plt.suptitle('Median filtered sine waves', fontsize = 48)
plt.xlabel(("Window length (/2 pi) = " + str(windowLength/128.)), fontsize=18)
fig = plt.figure(figsize=(30,20))
for x in range(1, 5):
for y in range(1, 6):
for z in range(0,40):
if z%2 ==1:
plt.subplot(5, 5, x + (y-1)*4)
wavenum = (x-1) + (y-1)*4
functions.ErrorPlotWindow(wavenum, z)
plt.suptitle(' Error of the Median filter with different wave numbers and window lengths',
fontsize = 40)
plt.xlabel(("Wave number = " + str((x-1) + (y-1)*4)), fontsize = 18)
fig = plt.figure(figsize=(30,20))
for x in range(1, 5):
for y in range(1, 6):
for z in range(0,160,3):
if z%2 ==1:
plt.subplot(5, 5, x + (y-1)*4)
wavenum = (x-1) + (y-1)*4
functions.ErrorPlotWindow( wavenum, z, 1024 )
plt.suptitle(' Error of the Median filter with different wave numbers and window lengths ',
fontsize = 40)
plt.xlabel(("Wave number = " + str((x-1) + (y-1)*4)), fontsize = 18)
plt.savefig('SumErrorWL')
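# (Added illustrative sketch) The custom `functions` module used above is not
# shown in this notebook; assuming it wraps a running median filter, the core
# idea can be reproduced with scipy.signal.medfilt. All names below are
# illustrative and not part of the original module.
from scipy.signal import medfilt
demo_t = np.linspace(0, 2 * np.pi, 16 * 128)
demo_sine = np.sin(5 * demo_t)                      # wave number 5
demo_filtered = medfilt(demo_sine, kernel_size=9)   # odd window length
demo_error = np.sum((demo_filtered - demo_sine) ** 2)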
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualization of the Error rate with different window lengths and a sine with wave number 5 (128 samples)
Step2: As you can see, the error is higher when the window is bigger.
Step3: Visualization of the Error rate with different wave numbers and different window lengths
Step4: At the beginning you can see that the error rises with a higher wave number, but towards the end there are some implausible curves. This is caused by the low number of samples (128).
|
401 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import re
import math
from sys import argv
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.set_context("poster")
def read_clusters_tsv(path):
df = pd.read_csv(path, sep='\t', header=0, index_col=0)
df = df.rename(columns={'Unnamed: 1': 'cluster_size'})
col = [int(re.sub(r'SIZE:([0-9]*)', r'\1', i)) for i in df['cluster_size']]
df['cluster_size'] = col
return df
def plot_hist_tara(df_all_clusters, df_missing_clusters, bin_max, bin_size, max_frac, num_genomes, ymax, fig_path):
fig, ax = plt.subplots()
sns.distplot(df_all_clusters.cluster_size, kde=False, color='b', bins=np.arange(0,bin_max+bin_size,bin_size), label='All gene clusters')
sns.distplot(df_missing_clusters.cluster_size, kde=False, color='r', bins=np.arange(0,bin_max+bin_size,bin_size), label='Missing from Tara metagenomes')
sns.despine(offset=10)
xticks = np.array(np.arange(0, max_frac, 0.2) * num_genomes)
xticklabels = xticks / num_genomes
plt.xticks(xticks, xticklabels)
plt.xlim(0, max_frac*num_genomes)
plt.xlabel('Cluster copy number (per genome)')
plt.yscale('log')
plt.ylim(0.5, 1e4)
yticks = np.array([1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000])
yticklabels = ['1', '2', '5', '10', '20', '50', '100', '200', '500', '1000', '2000', '5000', '10000']
plt.yticks(yticks, yticklabels)
plt.ylabel('Number of clusters')
plt.legend()
fig.set_size_inches(12, 8)
plt.savefig(fig_path)
def merge_cluster_counts(path):
# Paths of input files, containing cluster counts
paths = pd.Series.from_csv(path, header=-1, sep='\t', index_col=None)
# Data frame containing all samples cluster counts (NaN if missing)
pieces = []
for path in paths:
fullpath = "/Users/luke/singlecell/tara/PROK-139/%s" % path
counts = pd.DataFrame.from_csv(fullpath, header=-1, sep='\t', index_col=0)
pieces.append(counts)
frame = pd.concat(pieces, axis=1)
headings = paths.tolist()
frame.columns = headings
return frame
def make_df_size_count(df_all, df_counts):
df_size_count = pd.DataFrame()
df_size_count['cluster_size'] = df_all.cluster_size
df_size_count['tara_count'] = df_counts.count(axis=1)
df_size_count.fillna(0, inplace=True)
df_size_count.tara_count = df_size_count.tara_count.astype(int)
return(df_size_count)
def make_groupby_size_count(df_size_count):
# matrix style of cluster_size and tara_count
groupby_size_count = df_size_count.groupby(['cluster_size', 'tara_count']).size().unstack().transpose()
max_size = df_size_count.cluster_size.max()
max_count = int(df_size_count.tara_count.max())
# add empty columns
for i in range(1, max_size+1):
if not i in groupby_size_count:
groupby_size_count[i] = np.nan
# add empty rows (might not be any)
for i in range(1, max_count+1):
if not i in groupby_size_count.index:
groupby_size_count.loc[i] = np.nan
groupby_size_count.sort_index(axis=1, inplace=True)
#groupby_size_count.fillna(0, inplace=True)
return(groupby_size_count, max_size, max_count)
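# (Added illustrative sketch) A toy example of the groupby/unstack pivot used in
# make_groupby_size_count, with made-up values, to show the resulting
# size-by-count matrix.
demo = pd.DataFrame({'cluster_size': [1, 1, 2, 2, 2],
                     'tara_count':   [0, 1, 1, 1, 2]})
demo_matrix = demo.groupby(['cluster_size', 'tara_count']).size().unstack().transpose()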
def plot_heatmap_tara(df, num_genomes, max_size, max_count, max_frac, xinches, yinches, species, fig_path):
fig, ax = plt.subplots()
myfig = sns.heatmap(np.log(df.iloc[::-1]), square=False, robust=True, cmap='inferno')
xticks = np.array(np.arange(0, 5, 0.2) * num_genomes)
xticklabels = xticks / num_genomes
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_xlabel('Cluster copy number (per genome)')
ylabels = np.array(df.index[0::20].tolist() + [max_count])
ax.set_yticks(ylabels+0.5)
ax.set_yticklabels(ylabels)
ax.set_ylabel('Tara metagenomes cluster found in')
ax.set_title('Density heatmap of %s gene clusters' % species)
ax.axis([0, max_frac*num_genomes, 0, max_count+2])
fig.set_size_inches(xinches, yinches)
fig.savefig(fig_path)
# missing clusters
path = '/Users/luke/singlecell/tara/table_missing_proch_all_1e-5.tsv'
num_genomes = 145
fig_full = '/Users/luke/singlecell/tara/hist_missing_proch_all_1e-5_full.pdf'
fig_zoom = '/Users/luke/singlecell/tara/hist_missing_proch_all_1e-5_zoom.pdf'
df_missing_clusters = read_clusters_tsv(path)
# all clusters
path = '/Users/luke/singlecell/tara/table_all_proch.tsv'
df_all_clusters = read_clusters_tsv(path)
# tara counts by cluster
species = 'proch'
evalue = '1e-5'
path = '/Users/luke/singlecell/tara/paths_%s_%s.list' % (species, evalue)
df_counts = merge_cluster_counts(path)
# col_SRF = [col for col in list(df_counts.columns) if 'SRF' in col]
# df_counts = df_counts[col_SRF]
# full hist
plot_hist_tara(df_all_clusters, df_missing_clusters, 240, 5.8, 1.6001, num_genomes, 10000, fig_full)
# zoom hist
# plot_hist_tara(df_all_clusters, df_missing_clusters, 1.6201, num_genomes, 30, fig_zoom)
df_size_count = make_df_size_count(df_all_clusters, df_counts)
groupby_size_count, max_size, max_count = make_groupby_size_count(df_size_count)
print(groupby_size_count.max().max())
# def fourth_root(num):
# return num**0.25
# def square_root(num):
# return num**0.5
# groupby_size_count_fourth_root = groupby_size_count_nonan.apply(fourth_root)
# groupby_size_count_square_root = groupby_size_count_nonan.apply(square_root)
fig_path = '/Users/luke/singlecell/tara/heatmap_tara_proch.pdf'
plot_heatmap_tara(groupby_size_count, num_genomes, max_size, max_count, 1.1, 12, 8, 'Prochlorococcus', fig_path) # 1.1 was 1.62
# jointplot of cluster_size and tara_count
fig = sns.jointplot(x='cluster_size', y='tara_count', data=df_size_count)
# missing clusters
path = '/Users/luke/singlecell/tara/table_missing_pelag_all_1e-5.tsv'
num_genomes = 47
fig_full = '/Users/luke/singlecell/tara/hist_missing_pelag_all_1e-5_full.pdf'
fig_zoom = '/Users/luke/singlecell/tara/hist_missing_pelag_all_1e-5_zoom.pdf'
df_missing_clusters = read_clusters_tsv(path)
# all clusters
path = '/Users/luke/singlecell/tara/table_all_pelag.tsv'
df_all_clusters = read_clusters_tsv(path)
# tara counts by cluster
species = 'pelag'
evalue = '1e-5'
path = '/Users/luke/singlecell/tara/paths_%s_%s.list' % (species, evalue)
df_counts = merge_cluster_counts(path)
# col_SRF = [col for col in list(df_counts.columns) if 'SRF' in col]
# df_counts = df_counts[col_SRF]
df_counts.shape
# full hist
plot_hist_tara(df_all_clusters, df_missing_clusters, 100, 1.88, 1.6001, num_genomes, 5000, fig_full)
# zoom hist
#plot_hist_tara(df_all_clusters, df_missing_clusters, 1.6201, num_genomes, 30, fig_zoom)
df_size_count = make_df_size_count(df_all_clusters, df_counts)
groupby_size_count, max_size, max_count = make_groupby_size_count(df_size_count)
print(groupby_size_count.max().max())
groupby_size_count.max().max()
fig_path = '/Users/luke/singlecell/tara/heatmap_tara_pelag.pdf'
plot_heatmap_tara(groupby_size_count, num_genomes, max_size, max_count, 1.1, 12, 8, 'Pelagibacter', fig_path) # 1.1 was 1.62
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Seaborn settings
Step2: Function to read tsv of clusters and fix cluster size column
Step3: Function to plot histograms of cluster size
Step4: Function to merge cluster counts
Step5: Function to make two-column dataframe of cluster_size and tara_count
Step6: Function to make groupby_size_count frame
Step7: Function to plot heatmap
Step8: PROCHLOROCOCCUS
Step9: Histograms of Prochlorococcus gene cluster size among clusters MISSING in all Tara samples
Step10: Density heatmap of Prochlorococcus gene clusters by cluster size (number of genomes) and presence/absence in 139 Tara prokaryote metagenomes
Step11: PELAGIBACTER
Step12: Histograms of Pelagibacter gene cluster size among clusters MISSING in all Tara samples
Step13: Density heatmap of Pelagibacter gene clusters by cluster size (number of genomes) and presence/absence in 139 Tara prokaryote metagenomes
|
402 | <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import make_classification
X, y = make_classification()
from reskit.core import Pipeliner
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
classifiers = [('LR', LogisticRegression()),
('SVC', SVC())]
scalers = [('standard', StandardScaler()),
('minmax', MinMaxScaler())]
steps = [('scaler', scalers),
('classifier', classifiers)]
param_grid = {'LR': {'penalty': ['l1', 'l2']},
'SVC': {'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}}
from sklearn.model_selection import StratifiedKFold
grid_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
eval_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
pipe = Pipeliner(steps=steps, grid_cv=grid_cv, eval_cv=eval_cv, param_grid=param_grid)
pipe.plan_table
pipe.get_results(X, y, scoring=['roc_auc'])
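# (Added illustrative sketch) One cell of the plan table above corresponds
# roughly to a plain scikit-learn pipeline plus grid search. This is only an
# approximation of what Pipeliner automates, not its actual implementation.
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
single_pipe = Pipeline([('scaler', StandardScaler()),
                        ('classifier', LogisticRegression())])
single_grid = GridSearchCV(single_pipe,
                           {'classifier__penalty': ['l1', 'l2']},
                           cv=grid_cv, scoring='roc_auc')
# single_grid.fit(X, y)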
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting steps for our pipelines and parameters for grid search
Step2: Setting a cross-validation for grid searching of hyperparameters and for evaluation of models with obtained hyperparameters.
Step3: Creating a plan of our research
Step4: To tune parameters of models and evaluate this models, run
|
403 | <ASSISTANT_TASK:>
Python Code:
import george
george.__version__
import numpy as np
import matplotlib.pyplot as plt
def objective(theta):
return -0.5 * np.exp(-0.5*(theta - 2)**2) - 0.5 * np.exp(-0.5 * (theta + 2.1)**2 / 5) + 0.3
t = np.linspace(-5, 5, 5000)
plt.figure(figsize=(8, 5))
plt.plot(t, objective(t))
plt.ylim(-0.37, 0.37)
plt.xlim(-5, 5)
plt.xlabel("$\\theta$")
plt.ylabel("objective");
from george import kernels
from scipy.special import erf
from scipy.optimize import minimize
N_init = 4
train_theta = np.linspace(-5, 5, N_init + 1)[1:]
train_theta -= 0.5 * (train_theta[1] - train_theta[0])
train_f = objective(train_theta)
gp = george.GP(np.var(train_f) * kernels.Matern52Kernel(3.0),
fit_mean=True)
gp.compute(train_theta)
def nll(params):
gp.set_parameter_vector(params)
g = gp.grad_log_likelihood(train_f, quiet=True)
return -gp.log_likelihood(train_f, quiet=True), -g
fig, axes = plt.subplots(2, 2, figsize=(8, 6))
j = 0
old_min = None
converged = False
for i in range(1000):
# Update the GP parameters
soln = minimize(nll, gp.get_parameter_vector(), jac=True)
# Compute the acquisition function
mu, var = gp.predict(train_f, t, return_var=True)
std = np.sqrt(var)
f_min = np.min(train_f)
chi = (f_min - mu) / std
Phi = 0.5 * (1.0 + erf(chi / np.sqrt(2)))
phi = np.exp(-0.5 * chi**2) / np.sqrt(2*np.pi*var)
A_ei = (f_min - mu) * Phi + var * phi
A_max = t[np.argmax(A_ei)]
# Add a new point
train_theta = np.append(train_theta, A_max)
train_f = np.append(train_f, objective(train_theta[-1]))
gp.compute(train_theta)
# Estimate the minimum - I'm sure that there's a better way!
i_min = np.argmin(mu)
sl = slice(max(0, i_min - 1), min(len(t), i_min + 2))
ts = t[sl]
D = np.vander(np.arange(len(ts)).astype(float))
w = np.linalg.solve(D, mu[sl])
minimum = ts[0] + (ts[1] - ts[0]) * np.roots(np.polyder(w[::-1]))
# Check convergence
if i > 0 and np.abs((old_min - minimum) / minimum) < 1e-5:
converged = True
old_min = float(minimum[0])
# Make the plots
if converged or i in [0, 1, 2]:
ax = axes.flat[j]
j += 1
ax.plot(t, objective(t))
ax.plot(t, mu, "k")
ax.plot(train_theta[:-1], train_f[:-1], "or")
ax.plot(train_theta[-1], train_f[-1], "og")
ax.fill_between(t, mu+std, mu-std, color="k", alpha=0.1)
if i <= 3:
ax2 = ax.twinx()
ax2.plot(t, A_ei, "g", lw=0.75)
ax2.set_yticks([])
ax.axvline(old_min, color="k", lw=0.75)
ax.set_ylim(-0.37, 0.37)
ax.set_xlim(-5, 5)
ax.set_yticklabels([])
ax.annotate("step {0}; {1:.3f}".format(i+1, old_min), xy=(0, 1),
xycoords="axes fraction", ha="left", va="top",
xytext=(5, -5), textcoords="offset points",
fontsize=14)
if converged:
break
plt.tight_layout()
print("{0} model evaluations".format(len(train_f)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this tutorial, we'll show a very simple example of implementing "Bayesian optimization" using george.
Step2: Now, for the "Bayesian" optimization, the basic procedure that we'll follow is
|
404 | <ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# We'll also import a few standard python libraries
from matplotlib import pyplot
import numpy as np
import time
# These are the droids you are looking for.
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
# Let's show all plots inline.
%matplotlib inline
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
print("Workspace has blob 'X'? {}".format(workspace.HasBlob("X")))
X = np.random.randn(2, 3).astype(np.float32)
print("Generated X from numpy:\n{}".format(X))
workspace.FeedBlob("X", X)
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
print("Workspace has blob 'X'? {}".format(workspace.HasBlob("X")))
print("Fetched X:\n{}".format(workspace.FetchBlob("X")))
np.testing.assert_array_equal(X, workspace.FetchBlob("X"))
try:
workspace.FetchBlob("invincible_pink_unicorn")
except RuntimeError as err:
print(err)
print("Current workspace: {}".format(workspace.CurrentWorkspace()))
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
# Switch the workspace. The second argument "True" means creating
# the workspace if it is missing.
workspace.SwitchWorkspace("gutentag", True)
# Let's print the current workspace. Note that there is nothing in the
# workspace yet.
print("Current workspace: {}".format(workspace.CurrentWorkspace()))
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.SwitchWorkspace("default")
print("Current workspace: {}".format(workspace.CurrentWorkspace()))
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.ResetWorkspace()
print("Current blobs in the workspace after reset: {}".format(workspace.Blobs()))
# Create an operator.
op = core.CreateOperator(
"Relu", # The type of operator that we want to run
["X"], # A list of input blobs by their names
["Y"], # A list of output blobs by their names
)
# and we are done!
print("Type of the created op is: {}".format(type(op)))
print("Content:\n")
print(str(op))
workspace.FeedBlob("X", np.random.randn(2, 3).astype(np.float32))
workspace.RunOperatorOnce(op)
print("Current blobs in the workspace: {}\n".format(workspace.Blobs()))
print("X:\n{}\n".format(workspace.FetchBlob("X")))
print("Y:\n{}\n".format(workspace.FetchBlob("Y")))
print("Expected:\n{}\n".format(np.maximum(workspace.FetchBlob("X"), 0)))
op = core.CreateOperator(
"GaussianFill",
[], # GaussianFill does not need any parameters.
["Z"],
shape=[100, 100], # shape argument as a list of ints.
mean=1.0, # mean as a single float
std=1.0, # std as a single float
)
print("Content of op:\n")
print(str(op))
workspace.RunOperatorOnce(op)
temp = workspace.FetchBlob("Z")
pyplot.hist(temp.flatten(), bins=50)
pyplot.title("Distribution of Z")
net = core.Net("my_first_net")
print("Current network proto:\n\n{}".format(net.Proto()))
X = net.GaussianFill([], ["X"], mean=0.0, std=1.0, shape=[2, 3], run_once=0)
print("New network proto:\n\n{}".format(net.Proto()))
print("Type of X is: {}".format(type(X)))
print("The blob name is: {}".format(str(X)))
W = net.GaussianFill([], ["W"], mean=0.0, std=1.0, shape=[5, 3], run_once=0)
b = net.ConstantFill([], ["b"], shape=[5,], value=1.0, run_once=0)
Y = X.FC([W, b], ["Y"])
print("Current network proto:\n\n{}".format(net.Proto()))
from caffe2.python import net_drawer
from IPython import display
graph = net_drawer.GetPydotGraph(net, rankdir="LR")
display.Image(graph.create_png(), width=800)
workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.RunNetOnce(net)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
# Let's dump the contents of the blobs
for name in workspace.Blobs():
print("{}:\n{}".format(name, workspace.FetchBlob(name)))
workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.CreateNet(net)
workspace.RunNet(net.Proto().name)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
for name in workspace.Blobs():
print("{}:\n{}".format(name, workspace.FetchBlob(name)))
# It seems that %timeit magic does not work well with
# C++ extensions so we'll basically do for loops
start = time.time()
for i in range(1000):
workspace.RunNetOnce(net)
end = time.time()
print('Run time per RunNetOnce: {}'.format((end - start) / 1000))
start = time.time()
for i in range(1000):
workspace.RunNet(net.Proto().name)
end = time.time()
print('Run time per RunNet: {}'.format((end - start) / 1000))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You might see a warning saying that caffe2 does not have GPU support. That means you are running a CPU-only build. Don't be alarmed - anything CPU is still runnable without a problem.
Step2: We can feed blobs into the workspace using FeedBlob().
Step3: Now, let's take a look at what blobs are in the workspace.
Step4: Let's verify that the arrays are equal.
Step5: Note that if you try to access a blob that does not exist, an error will be thrown
Step6: One thing that you might not use immediately
Step7: Let's switch back to the default workspace.
Step8: Finally, ResetWorkspace() clears anything that is in the current workspace.
Step9: Operators
Step10: As we mentioned, the created op is actually a protobuf object. Let's show the content.
Step11: Ok, let's run the operator. We first feed the input X to the workspace.
Step12: After execution, let's see if the operator is doing the right thing.
Step13: This is working if your Expected output matches your Y output in this example.
Step14: Let's run it and see if things are as intended.
Step15: If you see a bell shaped curve then it worked!
Step16: Let's create a blob called X, and use GaussianFill to fill it with some random data.
Step17: You might have observed a few differences from the earlier core.CreateOperator call. Basically, when using a net, you can directly create an operator and add it to the net at the same time by calling net.SomeOp where SomeOp is a registered type string of an operator. This gets translated to
Step18: Let's continue to create W and b.
Step19: Now, one simple code sugar
Step20: Under the hood, X.FC(...) simply delegates to net.FC by inserting X as the first input of the corresponding operator, so what we did above is equivalent to
Step21: Too verbose huh? Let's try to visualize it as a graph. Caffe2 ships with a very minimal graph visualization tool for this purpose.
Step22: So we have defined a Net, but nothing has been executed yet. Remember that the net above is essentially a protobuf that holds the definition of the network. When we actually run the network, what happens under the hood is
Step23: Now let's try the second way to create the net, and run it. First, clear the variables with ResetWorkspace(). Then create the net with the workspace's net object that we created earlier using CreateNet(net_object). Finally, run the net with RunNet(net_name).
Step24: There are a few differences between RunNetOnce and RunNet, but the main difference is the computational overhead. Since RunNetOnce involves serializing the protobuf to pass between Python and C and instantiating the network, it may take longer to run. Let's run a test and see what the time overhead is.
|
405 | <ASSISTANT_TASK:>
Python Code:
# Make sure the base overlay is loaded
from pynq import Overlay
Overlay("base.bit").download()
from pynq.iop import Arduino_Analog
from pynq.iop import ARDUINO
from pynq.iop import ARDUINO_GROVE_A1
from pynq.iop import ARDUINO_GROVE_A4
analog1 = Arduino_Analog(ARDUINO,ARDUINO_GROVE_A1)
analog1.read()
analog1.read_raw()[0]
from time import sleep
analog1.set_log_interval_ms(100)
analog1.start_log()
log1 = analog1.get_log()
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(range(len(log1[0])), log1[0],
'ro', label="X-axis of joystick")
line2, = plt.plot(range(len(log1[1])), log1[1],
'bs', label="Y-axis of joystick")
plt.title('Arduino Analog Voltage Log')
plt.axis([0, len(log1[0]), 0.0, 3.3])
plt.legend(loc=4,bbox_to_anchor=(1, -0.3),
ncol=2, borderaxespad=0.,
handler_map={line1: HandlerLine2D(numpoints=1),
line2: HandlerLine2D(numpoints=1)})
plt.show()
analog2 = Arduino_Analog(ARDUINO,[0,1,4])
analog2.set_log_interval_ms(100)
analog2.start_log()
log2 = analog2.get_log()
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(range(len(log2[0])), log2[0],
'ro', label="X-axis of joystick")
line2, = plt.plot(range(len(log2[1])), log2[1],
'bs', label="Y-axis of joystick")
line3, = plt.plot(range(len(log2[2])), log2[2],
'g^', label="potentiometer")
plt.title('Arduino Analog Voltage Log')
plt.axis([0, len(log2[0]), 0.0, 3.3])
plt.legend(loc=4,bbox_to_anchor=(1, -0.3),
ncol=2, borderaxespad=0.,
handler_map={line1: HandlerLine2D(numpoints=1),
line2: HandlerLine2D(numpoints=1),
line3: HandlerLine2D(numpoints=1)})
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Instantiate individual analog controller
Step2: 2. Read voltage value out
Step3: 3. Read raw value out
Step4: 4. Logging multiple sample values
Step 1
Step5: Step 2
Step6: Step 3. Plot values over time
Step7: 5. Logging multiple devices
Step8: Step 2
Step9: Step 3. Plot values over time
|
406 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('./input.gtf', sep='\t', header = None)
df
replace_dict = {0 : 'reference', 1 : 'source', 2 : 'feature', 3 : 'start', 4 : 'end', 5 : 'score', 6 : 'strand', 7 : 'frame', 8 : 'attributes'}
df.rename(columns = replace_dict, inplace = True)
df
df.drop(['source', 'score'], axis=1, inplace = True)
df['reference'].replace('ENm006', 'ENCODE_REGION', inplace = True)
df['reference'] = df['reference'].apply(lambda x: 'ENCODE_REGION')
df['reference'] = 'ENCODE_REGION'
df
import re
df['gene'] = ''
df['transcript'] = ''
for (index, record) in df.iterrows():
transcript_id = re.search('transcript_id\s+(.+?);', record['attributes']).group(1).replace('"', '')
gene_id = re.search('gene_id\s+(.+?);', record['attributes']).group(1).replace('"', '')
df.loc[index, 'transcript'] = transcript_id
df.loc[index, 'gene'] = gene_id
df.drop('attributes', axis=1, inplace=True)
df
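# (Added note) A vectorized alternative to the iterrows() loop above, using
# pandas string extraction; shown commented out because the attributes column
# has already been parsed and dropped at this point. The regexes assume the
# usual GTF attribute format with quoted values.
# df['transcript'] = df['attributes'].str.extract(r'transcript_id\s+"(.+?)";', expand=False)
# df['gene'] = df['attributes'].str.extract(r'gene_id\s+"(.+?)";', expand=False)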
df['length'] = df['end'] - df['start'] + 1
df = df.reindex(columns = ['reference', 'feature', 'start', 'end', 'length', 'strand', 'frame', 'gene', 'transcript'])
df
df = df[df['length'] > 6]
df.drop(df[df.length <= 6].index, axis=0, inplace = True)
df
df.sort_values('start', ascending = True)
df.sort_values('length', ascending = True)
list(df['gene'].unique())
set(df['gene'])
list(df['transcript'].unique())
df['length'].mean()
min_length = df[df.feature == 'exon'].length.min()
min_length = df[df['feature'] == 'exon']['length'].min()
mask = (df.feature == 'exon') & (df.length == min_length)
list(df[mask]['transcript'].unique())
len(df[df['gene'] == 'ARHGAP4']['transcript'].unique())
list(df[df['strand'] == '+']['gene'].unique())
df_temp = df[(df['gene'] == 'ATP6AP1') & (df['feature'] == 'exon')]
set(zip(df_temp['start'], df_temp['end']))
len(df[(df.gene == 'ARHGAP4') & (df.feature == 'CDS')]['transcript'].unique())
df[df['gene'] == 'ATP6AP1']['strand'].unique()[0]
df_temp = df[(df.gene == 'ATP6AP1') & (df.feature == 'exon')][['start', 'end', 'transcript']]
transcript_list = list(df_temp['transcript'].unique())
feature_list_for_transcript = []
for transcript in transcript_list:
start_transcript = df_temp[df_temp['transcript'] == transcript]['start']
end_transcript = df_temp[df_temp['transcript'] == transcript]['end']
feature_list = list(zip(start_transcript, end_transcript))
feature_list_for_transcript.append(feature_list)
for transcript in transcript_list:
print(transcript)
print(feature_list_for_transcript.pop(0))
df.groupby(['gene', 'feature'])['transcript'].count()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2) Read the GTF file
Step2: NB
Step3: 4) Remove the source and score columns and replace the identifier ENm006 with the identifier ENCODE_REGION in all fields of the reference column
Step4: Three alternatives for replacing ENm006 with ENCODE_REGION
Step5: 5) Replace the attributes column with the two columns transcript and gene
Step6: 6) Add the length column containing the length of each feature
Step7: Reindexing the columns
Step8: 7) Remove all features whose length is less than or equal to 6 bases
Step9: Alternative using the drop() method
Step10: 8) Obtain a data frame sorted by increasing feature coordinates
Step11: 9) Obtain a data frame sorted by increasing feature length
Step12: 10) Determine the list of annotated genes
Step13: Alternatively, the set can also be determined
Step14: 11) Determine the list of identifiers of the annotated transcripts
Step15: 12) Compute the average feature length
Step16: 13) Determine the minimum exon length and the transcripts that contain an exon of minimum length
Step17: Alternatively, dot notation can be used
Step18: b) Extract the list of transcripts that contain an exon of minimum length
Step19: 14) Count how many transcripts are annotated for the ARHGAP4 gene
Step20: 15) Extract the list of genes on the + strand
Step21: 16) Extract the set of (distinct) exons of the ATP6AP1 gene
Step22: 17) Count the number of transcripts of the ARHGAP4 gene that have an annotated CDS
Step23: 18) Extract the strand of the ATP6AP1 gene
Step24: 19) For each transcript of the ATP6AP1 gene, extract the list of (start, end) tuples of its exons
Step25: b) Extract the list of transcripts
Step26: c) For each transcript, extract the list of its (start, end) features
Step27: d) Output, for each transcript, the list of its (start, end) features
Step28: 20) For each gene, count how many features are annotated for each of the types present in the GTF
|
407 | <ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pylab as plt
import numpy as np
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')
%load https://djcordhose.github.io/ai/fragments/neuron.py
from sklearn.datasets import load_iris
iris = load_iris()
iris.data[0]
neuron_no_activation(5.1, 3.5)
def centerAxis(uses_negative=False):
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
ax = plt.gca()
ax.spines['left'].set_position('center')
if uses_negative:
ax.spines['bottom'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def np_sigmoid(X):
return 1 / (1 + np.exp(X * -1))
x = np.arange(-10,10,0.01)
y = np_sigmoid(x)
centerAxis()
plt.plot(x,y,lw=3)
def np_relu(x):
return np.maximum(0, x)
x = np.arange(-10, 10, 0.01)
y = np_relu(x)
centerAxis()
plt.plot(x,y,lw=3)
w0 = 3
w1 = -4
w2 = 2
import math as math
def sigmoid(x):
return 1 / (1 + math.exp(x * -1))
def neuron(x1, x2):
sum = w0 + x1 * w1 + x2 * w2
return sigmoid(sum)
neuron(5.1, 3.5)
from keras.layers import Input
inputs = Input(shape=(4, ))
from keras.layers import Dense
fc = Dense(3)(inputs)
from keras.models import Model
model = Model(input=inputs, output=fc)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
inputs = Input(shape=(4, ))
fc = Dense(3)(inputs)
predictions = Dense(3, activation='softmax')(fc)
model = Model(input=inputs, output=predictions)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X = np.array(iris.data)
y = np.array(iris.target)
X.shape, y.shape
y[100]
# a tiny little piece of feature engineering
from keras.utils.np_utils import to_categorical
num_categories = 3
y = to_categorical(y, num_categories)
y[100]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# !rm -r tf_log
# tb_callback = keras.callbacks.TensorBoard(log_dir='./tf_log')
# https://keras.io/callbacks/#tensorboard
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006
# %time model.fit(X_train, y_train, epochs=500, validation_split=0.3, callbacks=[tb_callback])
%time model.fit(X_train, y_train, epochs=500, validation_split=0.3)
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X[0], y[0]
train_loss, train_accuracy = model.evaluate(X_train, y_train)
train_loss, train_accuracy
test_loss, test_accuracy = model.evaluate(X_test, y_test)
test_loss, test_accuracy
model.save('nn-iris.hdf5')
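# (Added sketch) The saved HDF5 model can be restored later with load_model,
# which is part of the same Keras API used above.
from keras.models import load_model
restored_model = load_model('nn-iris.hdf5')
# restored_model.predict(np.array([[5.1, 3.5, 1.4, 0.2]]))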
import os
from keras import backend as K
K.set_learning_phase(0)
sess = K.get_session()
!rm -r tf
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS
export_path_base = 'tf'
export_path = os.path.join(
tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(FLAGS.model_version)))
classification_inputs = tf.saved_model.utils.build_tensor_info(model.input)
classification_outputs_scores = tf.saved_model.utils.build_tensor_info(model.output)
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
signature = predict_signature_def(inputs={'inputs': model.input},
outputs={'scores': model.output})
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
sess,
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
})
builder.save()
!ls -lhR tf
# cd tf
# gsutil cp -R 1 gs://irisnn
# create model and version at https://console.cloud.google.com/mlengine
# gcloud ml-engine predict --model=irisnn --json-instances=./sample_iris.json
# SCORES
# [0.9954029321670532, 0.004596732556819916, 3.3544753819114703e-07]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Iris with neural networks
Step2: We try out our model on the Iris dataset
Step3: How should we interpret this? We cannot do much with it yet
Step4: Sigmoid
Step5: Relu
Step6: The complete neuron
Step7: Our first neural network with Keras
Step8:
Step9: Training
Step10: Evaluation
Step11: Hands-On
Step12: Export as raw tf model
Step13: This TensorFlow model can be uploaded to Google Cloud ML and used for predictions
|
408 | <ASSISTANT_TASK:>
Python Code:
# Load the basic libraries...
import sklearn
import mlutils
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
%pylab inline
from collections import Counter
class VotingClassifierDIY(object):
SCHEME_COUNTING = "counting"
SCHEME_AVERAGING = "averaging"
def __init__(self, clfs, voting_scheme=SCHEME_COUNTING):
self.clfs = clfs
self.voting_scheme = voting_scheme
def fit(self, X, y):
[ clf.fit(X,y) for clf in self.clfs ]
def predict_proba(self, X):
if self.voting_scheme == self.SCHEME_AVERAGING:
pred_avg = []; pred = []; prob = [];
for i in range(0, len(self.clfs)):
pred.append(self.clfs[i].predict_proba(X))
for i in range(0, shape(pred)[1]):
prob = []
for j in range(0, len(pred)):
prob.append(pred[j][i])
pred_avg.append(mean(prob, axis = 0))
return pred_avg
else:
raise Exception("Nemoze")
def predict(self, X):
if self.voting_scheme == self.SCHEME_AVERAGING:
avg = self.predict_proba(X)
return [ argmax(a) for a in avg ]
elif self.voting_scheme == self.SCHEME_COUNTING:
pred_cnt = []; pred = []; predd = [];
predd = [ clf.predict(X) for clf in self.clfs ]
for i in range(0, shape(predd)[1]): # 0 - 1000
for j in range(0, len(predd)): # 0 - 3
pred.append(predd[j][i])
for i in range(0, len(pred), 3):
pred_cnt.append((Counter(pred[i:i+3]).most_common()[0])[0])
return pred_cnt
else:
raise Exception("Nemoze")
from sklearn.datasets import make_classification
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
X_voting, y_voting = make_classification(n_samples=1000, n_features=4, n_redundant=0, n_informative=3, n_classes=3, n_clusters_per_class=2)
Logisticka1 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 1, max_iter = 1000).fit(X_voting, y_voting)
Logisticka2 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 100, max_iter = 1000).fit(X_voting, y_voting)
Logisticka3 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 1000, max_iter = 1000).fit(X_voting, y_voting)
clfs = [Logisticka1, Logisticka2, Logisticka3]
moj = VotingClassifierDIY(clfs = clfs)
njihov = VotingClassifier(estimators= [('lr1', Logisticka1), ('lr2', Logisticka2), ('lr3', Logisticka3)])
a = moj.fit(X_voting, y_voting)
a = njihov.fit(X_voting, y_voting)
moj.voting_scheme = moj.SCHEME_COUNTING
njihov.voting = 'hard'
print(all(moj.predict(X_voting) == njihov.predict(X_voting)))
moj.voting_scheme = moj.SCHEME_AVERAGING
njihov.voting = 'soft'
print(all(moj.predict_proba(X_voting) == njihov.predict_proba(X_voting)))
from sklearn.model_selection import train_test_split
X_bag, y_bag = make_classification(n_samples=1000, n_features=20, n_redundant=1, n_informative=17, n_classes=3, n_clusters_per_class=2)
X_bag_train, X_bag_test, y_bag_train, y_bag_test = train_test_split(X_bag, y_bag, train_size=0.7, test_size = 0.3, random_state=69)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
tree1 = DecisionTreeClassifier(max_depth = None).fit(X_bag_train, y_bag_train)
h1 = tree1.predict(X_bag_test)
print(zero_one_loss(y_bag_test, h1))
tree2 = DecisionTreeClassifier(max_depth= 10000000000).fit(X_bag_train, y_bag_train)
h2 = tree2.predict(X_bag_test)
print(zero_one_loss(y_bag_test, h2))
from sklearn.ensemble import RandomForestClassifier
L = list(range(0, 31))
error_train = []
error_test = []
for l in L:
rfc = RandomForestClassifier(n_estimators=l+1).fit(X_bag_train, y_bag_train)
h_train = rfc.predict(X_bag_train)
h_test = rfc.predict(X_bag_test)
error_train.append(zero_one_loss(y_bag_train, h_train))
error_test.append(zero_one_loss(y_bag_test, h_test))
minimal_error = min(error_test)
index_of_min = error_test.index(minimal_error)
print(minimal_error)
print(index_of_min)
figure(figsize(10, 5))
plot(L, error_train)
plot(L, error_test)
scatter(L[index_of_min], minimal_error)
grid()
from sklearn.datasets import make_circles
circ_X, circ_y = make_circles(n_samples=400, noise=0.1, factor=0.4)
mlutils.plot_2d_clf_problem(circ_X, circ_y)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
L = [1, 2, 3, 50]
figure(figsize(12, 8))
for i, l in enumerate(L):
ada = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth = 1), n_estimators = l).fit(circ_X, circ_y)
subplot(3,3, i +1)
mlutils.plot_2d_clf_problem(circ_X, circ_y, ada.predict)
X_boost, y_boost = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, n_classes=3, n_clusters_per_class=1)
X_boost_train, X_boost_test, y_boost_train, y_boost_test = train_test_split(X_boost, y_boost, train_size=0.7, random_state=69)
L = list(range(0, 81))
error_train = []
error_test = []
for l in L:
rfc = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth = 1), n_estimators=l+1).fit(X_bag_train, y_bag_train)
h_train = rfc.predict(X_bag_train)
h_test = rfc.predict(X_bag_test)
error_train.append(zero_one_loss(y_bag_train, h_train))
error_test.append(zero_one_loss(y_bag_test, h_test))
minimal_error = min(error_test)
index_of_min = error_test.index(minimal_error)
print(minimal_error)
print(index_of_min)
figure(figsize(10, 5))
plot(L, error_train)
plot(L, error_test)
scatter(L[index_of_min], minimal_error)
grid()
D = list(range(1, 21))
error_train = []
error_test = []
for d in D:
rfc = DecisionTreeClassifier(max_depth = d).fit(X_bag_train, y_bag_train)
h_train = rfc.predict(X_bag_train)
h_test = rfc.predict(X_bag_test)
error_train.append(zero_one_loss(y_bag_train, h_train))
error_test.append(zero_one_loss(y_bag_test, h_test))
minimal_error = min(error_test)
index_of_min = error_test.index(minimal_error)
print(minimal_error)
print(index_of_min)
figure(figsize(10, 5))
plot(D, error_train)
plot(D, error_test)
scatter(D[index_of_min], minimal_error)
grid()
def L(u, N, m):
return (u**m) * (1-u)**(N-m)
m1 = [1,2,5,9]
N1 = 10
m2 = [1, 10, 50, 90]
N2 = 100
u = linspace(0, 1, 300)
figure(figsize(10,8))
subplot(2,1,1)
grid()
for m in m1:
plot(u, L(u, N1, m))
subplot(2,1,2)
grid()
for m in m2:
plot(u, L(u, N2, m))
N = 10
figure(figsize(10, 8))
plot(u, L(u, N, 0))
plot(u, L(u, N, 9))
grid()
from scipy.stats import beta
alphas = [ 1, 2, 3, 0.5, 1, 2, 3, 0.5 ]
betas = [ 1, 2, 3, 0.5, 0.5, 3, 2, 1 ]
for a, b in zip(alphas, betas):
plot(u, beta.pdf(u, a, b))
grid()
axis([0, 1, 0, 3])
def P(u, N, m, a, b):
return L(u, N, m) * beta.pdf(u, a, b)
N = 10
m = 9
for a, b in zip(alphas, betas):
plot(u, P(u, N, m, a, b))
grid()
N = 10
m = 1
a = 4
b = 2
plot(u, beta.pdf(u, a, b))
plot(u, L(u, N, m))
plot(u, P(u, N, m, a, b))
grid()
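# (Added note) For a Bernoulli likelihood with a Beta(a, b) prior, the posterior
# is Beta(m + a, N - m + b), so the MAP estimate has the closed form below
# (valid when m + a > 1 and N - m + b > 1).
def map_estimate(N, m, a, b):
    return (m + a - 1.0) / (N + a + b - 2.0)
print(map_estimate(N, m, a, b))   # with the values set in the cell above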
from sklearn.datasets import load_iris
import itertools as it
iris = load_iris()
y = iris.target == 1
X = iris.data[y]
x = it.combinations(range(1, 5), 2)
figure(figsize(12, 28))
for i, (a, b) in enumerate(x):
subplot(6, 1, i+1)
grid()
scatter(X[:, a-1], X[:, b-1])
from scipy.stats import norm
def log_izglednost(D, mi_ML, sigm2_ML):
N = len(D)
return -1*((N/2)*log(2*pi) + N*log(sqrt(sigm2_ML)) + (1/(2*sigm2_ML))*sum((D - mi_ML)**2))
def mi_ML(D):
return sum(D)/len(D)
def sigm2_ML(D):
return sum((D - mi_ML(D))**2)/len(D)
for i in range(4):
D = X[:,i]
print('n=', i+1, ', mi_ML = ', str(mi_ML(D))[:4], ', sigm2_ML = ', str(sigm2_ML(D))[:7], ', izglednost = ', str(log_izglednost(D, mi_ML(D), sigm2_ML(D)))[:7] )
from scipy.stats import pearsonr
x = it.combinations(range(0,4), 2)
for i,j in x:
pear = pearsonr(X[:,i], X[:,j])[0]
print('from ', i, ' ', j, ' pear: ', pear)
N = [int(len(X)/4), int(len(X)/2), len(X)]
for br in N:
nepristran = cov(X[0:br,], rowvar = False, bias= False)
pristran = cov(X[0:br,], rowvar = False, bias = True)
diff_abs = mean(abs(nepristran - pristran))
diff_sqr = mean(pow((nepristran - pristran),2))
print('abs diff ', str(diff_abs)[:9], ' square diff ', diff_sqr, ' N ', br)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Ensembles (voting)
Step2: (b)
Step3: Q
Step4: The class that implements a decision tree is tree.DecisionTreeClassifier. First train a decision tree on the training set, but in such a way that the model is overly complex (overfit). You can achieve this by increasing the maximum allowed tree depth (the max_depth parameter). Print the error on the test set (0-1 loss; see the metrics package).
Step5: Now try the random forest algorithm (available in the ensemble.RandomForestClassifier class) for different numbers of trees $L \in [1, 30]$. Plot the error on the training set and on the test set as a function of this hyperparameter. Print the smallest error on the test set.
Step6: Q
Step7: (a)
Step8: Q
Step9: Plot the error curves on the training and test sets as a function of the hyperparameter $L \in [1,80]$. Use the 0-1 loss from the metrics package. Print the smallest error achieved on the test set, together with the corresponding value of the hyperparameter $L$.
Step10: Q
Step11: Q
Step12: (b)
Step13: Q
Step14: Q
Step15: Q
Step16: Q
Step17: (g)
Step18: (h)
Step19: (i)
Step20: Q
Step21: (k)
|
409 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy as sp
from scipy import linalg as la
import scipy.sparse.linalg as spla
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
n = 10
np.random.seed(0)
A = np.random.rand(n,n)+2*np.eye(n)
#print(np.linalg.eigvals(A))
B = np.random.rand(n,n)
#print(np.linalg.eigvals(B))
C = np.random.rand(n,n)
def solve_JGS_iterative_Sylvester(A,B,C,m,alg=1):
if alg==1:
# Algorithm 1
# AX+XB=C
# X=A^{-1}(C-XB)
# X^{(i+1)}=A^{-1}(C-X^{(i)} B)
X0 = np.zeros_like(A)
X1 = np.zeros_like(A)
for i in range(m):
X1=np.linalg.solve(A,C-np.dot(X0,B))
X0=X1
print(np.linalg.norm(np.dot(A,X1)+np.dot(X1,B)-C))
return X1
# elif algo==2: # TO DO 1!!!!!!!!!!!!!
# Algorithm 2
# AX+XB=C
# X = (C-AX)B^{-1}
# X^{(i+1)}=(C-A X^{(i)})B^{-1}
# How do we implement this? Hint: You only need to use np.linalg.solve in a convenient way.
X_JGS=solve_JGS_iterative_Sylvester(A,B,C,10)
print(X_JGS)
def compute_matrix_vector_product(x,A,B,n):
X = np.reshape(x,(n,n))
out = np.dot(A,X)+np.dot(X,B)
return out.flatten()
Ax = lambda x: compute_matrix_vector_product(x,A,B,n)
afun = spla.LinearOperator((n**2, n**2), matvec=Ax)
x, exitCode = spla.gmres(afun, C.flatten(), tol=1e-10)
X_GMRes = np.reshape(x,(n,n))
print(X_GMRes)
print(np.linalg.norm(X_JGS-X_GMRes))
Ax_JGS = Ax(X_JGS.flatten())
Ax_GMRes = Ax(X_GMRes.flatten())
c = C.flatten()
print(np.linalg.norm(Ax_JGS-c)/np.linalg.norm(c))
print(np.linalg.norm(Ax_GMRes-c)/np.linalg.norm(c))
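# (Added check) The linear operator above is the matrix-free form of the
# Kronecker representation of the Sylvester equation. With NumPy's default
# row-major flattening, AX + XB = C becomes
#   (np.kron(A, I) + np.kron(I, B.T)) vec(X) = vec(C),
# which for small n can be built explicitly to verify the GMRes solution.
M_full = np.kron(A, np.eye(n)) + np.kron(np.eye(n), B.T)
X_direct = np.reshape(np.linalg.solve(M_full, C.flatten()), (n, n))
print(np.linalg.norm(X_direct - X_GMRes))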
# This is a very instructive implementation of GMRes.
def GMRes_Ax(A, b, x0=np.array([0.0]), m=10, flag_display=True, threshold=1e-12):
n = len(b)
if len(x0)==1:
x0=np.zeros(n)
r0 = b - np.dot(A, x0)
nr0=np.linalg.norm(r0)
out_res=np.array(nr0)
Q = np.zeros((n,n))
H = np.zeros((n,n))
Q[:,0] = r0 / nr0
flag_break=False
for k in np.arange(np.min((m,n))):
y = np.dot(A, Q[:,k])
if flag_display:
print('||y||=',np.linalg.norm(y))
for j in np.arange(k+1):
H[j][k] = np.dot(Q[:,j], y)
if flag_display:
print('H[',j,'][',k,']=',H[j][k])
y = y - np.dot(H[j][k],Q[:,j])
if flag_display:
print('||y||=',np.linalg.norm(y))
# All but the last equation are treated equally. Why?
if k+1<n:
H[k+1][k] = np.linalg.norm(y)
if flag_display:
print('H[',k+1,'][',k,']=',H[k+1][k])
if (np.abs(H[k+1][k]) > 1e-16):
Q[:,k+1] = y/H[k+1][k]
else:
print('flag_break has been activated')
flag_break=True
# Do you remember e_1? The canonical vector.
e1 = np.zeros((k+1)+1)
e1[0]=1
H_tilde=H[0:(k+1)+1,0:k+1]
else:
H_tilde=H[0:k+1,0:k+1]
# Solving the 'SMALL' least square problem.
# This could be improved with Givens rotations!
ck = np.linalg.lstsq(H_tilde, nr0*e1)[0]
if k+1<n:
x = x0 + np.dot(Q[:,0:(k+1)], ck)
else:
x = x0 + np.dot(Q, ck)
# Why is 'norm_small' equal to 'norm_full'?
norm_small=np.linalg.norm(np.dot(H_tilde,ck)-nr0*e1)
out_res = np.append(out_res,norm_small)
if flag_display:
norm_full=np.linalg.norm(b-np.dot(A,x))
print('..........||b-A\,x_k||=',norm_full)
print('..........||H_k\,c_k-nr0*e1||',norm_small);
if flag_break:
if flag_display:
print('EXIT: flag_break=True')
break
if norm_small<threshold:
if flag_display:
print('EXIT: norm_small<threshold')
break
return x,out_res
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div id='intro' />
Step2: Using a Jacobi/Gauss-Seidel iterative solver
Step3: Using GMRes of SciPy
Step4: Computing the relative residues
Step5: To Do 2
|
410 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.stats
import matplotlib.pyplot as plt
def first_order(t,A,k):
    """First-order kinetics model."""
return A*(1 - np.exp(-k*t))
def first_order_r(param,t,obs):
    """Residuals function for first-order model."""
return first_order(t,param[0],param[1]) - obs
def fit_model(t,obs,param_guesses=(1,1)):
    """Fit the first-order model."""
fit = scipy.optimize.least_squares(first_order_r,
param_guesses,
args=(t,obs))
fit_A = fit.x[0]
fit_k = fit.x[1]
return fit_A, fit_k
d = pd.read_csv("data/time_course_0.csv")
A, k = fit_model(d.t,d.obs)
plt.plot(d.t,d.obs,'o')
plt.plot(d.t,first_order(d.t,A,k))
d = pd.read_csv("data/time_course_0.csv")
datasets = []
for i in range(1000):
datasets.append(d.obs + np.random.normal(0,0.05,len(d.t)))
A_list = []
k_list = []
for dataset in datasets:
A, k = fit_model(d.t,dataset)
A_list.append(A)
k_list.append(k)
plt.hist(A_list)
plt.show()
plt.hist(k_list)
plt.show()
k_list.sort() # Sort from low to high
lower = k_list[25] # Lower 25
upper = k_list[1000-25] # Upper 25
plt.hist(k_list,bins=np.arange(0.15,0.35,0.005))
plt.show()
print(lower,np.mean(k_list),upper)
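# (Added note) The same interval can be read off directly with np.percentile,
# which avoids sorting and indexing by hand.
lower_p, upper_p = np.percentile(k_list, [2.5, 97.5])
print(lower_p, upper_p)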
d = pd.read_csv("data/time_course_1.csv")
A1_list = []
k1_list = []
for i in range(1000):
A, k = fit_model(d.t,d.obs + np.random.normal(0,0.05,len(d.t)))
A1_list.append(A)
k1_list.append(k)
plt.hist(k_list,bins=np.arange(0.15,0.35,0.005))
plt.hist(k1_list,bins=np.arange(0.25,0.45,0.005))
plt.show()
k1_list.sort()
lower1 = k1_list[25]
upper1 = k1_list[1000-250]
print(lower,np.mean(k_list),upper)
print(lower1,np.mean(k1_list),upper1)
# 95% confidence intervals do not overlap. Can be distinguished.
### CELL FOR CREATING DATA
def create_data(t,k,A,out_file="junk.csv",noise=None):
write_noise = False
if noise == None:
noise = 0
elif type(noise) == float:
noise = np.random.normal(0,noise,len(t))
else:
write_noise = True
nosie = noise
obs = first_order(t,A,k) + noise
plt.plot(t,obs,"o")
if not write_noise:
d = pd.DataFrame({"t":t,"obs":obs})
else:
d = pd.DataFrame({"t":t,"obs":obs,"obs_err":np.abs(noise)})
d.to_csv(out_file)
#t = np.arange(0,10,0.25)
#create_data(t,0.25,1.0,"data/time_course_0.csv",0.05)
#create_data(t,0.35,1.0,"data/time_course_1.csv",0.05)
#create_data(t,0.25,1.0,"data/time_course_2.csv",np.random.normal(0,0.05,len(t)))
#create_data(t,0.25,1.0,"data/time_course_3.csv",np.random.normal(0,0.05,len(t)))
#create_data(t,0.25,1.0,"data/time_course_4.csv",np.random.normal(0,0.05,len(t)))
# these values
ssr_list = [0.05,0.001,0.00095]
num_parameters = [2,3,10]
num_obs = 10
# should give these weights
weights = [8.69e-9,0.9988,1.18e-3]
def calc_aic(ssr_list,k_list,num_obs):
aic_list = []
for i in range(len(ssr_list)):
aic_list.append(num_obs*np.log(ssr_list[i]) + 2*(k_list[i] + 1))
aic_list = np.array(aic_list)
delta_list = aic_list - np.min(aic_list)
Q = np.exp(-delta_list/2)
return Q/np.sum(Q)
weights = calc_aic(ssr_list,num_parameters,num_obs)
print(weights)
def residuals(params,r,c,f):
    """General residuals function."""
return f(r,*params) - c
def sed_eq_m(r,c0,M):
return c0*np.exp(M*(r**2)/2)
def sed_eq_md(r,c0,M,theta):
return c0*((theta)*np.exp(M*(r**2)/2) + (1 - theta)*np.exp(2*M*(r**2)/2))
## CREATE DATA CELL
#r = np.linspace(0,1,100)
#c0 = 1.0
#M = 2
#theta = 0.95
#md = pd.DataFrame({"r":r,"c":sed_eq_md(r,c0,M,theta)+ np.random.normal(0,0.025,len(r))})
#md.to_csv("dev/md.csv")
d = pd.read_csv("data/sed_eq.csv")
plt.plot(d.r, d.c,"o")
fit_m = scipy.optimize.least_squares(residuals,
(1,2),
kwargs={"r":d.r,
"c":d.c,
"f":sed_eq_m})
print("monomer:",fit_m.cost,fit_m.x,len(fit_m.fun))
plt.plot(d.r,sed_eq_m(d.r,fit_m.x[0],fit_m.x[1]),color="red")
fit_md = scipy.optimize.least_squares(residuals,
(1,2.0,0.95),
kwargs={"r":d.r,
"c":d.c,
"f":sed_eq_md})
print("monomer/dimer",fit_md.cost,fit_md.x,len(fit_md.fun))
plt.plot(d.r,sed_eq_md(d.r,fit_md.x[0],fit_md.x[1],fit_md.x[2]),color="blue")
calc_aic([0.0211731680092,0.0205784296649],[2,3],100)
d = pd.read_csv("data/gaussian.csv")
plt.plot(d.x,d.y)
def multi_gaussian(x,means,stds,areas):
    """Function calculating multiple gaussians (built from
    values in means, stds, areas). The number of gaussians
    is determined by the length of means, stds, and areas.
    The gaussian functions are calculated at values in
    array x.
    """
if len(means) != len(stds) or len(means) != len(areas):
err = "means, standard deviations and areas should have the same length!\n"
raise ValueError(err)
out = np.zeros(len(x),dtype=float)
for i in range(len(means)):
out += areas[i]*scipy.stats.norm(means[i],stds[i]).pdf(x)
return out
def multi_gaussian_r(params,x,y):
    """Residuals function for multi_gaussian."""
params = np.array(params)
if params.shape[0] % 3 != 0:
err = "num parameters must be divisible by 3\n"
raise ValueError(err)
means = params[np.arange(0,len(params),3)]
stds = params[np.arange(1,len(params),3)]
areas = params[np.arange(2,len(params),3)]
return multi_gaussian(x,means,stds,areas) - y
def fitter(x,y,means_guess,stds_guess,areas_guess):
    """Fit an arbitrary number of gaussian functions to x/y data.
    The number of gaussians that will be fit is determined by
    the length of means_guess.
    x: measurement x-values (array)
    y: measurement y-values (array)
    means_guess: array of guesses for means for gaussians.
        length determines number of gaussians
    stds_guess: array of guesses of standard deviations for
        gaussians. length must match means_guess
    areas_guess: array of area guesses for gaussians.
        length must match means_guess
    returns: means, stds, areas and fit sum-of-squared-residuals
    """
# Sanity check
if len(means_guess) != len(stds_guess) or len(means_guess) != len(areas_guess):
err = "means, standard deviations and areas should have the same length!\n"
raise ValueError(err)
# Construct an array of parameter guesses by assembling
# means, stds, and areas
param_guesses = []
for i in range(len(means_guess)):
param_guesses.append(means_guess[i])
param_guesses.append(stds_guess[i])
param_guesses.append(areas_guess[i])
param_guesses = np.array(param_guesses)
# Fit the multigaussian function
fit = scipy.optimize.least_squares(multi_gaussian_r,param_guesses,
args=(x,y))
# Disassemble into means, stds, areas
means = fit.x[np.arange(0,len(fit.x),3)]
stds = fit.x[np.arange(1,len(fit.x),3)]
areas = fit.x[np.arange(2,len(fit.x),3)]
return means, stds, areas, fit.cost
def plot_gaussians(means,stds,areas):
    """Plot a collection of gaussians.
    means: array of means for gaussians.
        length determines number of gaussians
    stds: array of standard deviations for gaussians.
        length must match means
    areas: array of areas for gaussians.
        length must match means
    """
plt.plot(d.x,multi_gaussian(d.x,means,stds,areas))
for i in range(len(means)):
plt.plot(d.x,multi_gaussian(d.x,
[means[i]],
[stds[i]],
[areas[i]]))
## CREATE DATA
#x = np.arange(-10,10,0.1)
#means = np.array((-2,0,1.5))
#stds = np.array((0.6,.6,1.5))
#areas = np.array((1,1,1))
#d = pd.DataFrame({"x":np.arange(-10,10,0.1),
# "y":multi_gaussian(x,means,stds,areas)+np.random.normal(0,0.01,len(x))})
#d.to_csv("dev/gaussian.csv")
d = pd.read_csv("data/gaussian.csv")
ssr_list = []
num_params = []
for i in range(1,6):
means_guess = np.random.normal(0.1,1,i) #np.ones(i,dtype=float)
stds_guess = np.ones(i,dtype=float)
areas_guess = np.ones(i,dtype=float)
fit_means, fit_stds, fit_areas, ssr = fitter(d.x,d.y,means_guess,stds_guess,areas_guess)
plt.plot(d.x,d.y,"o")
plot_gaussians(fit_means,fit_stds,fit_areas)
plt.show()
ssr_list.append(ssr)
num_params.append((i+1)*3)
len(d.x)
print(calc_aic(ssr_list,num_params,len(d.x)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Parameter uncertainty
Step4: Generate 1,000 simulated data sets where each experimental point is drawn from a normal distribution with a mean of d.obs and a standard deviation of 0.05.
Step5: Generate a histogram of possible values of $A$ and $k$ from these simulations. (Hint
Step6: What are the 95% confidence intervals on your estimate of $k$? The lower bound is the value of $k$ for which 2.5% of the histogram counts are below the value. The upper bound is the value of $k$ for which 2.5% of the histogram counts are above the value.
Step7: You measure the same process under slightly different conditions. These data are stored in data/time_course_1.csv. Is there a statistically significant difference between $k$ from dataset 1 vs. 0?
Step8: Bonus
Step9: Model selection
Step11: Real example
Step12: Fit both models to the data in data/sed_eq.csv. What are your estimates of $c_{0}$, $M$, and $\theta$? Are they the same between the two fits?
Step13: Use your calc_aic function on these fits. Which model is supported? Can you conclude there is dimer present?
Step14: Gaussian
Step19: You find code to analyze this kind of data on the internet. Using the functions below, determine
|
411 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
# WATCH THE VIDEO IN FULL-SCREEN MODE
YouTubeVideo("JXJQYpgFAyc",width=640,height=360) # Numerical integration
# Put your code here
import math
Nstep = 10
begin = 0.0
end = 3.1415926
dx = (end-begin)/Nstep
sum = 0.0
xpos = 0.0
for i in range(Nstep):
thisval = math.sin(xpos)*dx
sum += thisval
xpos += dx
error = abs(sum-2.0)/2.0
print("for dx = {0:3f} we get an answer of {1:3f} and a fractional error of {2:4e}".format(dx,sum,error))
# WATCH THE VIDEO IN FULL-SCREEN MODE
YouTubeVideo("b0K8LiHyrBg",width=640,height=360) # Numerical differentiation
# Put your code here
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
def f(x):
return np.exp(-2.0*x)
def dfdx(x):
return -2.0*np.exp(-2.0*x)
x = np.linspace(-3.0,3.0, 100)
dx = 1.0e-2
deriv = (f(x+dx)-f(x-dx))/(2.0*dx)
error = np.abs((deriv-dfdx(x))/dfdx(x))
plt.plot(x,error)
print("the average fractional error is:", error.mean())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
|
412 | <ASSISTANT_TASK:>
Python Code:
import pyautogui
# Writes to the cell right below (70 pixels down)
pyautogui.moveRel(0,70)
pyautogui.click()
pyautogui.typewrite('Hello world!')
# Writes to the cell right below (70 pixels down)
pyautogui.moveRel(0,70)
pyautogui.click()
pyautogui.typewrite('Hello world!', interval=0.2)
# Writes to the cell right below (70 pixels down)
pyautogui.moveRel(0,70)
pyautogui.click()
pyautogui.typewrite(['a','b','left','left','X','Y'], interval=1)
# The keystrokes above leave the text 'XYab' in the target cell.
pyautogui.KEYBOARD_KEYS
pyautogui.typewrite('F1')
pyautogui.typewrite('f1')
# Simulates ctrl + alt + delete
pyautogui.hotkey('ctrl','alt','delete')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This lesson will control all the keyboard controlling functions in the module.
Step2: Again, to simulate more human interaction, we can pass an interval parameter, much like the duration parameter before.
Step3: For more complex characters, we can pass a list of complex characters, like the arrow keys, shift, etc.
Step4: A list of keys is available in KEYBOARD_KEYS.
Step5: These are case-sensitive, but often map to the same function anyway.
Step6: We can also pass several keys to hotkey, i.e. pressed together.
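As a small illustration of Step6 (and of holding keys down manually), here is a hedged sketch that uses only documented pyautogui calls; the specific key names are my own examples, not values from the lesson:
import pyautogui

# Press several keys together: select all, then copy.
pyautogui.hotkey('ctrl', 'a')
pyautogui.hotkey('ctrl', 'c')

# The same idea done manually with keyDown/keyUp.
pyautogui.keyDown('shift')
pyautogui.press('left')   # extends the selection one character to the left
pyautogui.keyUp('shift')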
|
413 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pylab import *
np.random.seed(2)
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds
scatter(pageSpeeds, purchaseAmount)
x = np.array(pageSpeeds)
y = np.array(purchaseAmount)
p4 = np.poly1d(np.polyfit(x, y, 4))
import matplotlib.pyplot as plt
xp = np.linspace(0, 7, 100)
plt.scatter(x, y)
plt.plot(xp, p4(xp), c='r')
plt.show()
from sklearn.metrics import r2_score
r2 = r2_score(y, p4(x))
print r2
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: numpy has a handy polyfit function we can use, to let us construct an nth-degree polynomial model of our data that minimizes squared error. Let's try it with a 4th degree polynomial
Step2: We'll visualize our original scatter plot, together with a plot of our predicted values using the polynomial for page speed times ranging from 0-7 seconds
Step3: Looks pretty good! Let's measure the r-squared error
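For reference, r-squared can also be computed by hand from its definition (one minus the ratio of residual to total sum of squares); this small sketch should agree with sklearn's r2_score up to floating-point noise:
ss_res = np.sum((y - p4(x)) ** 2)          # residual sum of squares
ss_tot = np.sum((y - np.mean(y)) ** 2)     # total sum of squares
r2_manual = 1.0 - ss_res / ss_tot
print r2_manual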
|
414 | <ASSISTANT_TASK:>
Python Code:
import pyisc;
import visisc;
import numpy as np
import datetime
from scipy.stats import poisson, norm, multivariate_normal
%matplotlib wx
from pylab import plot, figure
n_sources = 10
n_events = 20
num_of_normal_days = 200
num_of_anomalous_days = 10
data = None
days_list = [num_of_normal_days, num_of_anomalous_days]
dates = []
for state in [0,1]: # normal, anomalous data
num_of_days = days_list[state]
for i in range(n_sources):
data0 = None
for j in range(n_events):
if state == 0:# Normal
po_dist = poisson(int((10+2*(n_sources-i))*(float(j)/n_events/2+0.75))) # from 0.75 to 1.25
else: # anomalous
po_dist = poisson(int((20+2*(n_sources-i))*(float(j)/n_events+0.5))) # from 0.5 to 1.5
tmp = po_dist.rvs(num_of_days)
if data0 is None:
data0 = tmp
else:
data0 = np.c_[data0,tmp]
tmp = np.c_[
[i] * (num_of_days), # Sources
[ # Timestamp
datetime.date(2015,02,24) + datetime.timedelta(d)
for d in np.array(range(num_of_days)) + (0 if state==0 else num_of_normal_days)
],
[1] * (num_of_days), # Measurement period
data0, # Event frequency counts
]
if data is None:
data = tmp
else:
data = np.r_[
tmp,
data
]
# Column index into the data
source_column = 0
date_column = 1
period_column = 2
first_event_column = 3
last_event_column = first_event_column + n_events
model = visisc.EventDataModel.flat_model(
event_columns=range(first_event_column,last_event_column)
)
data_object = model.data_object(
data,
source_column = source_column,
class_column = source_column,
period_column = period_column,
date_column = date_column
)
anomaly_detector = model.fit_anomaly_detector(data_object, poisson_onesided=True)
!ipython --matplotlib=wx --gui=wx -i visISC_simple_frequency_data_example.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Event Frequency Data
Step2: Flat Event Data Model
Step3: Second, we transform the numpy array into a pyisc data object. The data object consists of the original event columns, the source column, the period column, and a root column containing the sum of all event frequency counts per day. In this case, the source and the class are the same. The source identifies the origin of the data, for instance the user or machine that generates the data, while the class is the type of source. A reference to the last created data object is also kept in the model.
Step4: Thereafter, we create an anomaly detector and fit a one-sided Poisson distribution for each event column.
Step5: Visualization
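Step4 mentions fitting a one-sided Poisson distribution per event column. The scoring details live inside pyisc/visisc, but the underlying idea can be sketched with scipy alone — this is an illustration of the principle, not the library's exact formula:
from scipy.stats import poisson
import numpy as np

def onesided_poisson_score(count, expected_rate):
    # Small upper-tail probability of seeing `count` or more events -> high anomaly score.
    p_tail = poisson.sf(count - 1, expected_rate)   # P(X >= count)
    return -np.log10(max(p_tail, 1e-300))

print(onesided_poisson_score(12, 10.0))   # mild
print(onesided_poisson_score(40, 10.0))   # strongly anomalous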
|
415 | <ASSISTANT_TASK:>
Python Code:
print "Hello World!"
print 'Hello World!'
# This is a comment.
print 'This is not a comment.'
'Something smells funny.'
print 2 + 2
# Spaces between characters don't matter
print 2+2
2 + 2
print "2 + 2"
print 2.1 + 2 # The most precise value is a float.
(3.*10. - 26.)/5.
(3*10 - 26)/5.0
# Since our most precise value is an int, python spits out the solution as an int
(3*10 - 26)/5
# Rounding errors can creep in
2.1 + 2 == 4.0999999999999996 # two 'equals' signs asks whether something is equal
complex(1,2)
# note that python uses j to denote the imaginary part
1+2j
a = 1+2j
print a.real, a.imag
1+2j-2j
t = 1.0 # declare a variable t (time)
accel = 9.8 # acceleration in units of m/s^2
dist = 0.5*accel*t*t # distance traveled in time t seconds is 1/2*a*t^2
print dist # this is the distance in meters
dist1 = accel*(t**2)/2 # note: t^2 means something very different!
print dist1
dist2 = 0.5*accel*pow(t,2)
print dist2
# Integer division prints the floor; i.e., it only takes the integer digits
print 6/5
# modulo operator
6 % 5
# bitwise operators: shift left
# 1 in binary is '1', shifting left by two bits gives '100' = 4
1 << 2
# bitwise operators: shift right
# 5 in binary is '101', shifting right by one bit gives '10' = 2
5 >> 1
x = 2 ; y = 3 # multiple commands on the same line, separated by a semicolon
x | y # bitwise OR
# x in binary is '10', y in binary is '11', x | y is '11' -> 3
x ^ y # exclusive OR ('10' ^ '11' = '01' = 1)
x & y # bitwise AND ('10' & '11' = '10' = 2)
x = x ^ y ; print x # x has been reassigned
x += 3 ; print x # 'x += 3' is the same as saying 'x = x+3'
# the equivalent holds from -, *, /
a = 3 ; b = 4
a == b # two '=' signs for comparison, one '=' for assignment
a+1 == b
a+1.0 == b
a < 10
a < 3
a <= 3
a < (10 + 2j)
a < -2.0
a != 3.1415
0 == False # False is equivalent to 0, and other things
1 == True # True is equivalent to 1, and other things
not False
1 == False
not (10.0 - 10.0)
x = None # None is neither True nor False
print None == False
print None == True
type(None)
print type(1)
print type("1")
x = 2 ; type(x)
type(2) == type(1)
type(False)
type(type(1))
type(pow)
isinstance(1,int) # check if something is a certain type
isinstance("spam",str)
isinstance(1.212,int)
x = "spam" ; print type(x)
print "Hello!\nI'm hungry." # '\n' means 'new line'
# This doesn't work.
print "Hello!
I'm hungry."
print "Hello! \n I'm hungry."
"Wah?!" == "Wah?!"
print "'Wah?!' said the student."
print ""Wah?!" said the student."
print '"Wah?!" said the student.'
print "\"Wah?!\" said the student."
# Raw strings don't recognize escape characters
print r'This is a raw string ... newlines \n are ignored. So are returns \r and tabs \t.'
# Triple quotes are useful for multiple line strings
y = '''Four score and seven minutes ago,
you folks all learned some basic mathy stuff with Python
and boy were you blown away!'''
print y
# Prepending 'u' makes a string "unicode"
print u"\N{BLACK HEART SUIT}"
# You can concatenate strings with the '+' sign
s = "spam" ; e = "eggs"
print s + e
print s + " and " + e
print "green " + e + " and " + s
# You can do multiple concatenations with the '*' sign
print s*3 + e
print "*"*50
# Strings can be compared
print "spam" == "good"
# s comes before z in the alphabet
"spam" < "zoo"
# 's' comes before 'spam' in the dictionary
"s" < "spam"
# 'spaa' comes before 'spam' alphabetically
"spaaaaaaaaaaaam" < "spam"
print 'I want ' + 3 + ' eggs and no ' + s
# We have to cast the 3 (an int) as a string
print 'I want ' + str(3) + ' eggs and no ' + s
pi = 3.14159 # There are easier ways to call pi, which we'll see later
print 'I want ' + str(pi) + ' eggs and no ' + s
print str(True) + ':' + ' I want ' + str(pi) + ' eggs and no ' + s
print s
print len(s) # len() tells you the length of a string (or, more generally, an array)
print len("eggs\n") # The newline \n counts as ONE character
print len("") # empty string
# Strings act like arrays. We'll see more about arrays later.
s = "SPAM"
print s
print s[0] # Python uses zero-based indexing; i.e., it starts counting at zero
print s[1]
print s[-1]
print s[-2]
# Take slices of strings
print s
print s[0:1]
print s[1:4]
print s[0:100] # Python doesn't warn you. Be careful!
# Slice counting backwards
print s
print s[-3:-1]
# You don't have to specify both ends
print s
print s[:2]
print s[2:]
# You can slice in different steps
print s
print s[::2]
print s[::-1]
mygrade = 'F+'
print mygrade
mygrade[0] = 'A'
mygrade = 'A+'
print mygrade
# Ask for user input
faren = raw_input("Enter the temperature (in Fahrenheit): ")
print "Your temperature is " + faren + " degrees."
# User input is always saved as a string
faren = raw_input("Enter the temperature in Fahrenheit): ")
cel = 5./9. * (faren - 32.)
print "The temperature in Celcius is " + cel + " degrees."
# Don't forget to convert things to the right type
faren = raw_input("Enter the temperature in Fahrenheit): ")
faren = float(faren) # The calculation on the right gets saved to the variable on the left
cel = 5./9. * (faren - 32.)
print "The temperature in Celcius is " + str(cel) + " degrees."
x = raw_input("Enter x: ")
x = float(x)
print x
execfile("temperature.py")
x = 1
print x
x = 1
if x > 0: # colons indicate the beginning of a control statement
print "yo"
else: # unindenting tells Python to move to the next case
print "dude"
print "ok" # unindenting also tells Python the control statement is done
# You can mix indentations between different blocks ... but this is ugly and people will judge you
x = 1
if x > 0:
print "yo"
else:
print "dude"
print "dude"
# You can put everything on one line
print "yo" if x > 0 else "dude"
# Multiple cases
x = -100
if x < -10:
print "yo"
elif x > 10: # 'elif' is short for 'else if'
print "dude"
else:
print "sup"
for x in range(5,50,10):
print x**2
for x in ("all","we","wanna","do","is","eat","your","brains"):
print x
x = 0
while x < 5:
print pow(2,x)
x += 1 # don't forget to increment x!
# Multiple levels
for x in range(1,10):
if x % 2 == 0:
print str(x) + " is even."
else:
print str(x) + " is odd."
# Blocks cannot be empty
x = "fried goldfish"
if x == "spam for dinner":
print "I will destroy the universe"
else:
# Nothing here.
# Use a 'pass' statement, which indicates 'do nothing'
x = "fried goldfish"
if x == "spam for dinner":
print "I will destroy the universe"
else:
pass
# Use a 'break' statement to escape a loop
x = 0
while True:
print x**2
if x**2 >= 100:
break
x +=1
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python as a calculator
Step2: Defining and using variables
Step3: Some more mathy operators
Step4: Comparisons
Step5: Truthiness
Step6: 0j == False
Step7: More on variables and types
Step8: Built-in types in python
Step9: Backslashes ( \ ) start special (escape) characters
Step10: <img src="https
Step11: Strings are immutable (unlike in C), so you cannot change a string in place.
Step12: Running a Program from Command Line
Step13: With Python, as in other languages, you can write separate scripts and programs that can be used independently or together with other Python programs. The notebook is excellent for code development and presentations, but not the best for production-level code.
Step14: IPython Notebook automatically converts tabs into spaces, but some programs do not. Be careful not to mix these up! Be consistent in your programming.
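Step12 uses execfile("temperature.py"), which only exists in Python 2. If you are on Python 3, two hedged alternatives (assuming temperature.py sits in the working directory) are:
# Inside a notebook or plain interpreter:
exec(open("temperature.py").read())

# Or run it as a separate process from Python:
import subprocess
subprocess.run(["python", "temperature.py"])

# In IPython/Jupyter you can also use the %run magic: %run temperature.py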
|
416 | <ASSISTANT_TASK:>
Python Code:
# Some imports we will need below
import numpy as np
from devito import *
import matplotlib.pyplot as plt
%matplotlib inline
nx, ny = 100, 100
grid = Grid(shape=(nx, ny))
u = TimeFunction(name='u', grid=grid, space_order=2, save=200)
c = Constant(name='c')
eqn = Eq(u.dt, c * u.laplace)
step = Eq(u.forward, solve(eqn, u.forward))
op = Operator([step])
xx, yy = np.meshgrid(np.linspace(0., 1., nx, dtype=np.float32),
np.linspace(0., 1., ny, dtype=np.float32))
r = (xx - .5)**2. + (yy - .5)**2.
# Inserting the ring
u.data[0, np.logical_and(.05 <= r, r <= .1)] = 1.
stats = op.apply(dt=5e-05, c=0.5)
plt.rcParams['figure.figsize'] = (20, 20)
for i in range(1, 6):
plt.subplot(1, 6, i)
plt.imshow(u.data[(i-1)*40])
plt.show()
# Instead of `platform=nvidiaX`, you may run your Python code with
# the environment variable `DEVITO_PLATFORM=nvidiaX`
# We also need the `gpu-fit` option to tell Devito that `u` will definitely
# fit in the GPU memory. This is necessary every time a TimeFunction with
# `save != None` is used. Otherwise, Devito could generate code such that
# `u` gets streamed between the CPU and the GPU, but for this advanced
# feature you will need `devitopro`.
op = Operator([step], platform='nvidiaX', opt=('advanced', {'gpu-fit': u}))
# Uncomment and run only if Devito was installed with GPU support.
# stats = op.apply(dt=5e-05, c=0.5)
print(op)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solver implementation
Step2: To represent the density, we use a TimeFunction -- a scalar, discrete function encapsulating space- and time-varying data. We also use a Constant for the diffusion coefficient.
Step3: The 2D diffusion equation is expressed as
Step4: From this diffusion equation we derive our time-marching method -- at each timestep, we compute u at timestep t+1, which in the Devito language is represented by u.forward. Hence
Step5: OK, it's time to let Devito generate code for our solver!
Step6: Before executing the Operator we must first specify the initial density profile. Here, we place a "ring" with a constant density value in the center of the domain.
Step7: We're now ready to execute the Operator. We run it with a diffusion coefficient of 0.5 and for a carefully chosen dt. Unless specified otherwise, the simulation runs for 199 timesteps as specified in the definition of u (i.e. the function was defined with save=200 the initial data + 199 new timesteps).
Step8: Initial conditions and snapshots every 40 timesteps
Step9: GPU-parallel solver
Step10: That's it! We can now run it exactly as before
Step11: We should see a big performance difference between the two runs. We can also inspect op to see what Devito has generated to run on the GPU
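A note on the "carefully chosen dt" in Step7: for an explicit scheme on the 2D diffusion equation the usual stability bound is dt <= h**2 / (4*c). Assuming the default unit-extent Grid (so h = 1/(nx-1)) — an assumption on my part, not something Devito reports here — a quick check reproduces a value close to the 5e-05 used above:
nx, c = 100, 0.5
h = 1.0 / (nx - 1)            # grid spacing, assuming extent 1.0 per dimension
dt_max = h**2 / (4.0 * c)
print(dt_max)                 # ~5.1e-05, consistent with dt=5e-05 above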
|
417 | <ASSISTANT_TASK:>
Python Code:
'''
The code in this cell opens up the file skydiver_time_velocities.csv
and extracts two 1D numpy arrays of equal length. One array is
of the velocity data taken by the radar gun, and the second is
the times that the data is taken.
'''
import numpy as np
skydiver_time, skydiver_velocity = np.loadtxt("skydiver_time_velocities.csv",
delimiter=',',skiprows=1,unpack=True)
'''
This is a piece of example code that shows you how to get the
velocity at any time you want using the Numpy interp() method.
This requires you to pick a time where you want the velocity
as an input parameter to the method, as well as the time and
velocity arrays that you will interpolate from.
'''
time = 7.2 # time in seconds
vel = np.interp(time,skydiver_time,skydiver_velocity)
print("velocity at time {:.3f} s is {:.3f} m/s".format(time,vel))
# put your code here!
# put your code here!
from IPython.display import HTML
HTML("""
<iframe
src="https://goo.gl/forms/XvxmPrGnDOD3UZcI2?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
""")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The second part of the challenge
Step3: Assignment wrapup
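The exact wording of the second part of the challenge is in the omitted notebook text, but if it asks for the distance fallen, one hedged approach for the "put your code here" cells is to interpolate the radar velocities onto a dense time grid and integrate them numerically:
t_grid = np.linspace(skydiver_time.min(), skydiver_time.max(), 2000)
v_grid = np.interp(t_grid, skydiver_time, skydiver_velocity)
distance = np.trapz(v_grid, t_grid)   # trapezoid-rule integral of v(t)
print("approximate distance fallen: {:.1f} m".format(distance))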
|
418 | <ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the Census dataset
data = pd.read_csv("census.csv")
# Success - Display the first record
display(data.head(n=1))
len(data[data['income']==">50K"])
# TODO: Total number of records
n_records = len(data['income'].index)
# TODO: Number of records where individual's income is more than $50,000
n_greater_50k = len(data[data['income']==">50K"])
# TODO: Number of records where individual's income is at most $50,000
n_at_most_50k = len(data[data['income']=="<=50K"])
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = float(n_greater_50k*100.00/n_records)
# Print the results
print "Total number of records: {}".format(n_records)
print "Individuals making more than $50,000: {}".format(n_greater_50k)
print "Individuals making at most $50,000: {}".format(n_at_most_50k)
print "Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent)
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed]= features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
display(features_log_minmax_transform.head(n = 5))
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# TODO: Encode the 'income_raw' data to numerical values
income = income_raw.map({'<=50K':0,'>50K':1})
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print "{} total features after one-hot encoding.".format(len(encoded))
# Uncomment the following line to see the encoded feature names
# print encoded
# Import train_test_split
from sklearn.cross_validation import train_test_split
# Split the 'features' and 'income' data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features_final,
income,
test_size = 0.2,
random_state = 0)
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
'''
TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data
encoded to numerical values done in the data preprocessing step.
FP = income.count() - TP # Specific to the naive case
TN = 0 # No predicted negatives in the naive case
FN = 0 # No predicted negatives in the naive case
'''
TP = np.sum(income)
FP = income.count() - TP
TN = 0
FN = 0
# TODO: Calculate accuracy, precision and recall
accuracy = float((TP+TN))/len(income)
recall = float(TP)/(TP+FN)
precision = float(TP)/(TP+FP)
# TODO: Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.
# HINT: The formula above can be written as (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
beta = 0.5
fscore = (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
# Print the results
print "Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore)
# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
results = {}
# TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
start = time() # Get start time
learner.fit(X_train[:sample_size], y_train[:sample_size])
end = time() # Get end time
# TODO: Calculate the training time
results['train_time'] = end - start
# TODO: Get the predictions on the test set(X_test),
# then get predictions on the first 300 training samples(X_train) using .predict()
start = time() # Get start time
predictions_test = learner.predict(X_test)
predictions_train = learner.predict(X_train[:300])
end = time() # Get end time
# TODO: Calculate the total prediction time
results['pred_time'] = end - start
# TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
results['acc_train'] = accuracy_score(y_train[:300],predictions_train)
# TODO: Compute accuracy on test set using accuracy_score()
results['acc_test'] = accuracy_score(y_test,predictions_test)
# TODO: Compute F-score on the the first 300 training samples using fbeta_score()
results['f_train'] = fbeta_score(y_train[:300],predictions_train,beta=0.5)
# TODO: Compute F-score on the test set which is y_test
results['f_test'] = fbeta_score(y_test,predictions_test,beta=0.5)
# Success
print "{} trained on {} samples.".format(learner.__class__.__name__, sample_size)
# Return the results
return results
# TODO: Import the three supervised learning models from sklearn
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
# TODO: Initialize the three models
# n_neighbors=int(math.log(len(X_train),2))
clf_A = KNeighborsClassifier()
clf_B = DecisionTreeClassifier(random_state=13)
clf_C = GradientBoostingClassifier(random_state=13)
# TODO: Calculate the number of samples for 1%, 10%, and 100% of the training data
# HINT: samples_100 is the entire training set i.e. len(y_train)
# HINT: samples_10 is 10% of samples_100
# HINT: samples_1 is 1% of samples_100
samples_100 = len(X_train)
samples_10 = int (len(X_train) * 0.1)
samples_1 = int (len(X_train) * 0.01)
# Collect results on the learners
results = {}
for clf in [ clf_A, clf_B,clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test)
# Run metrics visualization for the three supervised learning models chosen
vs.evaluate(results, accuracy, fscore)
# TODO: Import 'GridSearchCV', 'make_scorer', and any other necessary libraries
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
# TODO: Initialize the classifier
clf = DecisionTreeClassifier(random_state=13)
# TODO: Create the parameters list you wish to tune, using a dictionary if needed.
# HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]}
parameters = {'max_depth':[3, 4, 5]}
# TODO: Make an fbeta_score scoring object using make_scorer()
scorer = make_scorer(fbeta_score,beta=0.5)
# TODO: Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV()
grid_obj = GridSearchCV(clf,parameters,scoring=scorer)
# TODO: Fit the grid search object to the training data and find the optimal parameters using fit()
grid_fit = grid_obj.fit(X_train,y_train)
# Get the estimator
best_clf = grid_fit.best_estimator_
# Make predictions using the unoptimized and model
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the before-and-afterscores
print "Unoptimized model\n------"
print "Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5))
print "\nOptimized Model\n------"
print "Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))
print "Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))
# TODO: Import a supervised learning model that has 'feature_importances_'
from sklearn.ensemble import AdaBoostClassifier
# TODO: Train the supervised model on the training set using .fit(X_train, y_train)
model = AdaBoostClassifier(random_state=13)
model.fit(X_train, y_train)
# TODO: Extract the feature importances using .feature_importances_
importances = model.feature_importances_
# Plot
vs.feature_plot(importances, X_train, y_train)
# Import functionality for cloning a model
from sklearn.base import clone
# Reduce the feature space
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]
# Train on the "best" model found from grid search earlier
clf = (clone(best_clf)).fit(X_train_reduced, y_train)
# Make new predictions
reduced_predictions = clf.predict(X_test_reduced)
# Report scores from the final model using both versions of data
print "Final Model trained on full data\n------"
print "Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))
print "\nFinal Model trained on reduced data\n------"
print "Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementation
Step2: Featureset Exploration
Step3: For highly-skewed feature distributions such as 'capital-gain' and 'capital-loss', it is common practice to apply a <a href="https
Step4: Normalizing Numerical Features
Step5: Implementation
Step6: Shuffle and Split Data
Step7: Evaluating Model Performance
Step8: Supervised Learning Models
Step9: Implementation
Step10: Improving Results
Step11: Question 5 - Final Model Evaluation
Step12: Question 7 - Extracting Feature Importance
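As a small companion to Step12, the five most important features (and how much of the total importance they carry) can be listed directly from the fitted model; this sketch simply reuses the importances array computed above:
order = np.argsort(importances)[::-1][:5]
top5_names = X_train.columns.values[order]
print "Top 5 features:", list(top5_names)
print "Cumulative importance: {:.3f}".format(np.sum(importances[order]))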
|
419 | <ASSISTANT_TASK:>
Python Code:
2 #Integer yani tam sayı
2.0 #Float yani ondalıklı sayı
1.67 #Yine bir float
4 #Yine bir int(Integer)
'Bu bir string'
"Bu da bir string"
True #Boolean
False #Boolean
print('test')
print('test2')
print("deneme123")
print(1)
print(2.55)
print(1, 2, 'Hello World!')
print('\n')
print(1, '\n',2, '\nHelloWorld') #\n sonraki satırda bir boşluğa neden olacak.
print('\n')
print(1, '\n' + str(2)) #str fonksiyonu integeri stringe çevirecek. Toplama işlemi stringleri birleştirmeye de yarıyor.
print('\ttest')
print('\t test')
print('\' <= bu işaretin adı neydi?')
True and False
True or False
2 or False
0 or False
3 < 4
2.5 or False
3 >= 4
'string' or False
2 == 2
2 == 3
2 != 2
2 != 1034354435345
False or True and False
False and False or True
2 <= 4
not True
3 is False
2 + 2 + 4 - 1
2 * 3 / 2
2 ** 4
4 ** (1/2)
5 // 2
5 % 2
2.3 == 23 or 10
0.1 == 1/10
2 is not 3 is 4 #Hem 2 is not 3 hem 3 is 4 doğru olduğunda doğrudur. Arada and var gibi.
2 is not 3 is 3
3 is not 3 is 3
1 == 1 == 2
1 == 2 == 2
2 ** 2 ** 3 #İşlem sağdan sola yapılıyor.
2 * 2 ** 2 ** 3 / 3 #Önceliği yüksek işlemden başlanıyor.
2 ** -3 * 4
2 ** (-3 + 4)
a = 4
a = 5
b = 4
c = a != b
print(c)
variableda boşuk olmaz = 0
alt_cizgi_kullanilir = 5
yadaBuSekildeTanimlanir = 4
print(alt_cizgi_kullanilir, yadaBuSekildeTanimlanir)
myList = [] #Bu boş bir liste
myTuple = (1,) #Tuple en az bir element içermek zorundadır. Tek element olduğunda virgül eklemek zorundayız.
myTuple2 = 1, 2 #Parantez kullanmadan virgül ile de tuple oluşturabiliriz.
print(myList, myTuple, myTuple2)
print(1 not in (2, 3, 4), 2 in [2, 3])
a = [[1, 3, 4], (2, 3)] #Listler tuple ve list elementleri içerebilir.
b = (1, 2, 3, (3, 4), [1, 2])
print(a, b)
b[4].append(1) #List ve tuple indexi 0'dan başlar, erişmek için elementin ismi yanına köşeli parentezle indexini yazarız.
print(b) #Tuple içine list koymak tupleın immutable yapısını bozar.
a = [[1, 3, 4], (2, 3)]
a.pop() # pop fonksiyonu listin son elementini verir.
a = [[1, 3, 4], (2, 3)]
a.pop(0) #Indexi verirsek o elementi çıkarır. Sonra stdouta basar.
a = [[1, 3, 4], (2, 3)]
a.insert(0, 1) #Insert ile indexi verilen lokasyondaki elementten önce yeni element eklenir. Büyük listelerde verimsizdir.
print(a)
a = [0, 1, 3, 2, 4, 5, 6, 7, 8, 9]
a.index(3) # index fonksiyonu listler ve tuplelar üzerinde çalışır. Verilen elementin indexini stdouta basar.
a = (1, 2)
a.index(2)
a = 1, #Bu da bir tuple, parantez koymak zorunda değiliz.
a.append(1) #Hata verecek çünkü tuple immutable
a = True
if a is True: #İki nokta işaretine dikkat edelim.
print("a Doğru") #Statementın içinde boşlukla çalışmamız gerekiyor.
else:
print("a Yanlış") #Bu kod çalışmayacak.
a = [1, 2, 3, 4, 5]
if 6 in a:
print("6 dahil")
elif 2 in a:
print('2 dahil')
input("Yaşın kaç: ")
age = input('Yaşınızı girin: ')
if age >= 13:
print('Siteye devam edebilirsiniz.')
else:
print('Siteye giremezsiniz.')
age = input('Yaşınızı girin: ')
if int(age) >= 13:
print('Siteye devam edebilirsiniz.')
else:
print('Siteye giremezsiniz.')
try:
a = input('Yaşınızı girin: ')
age = int(a)
except ValueError:
raise SystemExit('Hatalı Giriş')
mid1 = input('Midterm 1 Notunuz: ')
try:
mid1 = int(mid1)
except ValueError:
raise SystemExit('Hatalı Giriş')
mid2 = input('Midterm 2 Notunuz: ')
try:
mid2 = int(mid2)
except ValueError:
raise SystemExit('Hatalı Giriş')
final = int(input('Final Notunuz: '))
try:
final = int(final)
except ValueError:
raise SystemExit('Hatalı Giriş')
average = (mid1 * 30 + mid2 * 30 + final * 40) / 100
if average >= 55:
print('Dersten geçtiniz. Tebrikler.')
else:
print('Dersten kaldınız. Tekrar deneyiniz.')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The print Function, Special Characters, and the str Function
Step2: Logical Operations
Step3: Mathematical Operations
Step4: We will elaborate on the "Yönü Yok" ("no direction") concept.
Step5: You can practice this topic more at home. Moving on.
Step6: Lists and Tuples
Step7: If - Elif - Else Statements
Step8: Getting Input from the User - input()
Step9: try - except - raise
Step10: To reinforce what we have learned, let's now write a program. This program takes a total of 3 grades.
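The grade program above repeats the same try/except block three times; a hedged refactoring (names and messages are my own, not from the lesson) wraps the validated input in one small helper:
def read_int(prompt):
    """Keep asking until the user enters a whole number."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print('Invalid input, please enter a number.')

mid1 = read_int('Midterm 1 grade: ')
mid2 = read_int('Midterm 2 grade: ')
final = read_int('Final grade: ')
average = (mid1 * 30 + mid2 * 30 + final * 40) / 100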
|
420 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import Series, DataFrame
from numpy.random import randint
dices = randint(1,7,(5,2))
dices
diceroll = DataFrame(dices, columns=['dice1','dice2'])
diceroll
city = Series(['Tokyo','Osaka','Nagoya','Okinawa'], name='City')
city
temp = Series([25.0,28.2,27.3,30.9], name='Temperature')
temp
humid = Series([44,42,np.nan,62], name='Humidity')
humid
cities = DataFrame({'City':city, 'Temperature':temp, 'Humidity':humid})
cities
data = {'City': ['Tokyo','Osaka','Nagoya','Okinawa'],
'Temperature': [25.0,28.2,27.3,30.9],
'Humidity': [44,42,np.nan,62]}
cities = DataFrame(data)
cities
diceroll = DataFrame(columns=['dice1','dice2'])
diceroll
oneroll = Series(randint(1,7,2), index=['dice1','dice2'])
oneroll
diceroll = diceroll.append(oneroll, ignore_index=True)
diceroll
diceroll = DataFrame(columns=['dice1','dice2'])
for i in range(1000):
diceroll = diceroll.append(
Series(randint(1,7,2), index=['dice1','dice2']),
ignore_index = True)
diceroll[:5]
diceroll.describe()
diceroll1 = DataFrame(randint(1,7,(5,2)),
columns=['dice1','dice2'])
diceroll1
diceroll2 = DataFrame(randint(1,7,(3,2)),
columns=['dice1','dice2'])
diceroll2
diceroll3 = diceroll1.append(diceroll2)
diceroll3
diceroll4 = diceroll1.append(diceroll2, ignore_index=True)
diceroll4
diceroll = DataFrame()
diceroll['dice1'] = randint(1,7,5)
diceroll
diceroll['dice2'] = randint(1,7,5)
diceroll
dice1 = Series(randint(1,7,5),name='dice1')
dice2 = Series(randint(1,7,5),name='dice2')
diceroll = pd.concat([dice1, dice2], axis=1)
diceroll
dice3 = Series(randint(1,7,5),name='dice3')
diceroll = pd.concat([diceroll, dice3], axis=1)
diceroll
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An example of converting a 2D array into a DataFrame.
Step2: The columns option specifies the name of each column.
Step3: An example of creating a DataFrame from Series objects.
Step4: We build the DataFrame by passing a dictionary that maps each column name to the corresponding Series object.
Step5: An example of creating a DataFrame from lists instead of Series objects.
Step6: An example of appending rows to an empty DataFrame.
Step7: We prepare the corresponding data as a Series object. At this point, the index option assigns names that correspond to the column names.
Step8: We add the Series object to the prepared DataFrame with its append method.
Step9: An example simulating 1000 rolls of two dice.
Step10: The DataFrame's describe method lets us check summary statistics.
Step11: An example of combining two DataFrames with the append method.
Step12: If ignore_index=True is specified, the index is reassigned as a running sequence of numbers.
Step13: An example of adding a column to a DataFrame.
Step14: The pd.concat function can combine multiple Series as columns. (axis=1 means concatenation along the column axis.)
Step15: The pd.concat function can also add a Series to an existing DataFrame.
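Note that DataFrame.append, used in Step8 and Step11, was deprecated in pandas 1.4 and removed in 2.0; on a current pandas the same row-wise growth is done with pd.concat. A sketch using the objects defined above:
oneroll = Series(randint(1, 7, 2), index=['dice1', 'dice2'])
diceroll = pd.concat([diceroll, oneroll.to_frame().T], ignore_index=True)

diceroll3 = pd.concat([diceroll1, diceroll2], ignore_index=True)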
|
421 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import pandas.io.data as pdd
from urllib import urlretrieve
%matplotlib inline
try:
index = pdd.DataReader('^GDAXI', data_source='yahoo', start='2007/3/30')
# e.g. the EURO STOXX 50 ticker symbol -- ^SX5E
except:
index = pd.read_csv('dax.txt', index_col=0, parse_dates=True)
index.info()
index.tail()
index['Returns'] = np.log(index['Close'] / index['Close'].shift(1))
index[['Close', 'Returns']].plot(subplots=True, style='b', figsize=(8, 5))
index['Mov_Vol'] = pd.rolling_std(index['Returns'], window=252) * np.sqrt(252)
index[['Close', 'Returns', 'Mov_Vol']].plot(subplots=True, style='b', figsize=(8, 5))
import pandas as pd
import datetime as dt
from urllib import urlretrieve
try:
es_url = 'http://www.stoxx.com/download/historical_values/hbrbcpe.txt'
vs_url = 'http://www.stoxx.com/download/historical_values/h_vstoxx.txt'
urlretrieve(es_url, 'es.txt')
urlretrieve(vs_url, 'vs.txt')
except:
pass
lines = open('es.txt').readlines() # reads the whole file line-by-line
lines[:5] # header not well formatted
lines[3883:3890] # from 27.12.2001 additional semi-colon
lines = open('es.txt').readlines() # reads the whole file line-by-line
new_file = open('es50.txt', 'w') # opens a new file
new_file.writelines('date' + lines[3][:-1].replace(' ', '') + ';DEL' + lines[3][-1])
# writes the corrected third line (additional column name)
# of the orginal file as first line of new file
new_file.writelines(lines[4:]) # writes the remaining lines of the orginal file
list(open('es50.txt'))[:5] # opens the new file for inspection
es = pd.read_csv('es50.txt', index_col=0, parse_dates=True, sep=';', dayfirst=True)
del es['DEL'] # delete the helper column
es.info()
vs = pd.read_csv('vs.txt', index_col=0, header=2, parse_dates=True, sep=',', dayfirst=True)
# you can alternatively read from the Web source directly
# without saving the csv file to disk:
# vs = pd.read_csv(vs_url, index_col=0, header=2,
# parse_dates=True, sep=',', dayfirst=True)
import datetime as dt
data = pd.DataFrame({'EUROSTOXX' :
es['SX5E'][es.index > dt.datetime(1999, 12, 31)]})
data = data.join(pd.DataFrame({'VSTOXX' :
vs['V2TX'][vs.index > dt.datetime(1999, 12, 31)]}))
data.info()
data.head()
data.plot(subplots=True, grid=True, style='b', figsize=(10, 5))
rets = np.log(data / data.shift(1))
rets.head()
xdat = rets['EUROSTOXX']
ydat = rets['VSTOXX']
model = pd.ols(y=ydat, x=xdat)
model
import matplotlib.pyplot as plt
plt.plot(xdat, ydat, 'r.')
ax = plt.axis() # grab axis values
x = np.linspace(ax[0], ax[1] + 0.01)
plt.plot(x, model.beta[1] + model.beta[0] * x, 'b', lw=2)
plt.grid(True)
plt.axis('tight')
import matplotlib as mpl
mpl_dates = mpl.dates.date2num(rets.index)
plt.figure(figsize=(8, 4))
plt.scatter(rets['EUROSTOXX'], rets['VSTOXX'], c=mpl_dates, marker='o')
plt.grid(True)
plt.xlabel('EUROSTOXX')
plt.ylabel('VSTOXX')
plt.colorbar(ticks=mpl.dates.DayLocator(interval=250),
format=mpl.dates.DateFormatter('%d %b %y'))
data = data.dropna()
data = data / data.ix[0] * 100
data.head()
invest = 100
cratio = 0.3
data['Equity'] = (1 - cratio) * invest / data['EUROSTOXX'][0]
data['Volatility'] = cratio * invest / data['VSTOXX'][0]
data['Static'] = (data['Equity'] * data['EUROSTOXX']
+ data['Volatility'] * data['VSTOXX'])
data[['EUROSTOXX', 'Static']].plot(figsize=(10, 5))
for i in range(1, len(data)):
evalue = data['Equity'][i - 1] * data['EUROSTOXX'][i]
# value of equity position
vvalue = data['Volatility'][i - 1] * data['VSTOXX'][i]
# value of volatility position
tvalue = evalue + vvalue
# total wealth
data['Equity'][i] = (1 - cratio) * tvalue / data['EUROSTOXX'][i]
# re-allocation of total wealth to equity ...
data['Volatility'][i] = cratio * tvalue / data['VSTOXX'][i]
# ... and volatility position
data['Dynamic'] = (data['Equity'] * data['EUROSTOXX']
+ data['Volatility'] * data['VSTOXX'])
data.head()
(data['Volatility'] * data['VSTOXX'] / data['Dynamic'])[:5]
(data['Equity'] * data['EUROSTOXX'] / data['Dynamic'])[:5]
data[['EUROSTOXX', 'Dynamic']].plot(figsize=(10, 5))
try:
url = 'http://hopey.netfonds.no/posdump.php?'
url += 'date=%s%s%s&paper=AAPL.O&csv_format=csv' % ('2014', '03', '12')
# you may have to adjust the date since only recent dates are available
urlretrieve(url, 'aapl.csv')
except:
pass
AAPL = pd.read_csv('aapl.csv', index_col=0, header=0, parse_dates=True)
AAPL.info()
AAPL['bid'].plot()
AAPL = AAPL[AAPL.index > dt.datetime(2014, 3, 12, 10, 0, 0)]
# only data later than 10am at that day
# this resamples the record frequency to 5 minutes, using mean as aggregation rule
AAPL_5min = AAPL.resample(rule='5min', how='mean').fillna(method='ffill')
AAPL_5min.head()
AAPL_5min['bid'].plot()
AAPL_5min['bid'].apply(lambda x: 2 * 530 - x).plot()
# this mirrors the stock price development at
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The convenience function DataReader makes it easy to read historical stock price data from Yahoo! Finance (http
Step2: pandas' strength is the handling of indexed/labeled/structured data, like time series data.
Step3: pandas makes it easy to implement vectorized operations, like calculating log-returns over whole time series.
Step4: In addition, pandas makes plotting quite simple and compact.
Step5: We now want to check how annual volatility changes over time.
Step6: Obviously, the annual volatility changes significantly over time.
Step7: Exercise
Step8: The EURO STOXX 50 data is not yet in the right format. Some house cleaning is necessary (I).
Step9: The EURO STOXX 50 data is not yet in the right format. Some house cleaning is necessary (II).
Step10: The EURO STOXX 50 data is not yet in the right format. Some house cleaning is necessary (III).
Step11: The EURO STOXX 50 data is not yet in the right format. Some house cleaning is necessary (IV).
Step12: Now, the data can be safely read into a DataFrame object.
Step13: The VSTOXX data can be read without touching the raw data.
Step14: We now merge the data for further analysis.
Step15: Let's inspect the two time series.
Step16: A picture can tell almost the complete story.
Step17: We now generate log returns for both time series.
Step18: To this new data set, also stored in a DataFrame object, we apply OLS.
Step19: Again, we want to see how our results look graphically.
Step20: Let us see if we can identify systematics over time. And indeed, during the crisis 2007/2008 (yellow dots) volatility has been more pronounced than more recently (red dots).
Step21: Exercise
Step22: First, the initial invest.
Step23: This can already be considered a static investment strategy.
Step24: Second, the dynamic strategy with daily adjustments to keep the value ratio constant.
Step25: Third, the total wealth position.
Step26: A brief check if the ratios are indeed constant.
Step27: Let us inspect the performance of the strategy.
Step28: Exercise
Step29: The intraday evolution of the Apple stock price.
Step30: A resampling of the data is easily accomplished with pandas.
Step31: Let's have a graphical look at the new data set.
Step32: With pandas you can easily apply custom functions to time series data.
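Two of the calls above (pd.rolling_std and pd.ols) have long since been removed from pandas; on a current version the moving volatility, a rolling view of the EUROSTOXX/VSTOXX relationship, and a plain OLS fit would be written roughly like this (a sketch reusing the column names defined above):
# Moving annualised volatility with the modern rolling API
index['Mov_Vol'] = index['Returns'].rolling(window=252).std() * np.sqrt(252)

# Rolling one-year correlation between the two return series
rets['EUROSTOXX'].rolling(window=252).corr(rets['VSTOXX']).plot(figsize=(10, 5))

# A plain slope/intercept as a replacement for pd.ols
clean = rets.dropna()
beta, alpha = np.polyfit(clean['EUROSTOXX'], clean['VSTOXX'], deg=1)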
|
422 | <ASSISTANT_TASK:>
Python Code:
import csv
import re
import googlemaps
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
db_password = 'somestring'
gapi_key = 'anotherstring'
points = []
positions = []
project_no = False
while project_no is False:
try:
string = input("Enter Project Number: ")
pattern = re.compile(r"S\d{6}\Z")
if pattern.match(string):
project_no = string
else:
raise ValueError('ERROR: Not a correct project number')
except ValueError as e:
print(e)
engine = create_engine('mssql+pyodbc://HolebaseSIUser:{password}@Holebase'.format(password=db_password))
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
result = session.execute("""SELECT COR_LocationDetails.LocationID, COR_LocationDetails.LongLatCentroid.STAsText()
FROM COR_Project, COR_LocationDetails
WHERE COR_Project.ProjectID = '{project_no}'
AND COR_Project.Id = COR_LocationDetails.Project""".format(project_no=project_no))
session.close()
for i in result:
if i[1] != None:
match = re.findall(r'\((.*?)\)', i[1])
long, lat = match[0].split(' ')
points.append([float(lat), float(long)])
positions.append([i[0]])
else:
pass
gmaps = googlemaps.Client(key=gapi_key)
gmaps_elevation = gmaps.elevation(points)
for i, d in enumerate(gmaps_elevation):
positions[i].append(round(d['elevation'], 3))
with open('Location Details.csv', 'w', newline='') as f:
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
writer.writerow(['Location ID',
'Ground Level'])
writer.writerows(positions)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting initial variables
Step2: User input for Project ID
Step4: SQLAlchemy code
Step5: Looping through the data of each Location ID
Step6: Making a request to Google Maps API
Step7: I love enumerate
Step8: Creates a Holebase importable csv file
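One practical caveat for Step6: the Elevation API caps how many locations a single request may contain (commonly cited as 512 locations per request — treat that figure as an assumption and check the current quota documentation). A hedged variant of the request that chunks the points list:
CHUNK = 256  # stay comfortably below the assumed per-request location limit
gmaps_elevation = []
for start in range(0, len(points), CHUNK):
    gmaps_elevation.extend(gmaps.elevation(points[start:start + CHUNK]))

for i, d in enumerate(gmaps_elevation):
    positions[i].append(round(d['elevation'], 3))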
|
423 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
from collections import Counter
total_counts = # bag of words here
print("Total words in data set: ", len(total_counts))
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
print(vocab[-1], ': ', total_counts[vocab[-1]])
word2idx = ## create the word-to-index dictionary here
def text_to_vector(text):
pass
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
model = tflearn.DNN(net)
return model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the data
Step2: Counting word frequency
Step3: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
Step4: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
Step5: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Step6: Text to vector function
Step7: If you do this right, the following code should return
Step8: Now, run through our entire review data set and convert each review to a word vector.
Step9: Train, Validation, Test sets
Step10: Building the network
Step11: Initializing the model
Step12: Training the network
Step13: Testing
Step14: Try out your own text!
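The cells above intentionally leave the bag-of-words pieces and the network definition blank as exercises. One hedged way to fill them — a plain multi-layer perceptron whose layer sizes and learning rate are my own choices, not prescribed by the notebook:
from collections import Counter

total_counts = Counter()
for _, row in reviews.iterrows():
    total_counts.update(row[0].split(' '))

word2idx = {word: i for i, word in enumerate(vocab)}

def text_to_vector(text):
    vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word, None)
        if idx is not None:
            vector[idx] += 1
    return vector

def build_model():
    tf.reset_default_graph()
    net = tflearn.input_data([None, len(vocab)])
    net = tflearn.fully_connected(net, 200, activation='ReLU')
    net = tflearn.fully_connected(net, 25, activation='ReLU')
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='categorical_crossentropy')
    return tflearn.DNN(net)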
|
424 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
from scipy import linalg as la
def KDparams(F):
u, s, v = svd(F)
Rxy = s[0]/s[1]
Ryz = s[1]/s[2]
K = (Rxy-1)/(Ryz-1)
D = sqrt((Rxy-1)**2 + (Ryz-1)**2)
return K, D
yearsec = 365.25*24*3600
sr = 3e-15
times = linspace(0.00000001,10,20)
alphas = linspace(0,90,20)
time, alpha = meshgrid(times, alphas)
K = zeros_like(alpha)
D = zeros_like(alpha)
for (r,c) in np.ndindex(alpha.shape):
a = deg2rad(alpha[r,c])
t = time[r,c]*1e6*yearsec
edot = sr*sin(a)
gdot = sr*cos(a)
L = array([[0, gdot, 0], [0, -edot, 0],[0, 0, edot]])
F = la.expm(L*t)
K[r,c], D[r,c] = KDparams(F)
contourf(time, alpha, K, linspace(0, 1, 11))
colorbar()
contourf(time, alpha, D, linspace(0, 2.5, 11))
colorbar()
from IPython.core.display import HTML
def css_styling():
styles = open("./css/sg2.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we will examine strain evolution during transpression deformation. Transpression (Sanderson and Marchini, 1984) is considered as a wrench or transcurrent shear accompanied by horizontal shortening across, and vertical lengthening along, the shear plane.
Step2: Here we define some constants including bulk strain rate.
Step3: We define 2D arrays of angles and times to be examined...
Step4: and loop over them to calculate symmetry and intensity for each combination
Step5: Now we can plot results.
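A quick sanity check of the strain-symmetry parameter: for the simple-shear end-member (alpha = 0) the deformation is plane strain, so K should come out as 1. The self-contained sketch below redoes one grid point with plain numpy/scipy, using the same constants as above:
import numpy as np
from scipy import linalg as la

sr, yearsec = 3e-15, 365.25 * 24 * 3600
t = 5e6 * yearsec                                       # 5 Myr
L_ss = np.array([[0, sr, 0], [0, 0, 0], [0, 0, 0]])     # alpha = 0: pure wrench (simple shear)
F = la.expm(L_ss * t)
s = np.linalg.svd(F, compute_uv=False)
Rxy, Ryz = s[0] / s[1], s[1] / s[2]
print((Rxy - 1) / (Ryz - 1))                            # K, expected to be ~1 (plane strain)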
|
425 | <ASSISTANT_TASK:>
Python Code:
import pprint
def primes():
"""generate successive prime numbers (trial by division)"""
candidate = 1
_primes_so_far = [2] # first prime, only even prime
yield _primes_so_far[0] # share it!
while True:
candidate += 2 # check odds only from now on
for prev in _primes_so_far:
if prev**2 > candidate:
yield candidate # new prime!
_primes_so_far.append(candidate)
break
if not divmod(candidate, prev)[1]: # no remainder!
break # done looping
p = primes() # generator function based iterator
pp = pprint.PrettyPrinter(width=40, compact=True)
pp.pprint([next(p) for _ in range(30)]) # next 30 primes please!
def gcd(a, b):
while b:
a, b = b, a % b
return a
print(gcd(81, 18))
print(gcd(12, 44))
print(gcd(117, 17)) # strangers
print(81 % 18) # 18 goes into
print(18 % 9) # so the new b becomes the answer
def totatives(N):
# list comprehension!
return [x for x in range(1,N) if gcd(x,N)==1] # strangers only
def T(N):
"""
Returns the number of numbers between (1, N) that
have no factors in common with N: called the
'totient of N' (sometimes phi is used in the docs)
"""
return len(totatives(N)) # how many strangers did we find?
print("Totient of 100:", T(100))
print("Totient of 1000:", T(1000))
def powers(N):
totient = T(N)
print("Totient of {}:".format(N), totient)
for t in totatives(N):
values = [pow(t, n, N) for n in range(totient + 1)]
cycle = values[:values.index(1, 1)] # first 1 after initial 1
print("{:>2}".format(len(cycle)), cycle)
powers(17)
from random import randint
def check(N):
totient = T(N)
for t in totatives(N):
n = randint(1, 10)
print(t, pow(t, (n * totient) + 1, N))
check(17)
p = 17
q = 23
T(p*q) == (p-1)*(q-1)
p = 37975227936943673922808872755445627854565536638199
q = 40094690950920881030683735292761468389214899724061
RSA_100 = p * q
totient = (p - 1) * (q - 1)
# https://en.wikibooks.org/wiki/
# Algorithm_Implementation/Mathematics/
# Extended_Euclidean_algorithm
def xgcd(b, n):
x0, x1, y0, y1 = 1, 0, 0, 1
while n != 0:
q, b, n = b // n, n, b % n
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return b, x0, y0
# x = mulinv(b) mod n, (x * b) % n == 1
def mulinv(b, n):
g, x, _ = xgcd(b, n)
if g == 1:
return x % n
e = 3
d = mulinv(e, totient)
print((e*d) % totient)
import binascii
m = int(binascii.hexlify(b"I'm a secret"), 16)
print(m) # decimal encoding of byte string
c = pow(m, e, RSA_100) # raise to eth power
print(c)
m = pow(c, d, RSA_100) # raise to dth power
print(m)
binascii.unhexlify(hex(m)[2:]) # m is back where we started.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Silicon Forest Math Series<br/>Oregon Curriculum Network
Step2: The above algorithm is known as "trial by division".
Step3: How does Euclid's Method work? That's a great question and one your teacher should be able to explain. First see if you might figure it out for yourself...
Step5: Suppose we had asked for gcd(18, 81) instead? 18 is the remainder (no 81s go into it) whereas b was 81, so the while loop simply flips the two numbers around to give the example above.
Step6: Where to go next is in the direction of Euler's Theorem, a generalization of Fermat's Little Theorem. The built-in pow(m, n, N) function will raise m to the n modulo N in an efficient manner.
Step7: Above we see repeating cycles of numbers, with the length of the cycles all dividing 16, the totient of the prime number 17.
Step8: In public key cryptography, RSA in particular, a gigantic composite N is formed from two primes p and q.
Step9: From this totient, we'll be able to find pairs (e, d) such that (e * d) modulo T(N) == 1.
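On Python 3.8 and later, the multiplicative inverse can also be obtained directly from the built-in pow with an exponent of -1, which gives a quick cross-check of mulinv:
d_builtin = pow(e, -1, totient)        # Python >= 3.8 only
print(d_builtin == mulinv(e, totient))
print((e * d_builtin) % totient)       # 1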
|
426 | <ASSISTANT_TASK:>
Python Code:
from torchvision import utils
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import torch, torch.nn as nn
import torch.nn.functional as F
from itertools import count
from IPython import display
import warnings
import time
plt.rcParams.update({'axes.titlesize': 'small'})
from sklearn.datasets import load_digits
#The following line fetches you two datasets: images, usable for autoencoder training and attributes.
#Those attributes will be required for the final part of the assignment (applying smiles), so please keep them in mind
from lfw_dataset import fetch_lfw_dataset
data,attrs = fetch_lfw_dataset(dimx=36, dimy=36)
#preprocess faces
data = np.float32(data).transpose([0,3,1,2]) / 255.
IMG_SHAPE = data.shape[1:]
#print random image
plt.imshow(data[np.random.randint(data.shape[0])].transpose([1,2,0]),
cmap="gray", interpolation="none")
use_cuda = torch.cuda.is_available()
print("Torch version:", torch.__version__)
if use_cuda:
print("Using GPU")
else:
print("Not using GPU")
def sample_noise_batch(batch_size):
noise = torch.randn(batch_size, CODE_SIZE)
#print(noise.shape)
return noise.cuda() if use_cuda else noise.cpu()
class Reshape(nn.Module):
def __init__(self, shape):
nn.Module.__init__(self)
self.shape=shape
def forward(self,input):
return input.view(self.shape)
def save_checkpoint(state, filename):
torch.save(state, filename)
CODE_SIZE = 256
# automatic layer name maker. Don't do this in production :)
ix = ('layer_%i'%i for i in count())
generator = nn.Sequential()
generator.add_module(next(ix), nn.Linear(CODE_SIZE, 10*8*8)) #output 10*8*8
generator.add_module(next(ix), nn.ELU())
generator.add_module(next(ix), Reshape([-1, 10, 8, 8])) #output 10x8x8
generator.add_module(next(ix), nn.ConvTranspose2d(10, 64, kernel_size=(5,5))) #output 64x12x12
generator.add_module(next(ix), nn.ELU())
generator.add_module(next(ix), nn.ConvTranspose2d(64, 64, kernel_size=(5,5))) #output 64x16x16
generator.add_module(next(ix), nn.ELU())
generator.add_module(next(ix), nn.Upsample(scale_factor=2)) #output 64x32x32
generator.add_module(next(ix), nn.ConvTranspose2d(64, 32, kernel_size=(5,5))) #output 32x36x36
generator.add_module(next(ix), nn.ELU())
generator.add_module(next(ix), nn.ConvTranspose2d(32, 32, kernel_size=(5,5))) #output 32x40x40
generator.add_module(next(ix), nn.ELU())
generator.add_module(next(ix), nn.Conv2d(32, 3, kernel_size=(5,5))) #output 3x36x36
#generator.add_module(next(ix), nn.Sigmoid())
if use_cuda: generator.cuda()
generated_data = generator(sample_noise_batch(5))
assert tuple(generated_data.shape)[1:] == IMG_SHAPE, \
"generator must output an image of shape %s, but instead it produces %s"%(IMG_SHAPE,generated_data.shape)
plt.figure(figsize=(16,10))
plt.axis('off')
plt.imshow(utils.make_grid(generated_data).cpu().detach().numpy().transpose((1,2,0)).clip(0,1)*10)
plt.show()
def sample_data_batch(batch_size):
idxs = np.random.choice(np.arange(data.shape[0]), size=batch_size)
batch = torch.tensor(data[idxs], dtype=torch.float32)
return batch.cuda() if use_cuda else batch.cpu()
# a special module that converts [batch, channel, w, h] to [batch, units]
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.shape[0], -1)
discriminator = nn.Sequential()
## Got mediocre result
### YOUR CODE - create convolutional architecture for discriminator
### Note: please start simple. A few convolutions & poolings would do, inception/resnet is an overkill
discriminator.add_module("conv1", nn.Conv2d(3, 32, 5)) #output 32x32x32
discriminator.add_module("elu1", nn.ELU())
#discriminator.add_module("pool2d", nn.MaxPool2d(2, stride=2)) #output 32x16x16
discriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((16,16)))
discriminator.add_module("conv2", nn.Conv2d(32, 64, 5)) #output 64x12x12
discriminator.add_module("elu2", nn.ELU())
discriminator.add_module("conv3", nn.Conv2d(64, 10, 5)) #output 10x8x8
discriminator.add_module("elu3", nn.ELU())
discriminator.add_module("reshape", Reshape([-1, 10*8*8]))
discriminator.add_module("linear1", nn.Linear(10*8*8, CODE_SIZE)) #output 256
discriminator.add_module("elu4", nn.ELU())
discriminator.add_module("linear1", nn.Linear(CODE_SIZE, 1))
if use_cuda: discriminator.cuda()
discriminator = nn.Sequential()
# Got bad results
### YOUR CODE - create convolutional architecture for discriminator
### Note: please start simple. A few convolutions & poolings would do, inception/resnet is an overkill
discriminator.add_module("conv1", nn.Conv2d(3, 32, 5)) #output 32x32x32
discriminator.add_module("lrelu1", nn.LeakyReLU(0.2))
discriminator.add_module("conv2", nn.Conv2d(32, 64, 3)) #output 64x30x30
discriminator.add_module("bn1", nn.BatchNorm2d(64))
discriminator.add_module("lrelu2", nn.LeakyReLU(0.2))
discriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((15,15)))
discriminator.add_module("conv3", nn.Conv2d(64, 128, 4)) #output 128x12x12
discriminator.add_module("bn2", nn.BatchNorm2d(128))
discriminator.add_module("lrelu3", nn.LeakyReLU(0.2))
discriminator.add_module('avgpool2', nn.AdaptiveAvgPool2d((6,6))) #output 128x6x6
discriminator.add_module("conv4", nn.Conv2d(128, 256, 4)) #output 256x3x3
discriminator.add_module("bn3", nn.BatchNorm2d(256))
discriminator.add_module("lrelu4", nn.LeakyReLU(0.2))
discriminator.add_module("reshape", Reshape([-1, 256*3*3]))
discriminator.add_module("linear1", nn.Linear(256*3*3, 1)) #output 256
if use_cuda: discriminator.cuda()
discriminator = nn.Sequential()
# More or less fine
### YOUR CODE - create convolutional architecture for discriminator
### Note: please start simple. A few convolutions & poolings would do, inception/resnet is an overkill
discriminator.add_module("conv1", nn.Conv2d(3, 32, 5)) #output 32x32x32
discriminator.add_module("lrelu1", nn.LeakyReLU(0.2))
discriminator.add_module('avgpool1', nn.AdaptiveAvgPool2d((16,16))) #output 32x16x16
discriminator.add_module("conv2", nn.Conv2d(32, 64, 5, 1, 2)) #output 64x16x16
discriminator.add_module("bn1", nn.BatchNorm2d(64))
discriminator.add_module("lrelu2", nn.LeakyReLU(0.2))
discriminator.add_module('avgpool2', nn.AdaptiveAvgPool2d((8,8))) #output 64x8x8
discriminator.add_module("conv3", nn.Conv2d(64, 128, 5, 1, 2)) #output 128x8x8
discriminator.add_module("bn2", nn.BatchNorm2d(128))
discriminator.add_module("lrelu3", nn.LeakyReLU(0.2))
discriminator.add_module('avgpool3', nn.AdaptiveAvgPool2d((4,4))) #output 128x4x4 (renamed so the earlier 'avgpool2' is not overwritten)
discriminator.add_module("conv4", nn.Dropout(0.5))
discriminator.add_module("reshape", Reshape([-1, 128*4*4]))
discriminator.add_module("linear1", nn.Linear(128*4*4, 1)) #output 1
if use_cuda: discriminator.cuda()
sample = sample_data_batch(5)
plt.figure(figsize=(16,10))
plt.axis('off')
plt.imshow(utils.make_grid(sample).cpu().detach().numpy().transpose((1,2,0)).clip(0,1))
plt.show()
discriminator(sample).shape
def generator_loss(noise):
    """
    1. generate data given noise
    2. compute log P(real | gen noise)
    3. return generator loss (should be scalar)
    """
generated_data = generator(noise)
disc_on_generated_data = discriminator(generated_data)
logp_gen_is_real = F.logsigmoid(disc_on_generated_data)
loss = -1 * torch.mean(logp_gen_is_real)
return loss
loss = generator_loss(sample_noise_batch(32))
print(loss)
assert len(loss.shape) == 0, "loss must be scalar"
def discriminator_loss(real_data, generated_data):
    """
    1. compute discriminator's output on real & generated data
    2. compute log-probabilities of real data being real, generated data being fake
    3. return discriminator loss (scalar)
    """
disc_on_real_data = discriminator(real_data)
disc_on_fake_data = discriminator(generated_data)
logp_real_is_real = F.logsigmoid(disc_on_real_data)
    logp_gen_is_fake = F.logsigmoid(-disc_on_fake_data)  # log(1 - sigmoid(x)) equals logsigmoid(-x), not logsigmoid(1 - x)
loss = -1 * torch.mean(logp_real_is_real + logp_gen_is_fake)
return loss
loss = discriminator_loss(sample_data_batch(32),
generator(sample_noise_batch(32)))
print(loss)
assert len(loss.shape) == 0, "loss must be scalar"
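# The same two objectives can equivalently be written with binary cross-entropy on the raw
# discriminator scores (logits); this is only a minimal sketch of that numerically stable form,
# it is not used elsewhere in this notebook.
def generator_loss_bce(noise):
    logits = discriminator(generator(noise))
    return F.binary_cross_entropy_with_logits(logits, torch.ones_like(logits))
def discriminator_loss_bce(real_data, generated_data):
    real_logits = discriminator(real_data)
    fake_logits = discriminator(generated_data)
    return F.binary_cross_entropy_with_logits(real_logits, torch.ones_like(real_logits)) \
         + F.binary_cross_entropy_with_logits(fake_logits, torch.zeros_like(fake_logits))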
def sample_images(nrow, ncol, sharp=False):
with torch.no_grad():
images = generator(sample_noise_batch(batch_size=nrow*ncol))
images = images.data.cpu().numpy().transpose([0, 2, 3, 1])
if np.var(images)!=0:
images = images.clip(np.min(data),np.max(data))
for i in range(nrow*ncol):
plt.subplot(nrow,ncol,i+1)
plt.axis('off')
if sharp:
plt.imshow(images[i], cmap="gray", interpolation="none")
else:
plt.imshow(images[i], cmap="gray")
plt.show()
def sample_probas(batch_size):
plt.title('Generated vs real data')
D_real = F.sigmoid(discriminator(sample_data_batch(batch_size)))
generated_data_batch = generator(sample_noise_batch(batch_size))
D_fake = F.sigmoid(discriminator(generated_data_batch))
plt.hist(D_real.data.cpu().numpy(),
label='D(x)', alpha=0.5, range=[0,1])
plt.hist(D_fake.data.cpu().numpy(),
label='D(G(z))', alpha=0.5, range=[0,1])
plt.legend(loc='best')
plt.show()
#optimizers
disc_opt = torch.optim.SGD(discriminator.parameters(), weight_decay=1e-4, lr=5e-3)
gen_opt = torch.optim.Adam(generator.parameters(), lr=1e-4)
last_epoch = 0
WEIGHTS_PATH = './weights/dcgan.pth.tar'
if (torch.cuda.is_available()):
checkpoint = torch.load(f=WEIGHTS_PATH)
else:
    # no GPU available: map the GPU-trained weights onto the CPU
checkpoint = torch.load(map_location='cpu', f=WEIGHTS_PATH)
generator.load_state_dict(checkpoint['gen_weights'])
discriminator.load_state_dict(checkpoint['disc_weights'])
last_epoch = checkpoint['last_epoch']
disc_opt.load_state_dict(checkpoint['disc_optim'])
gen_opt.load_state_dict(checkpoint['gen_optim'])
def gaussian(ins, mean=0, stddev=0.05):
noise = torch.autograd.Variable(ins.data.new(ins.size()).normal_(mean, stddev))
return ins + noise
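# gaussian() above is applied to both real and generated batches before the discriminator
# sees them, i.e. small "instance noise", a common trick to stabilise GAN training.
# Quick sanity check of the noise scale (should be close to stddev=0.05):
gaussian(torch.zeros(2, 3, 36, 36)).std()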
warnings.simplefilter('ignore')
batch_size = 100
disc_loss = 0
gen_loss = 0
start = time.time()
for epoch in range(last_epoch, 50000):
# Train discriminator
for i in range(5):
real_data = sample_data_batch(batch_size)
fake_data = generator(sample_noise_batch(batch_size))
loss = discriminator_loss(gaussian(real_data), gaussian(fake_data))
disc_opt.zero_grad()
loss.backward()
disc_opt.step()
disc_loss = loss.item()
# Train generator
for j in range(1):
noise = sample_noise_batch(batch_size)
loss = generator_loss(noise)
gen_opt.zero_grad()
loss.backward()
gen_opt.step()
gen_loss = loss.item()
if epoch %100==0:
end = time.time()
display.clear_output(wait=True)
print("epoch %d, Generator loss %.7f, discriminator loss %.7f" % (epoch, gen_loss, disc_loss))
print("time taken (100 epochs) %.0f sec" % (end - start))
sample_images(2,3,True)
sample_probas(1000)
start = time.time()
last_epoch = epoch
print(epoch)
save_checkpoint({
'gen_weights': generator.state_dict(),
'disc_weights' : discriminator.state_dict(),
'gen_optim' : gen_opt.state_dict(),
'disc_optim' : disc_opt.state_dict(),
'last_epoch' : last_epoch
}, "./weights/dcgan.pth.tar")
plt.figure(figsize=[16, 24])
sample_images(16, 8)
# Note: a no-nonsense neural network should be able to produce reasonably good images after 15k iterations
# By "reasonably good" we mean "resembling a car crash victim" or better
num_images = len(data)
batch_size = 100
all_images = []
for batch_i in range(int((num_images - 1) / batch_size + 1)):
with torch.no_grad():
images = generator(sample_noise_batch(batch_size=batch_size))
images = images.data.cpu().numpy().transpose([0, 2, 3, 1])
if np.var(images)!=0:
images = images.clip(np.min(data), np.max(data))
all_images.append(images)
all_images = np.concatenate(all_images, axis=0)[:num_images]
np.savez("./gan.npz", Pictures=all_images)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generative adversarial nets 101
Step2: Discriminator
Step5: Training
Step6: Auxiliary functions
Step7: Training
Step8: Evaluation
|
427 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib as plt
# draw plots in notebook
%matplotlib inline
# make plots SVG (higher quality)
%config InlineBackend.figure_format = 'svg'
# more time/compute intensive to parse dates. but we know we definitely have/need them
df = pd.read_csv('data/sf_listings.csv', parse_dates=['last_review'], infer_datetime_format=True)
df_reviews = pd.read_csv('data/reviews.csv', parse_dates=['date'], infer_datetime_format=True)
df_reviews.date[0]
df.head()
# display general diagnostic info
df.info()
df_reviews.head()
# index DataFrame on listing_id in order to join datasets
reindexed_df = df_reviews.set_index('listing_id')
reindexed_df.head()
# remember the original id in a column to group on
df['listing_id'] = df['id']
df_listing = df.set_index('id')
df_listing.head()
# join the listing information with the review information
review_timeseries = df_listing.join(reindexed_df)
print review_timeseries.columns
review_timeseries.head()
# nothing new/interesting here...
review_timeseries.groupby('listing_id').count()['name'].hist(bins=100, figsize=(12,6));
# causes python to crash, let's see if there is a better way
# review_timeseries.groupby(['neighbourhood','date']).count()
# let's try a pivot table instead (via crosstab)...
reviews_over_time = pd.crosstab(review_timeseries.date, review_timeseries.neighbourhood)
reviews_over_time.head()
# let's look at some particular neighborhoods
neighborhoods = df.neighbourhood.unique()
print neighborhoods
# a little noisy
reviews_over_time[['Mission', 'South of Market', 'Noe Valley']].plot(figsize=(12,6))
# smooth by resampling by month
reviews_over_time.resample('M').mean()[['Mission', 'South of Market', 'Noe Valley']].plot(figsize=(12,6))
# Exercise 1 Solution
from sklearn import linear_model
features = df[['host_name', 'neighbourhood', 'room_type', 'minimum_nights','number_of_reviews', \
'calculated_host_listings_count', 'availability_365']]
labels = df['price']
# no price!
features.head()
# Categorical -> One Hot Encoding
# http://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-categorical-features
dummies = pd.get_dummies(features)
# sklearn likes matrices
feature_matrix = dummies.as_matrix()
labels.as_matrix()
feature_matrix
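# A tiny illustration (toy frame, not the listings data) of what get_dummies produces:
# each category becomes its own 0/1 indicator column.
pd.get_dummies(pd.DataFrame({'room_type': ['Private room', 'Entire home/apt', 'Private room']}))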
# Initialize and Fit sklearn model
model = linear_model.LinearRegression()
clf = model.fit(feature_matrix, labels.as_matrix())
# How well did we do?
clf.score(feature_matrix, labels.as_matrix())
print "There are {0} features...".format(len(clf.coef_))
clf.coef_
# Remove the name column, we are probably overfitting...
no_name = features.copy()
no_name.pop('host_name')
no_names_feature_m = pd.get_dummies(no_name).as_matrix()
model = linear_model.LinearRegression(normalize=True)
clf = model.fit(no_names_feature_m, labels.as_matrix())
# Turns out the name feature is highly predictive...
# but not very useful: https://www.kaggle.com/wiki/Leakage
clf.score(no_names_feature_m, labels.as_matrix())
len(clf.coef_)
# We need more and better features
df2 = pd.read_csv('data/listings_full.csv')
df2.columns
df2.head()
# get a snapshot of some of the columns in the center of the matrix
df2.iloc[1:5, 40:60]
# optimistically, let's just use a few key features to start. Remember Occam's razor...
select_features = df2[['host_has_profile_pic' ,'host_identity_verified', 'host_listings_count','host_response_time', 'host_acceptance_rate', 'host_is_superhost', 'transit', 'neighbourhood_cleansed','is_location_exact', 'property_type', 'room_type', 'accommodates','bathrooms','bedrooms','beds']]
select_features.head()
# moar feature engineering: fill in missing data since it will break our model otherwise
select_features = select_features.fillna({'host_response_time': 'NA', 'host_acceptance_rate': '-1%'})
select_features.info()
# convert the percentage as a string into a float
select_features.host_acceptance_rate = select_features.host_acceptance_rate.str.strip('%').astype(float) / 100
# Binarize transit column: isnull() flags listings that have no transit description
select_features.transit = select_features.transit.isnull()
select_features.transit
# One last fill in case we missed any nulls
dummies = pd.get_dummies(select_features).fillna(0)
feature_matrix = dummies.as_matrix()
# Price as a currency string -> price as a float
labels = df2.price.str.strip('$').str.replace(',', '').astype(float)
# initialize model again
model = linear_model.LinearRegression(normalize=True)
clf = model.fit(feature_matrix, labels)
# much better!
clf.score(feature_matrix, labels)
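# Note that the score above is measured on the same rows the model was fit on, so it is optimistic.
# A minimal held-out check (sketch only; the 80/20 split fraction is chosen arbitrarily):
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(feature_matrix, labels, test_size=0.2, random_state=0)
holdout = linear_model.LinearRegression(normalize=True).fit(X_tr, y_tr)
holdout.score(X_te, y_te)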
# a sweet spot in between over and under fitting
len(clf.coef_)
# Predict what we should price listing #1000 at given its features
clf.predict(feature_matrix[1100])
# Looks like it is overpriced...
df2.iloc[1100].price
# And it shows... there are only 2 reviews per month
df2.iloc[1100]
# Where the top listing have 10+ reviews per month
df2.sort_values('reviews_per_month', ascending=False).head()
# Zip together our column names with our beta coefficients
coefficients = zip(dummies.columns, clf.coef_)
# Most significant
sorted(coefficients, key=lambda coef: coef[1], reverse=True)[:10]
# Least significant
sorted(coefficients, key=lambda coef: coef[1])[:10]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Star Schema (facts vs. dimensions)
Step2:
Step3: Pandas Resample String convention
Step4: Correlation vs. Regression
Step5: R-squared
Step6: Making Predictions
|
428 | <ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings("ignore")
import os
import numpy as np
import xarray as xr
import dask
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
%matplotlib inline
import holoviews as hv
hv.notebook_extension("matplotlib")
from landlab import RasterModelGrid
from landlab.components import (
FlowAccumulator,
FastscapeEroder,
LinearDiffuser,
Lithology,
LithoLayers,
)
mg = RasterModelGrid((10, 15))
z = mg.add_zeros("topographic__elevation", at="node")
layer_elevations = 5.0 * np.arange(-10, 10)
# we create a bottom layer that is very thick.
layer_elevations[-1] = layer_elevations[-2] + 100
layer_ids = np.tile([0, 1, 2, 3], 5)
attrs = {"K_sp": {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}
func = lambda x, y: x + (2.0 * y)
lith = LithoLayers(mg, layer_elevations, layer_ids, function=func, attrs=attrs)
mg.imshow("rock_type__id", cmap="viridis")
z -= 1.0
dz_ad = 0.0
lith.dz_advection = dz_ad
lith.run_one_step()
mg.imshow("rock_type__id", cmap="viridis")
z += 1.0
dz_ad = 0.0
lith.dz_advection = dz_ad
lith.rock_id = 0
lith.run_one_step()
mg.imshow("rock_type__id", cmap="viridis", vmin=0, vmax=3)
z += 2.0
dz_ad = 0.0
spatially_variable_rock_id = mg.ones("node")
spatially_variable_rock_id[mg.x_of_node > 6] = 2
lith.dz_advection = dz_ad
lith.rock_id = spatially_variable_rock_id
lith.run_one_step()
mg.imshow("rock_type__id", cmap="viridis", vmin=0, vmax=3)
ds = lith.rock_cube_to_xarray(np.arange(30))
hvds_rock = hv.Dataset(ds.rock_type__id)
%opts Image style(cmap='viridis') plot[colorbar=True]
hvds_rock.to(hv.Image, ["x", "y"])
%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True]
hvds_rock.to(hv.Image, ["x", "z"])
# Parameters that control the size and shape of the model grid
number_of_rows = 50
number_of_columns = 50
dx = 1
# Parameters that control the LithoLayers
# the layer shape function
func = lambda x, y: (0.5 * x) ** 2 + (0.5 * y) ** 2
# the layer thicknesses
layer_thickness = 50.0
# the location of the anchor point
x0 = 25
y0 = 25
# the resolution at which you sample to create the plan view and cross-section view figures.
sample_depths = np.arange(0, 30, 1)
# create the model grid
mg = RasterModelGrid((number_of_rows, number_of_columns), dx)
z = mg.add_zeros("topographic__elevation", at="node")
# set up LithoLayers inputs
layer_ids = np.tile([0, 1, 2, 3], 5)
layer_elevations = layer_thickness * np.arange(-10, 10)
layer_elevations[-1] = layer_elevations[-2] + 100
attrs = {"K_sp": {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}
# create LithoLayers
lith = LithoLayers(
mg, layer_elevations, layer_ids, x0=x0, y0=y0, function=func, attrs=attrs
)
# deposit and erode
dz_ad = 0.0
z -= 1.0
lith.dz_advection = dz_ad
lith.run_one_step()
z += 1.0
lith.dz_advection = dz_ad
lith.rock_id = 0
lith.run_one_step()
z += 2.0
spatially_variable_rock_id = mg.ones("node")
spatially_variable_rock_id[mg.x_of_node > 6] = 2
lith.dz_advection = dz_ad
lith.rock_id = spatially_variable_rock_id
lith.run_one_step()
# get the rock-cube data structure and plot
ds = lith.rock_cube_to_xarray(sample_depths)
hvds_rock = hv.Dataset(ds.rock_type__id)
# make a plan view image
%opts Image style(cmap='viridis') plot[colorbar=True]
hvds_rock.to(hv.Image, ["x", "y"])
%opts Image style(cmap='viridis') plot[colorbar=True, invert_yaxis=True]
hvds_rock.to(hv.Image, ["x", "z"])
mg = RasterModelGrid((50, 30), 400)
z = mg.add_zeros("topographic__elevation", at="node")
random_field = 0.01 * np.random.randn(mg.size("node"))
z += random_field - random_field.min()
attrs = {"K_sp": {0: 0.0003, 1: 0.0001}}
z0s = 50 * np.arange(-20, 20)
z0s[-1] = z0s[-2] + 10000
ids = np.tile([0, 1], 20)
# Anticline
anticline_func = lambda x, y: ((0.002 * x) ** 2 + (0.001 * y) ** 2)
# Shallow dips
shallow_func = lambda x, y: ((0.001 * x) + (0.003 * y))
# Steeper dips
steep_func = lambda x, y: ((0.01 * x) + (0.01 * y))
# Anticline
lith = LithoLayers(
mg, z0s, ids, x0=6000, y0=10000, function=anticline_func, attrs=attrs
)
# Shallow dips
# lith = LithoLayers(mg, z0s, ids, function=shallow_func, attrs=attrs)
# Steeper dips
# lith = LithoLayers(mg, z0s, ids, function=steep_func, attrs=attrs)
mg.imshow("K_sp")
nts = 300
U = 0.001
dt = 1000
fa = FlowAccumulator(mg)
sp = FastscapeEroder(mg, K_sp="K_sp")
ds = xr.Dataset(
data_vars={
"topographic__elevation": (
("time", "y", "x"), # tuple of dimensions
np.empty((nts, mg.shape[0], mg.shape[1])), # n-d array of data
{
"units": "meters", # dictionary with data attributes
"long_name": "Topographic Elevation",
},
),
"rock_type__id": (
("time", "y", "x"),
np.empty((nts, mg.shape[0], mg.shape[1])),
{"units": "-", "long_name": "Rock Type ID Code"},
),
},
coords={
"x": (
("x"), # tuple of dimensions
mg.x_of_node.reshape(mg.shape)[0, :], # 1-d array of coordinate data
{"units": "meters"},
), # dictionary with data attributes
"y": (("y"), mg.y_of_node.reshape(mg.shape)[:, 1], {"units": "meters"}),
"time": (
("time"),
dt * np.arange(nts) / 1e6,
{"units": "millions of years since model start", "standard_name": "time"},
),
},
)
print(ds)
ds.topographic__elevation
out_fields = ["topographic__elevation", "rock_type__id"]
for i in range(nts):
fa.run_one_step()
sp.run_one_step(dt=dt)
dz_ad = np.zeros(mg.size("node"))
dz_ad[mg.core_nodes] = U * dt
z += dz_ad
lith.dz_advection = dz_ad
lith.run_one_step()
for of in out_fields:
ds[of][i, :, :] = mg["node"][of].reshape(mg.shape)
mg.imshow("topographic__elevation", cmap="viridis")
hvds_topo = hv.Dataset(ds.topographic__elevation)
hvds_rock = hv.Dataset(ds.rock_type__id)
hvds_topo
%opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True]
topo = hvds_topo.to(hv.Image, ["x", "y"])
rock = hvds_rock.to(hv.Image, ["x", "y"])
topo + rock
mg2 = RasterModelGrid((30, 30), 200)
mg2.set_closed_boundaries_at_grid_edges(False, True, False, True)
z2 = mg2.add_zeros("topographic__elevation", at="node")
random_field = 0.01 * np.random.randn(mg2.size("node"))
z2 += random_field - random_field.min()
thicknesses2 = [10000]
ids2 = [0]
attrs2 = {"K_sp": {0: 0.0001, 1: 0.00001}, "D": {0: 0.4, 1: 0.001}}
lith2 = Lithology(mg2, thicknesses2, ids2, attrs=attrs2)
nts = 500
U = 0.005
dt = 1000
fa2 = FlowAccumulator(mg2)
sp2 = FastscapeEroder(mg2, K_sp="K_sp")
ld2 = LinearDiffuser(mg2, linear_diffusivity="D")
out_fields = ["topographic__elevation", "rock_type__id"]
out_fields = ["topographic__elevation", "rock_type__id"]
nts = 200
U = 0.001
dt = 1000
ds2 = xr.Dataset(
data_vars={
"topographic__elevation": (
("time", "y", "x"),
np.empty((nts, mg2.shape[0], mg2.shape[1])),
{"units": "meters", "long_name": "Topographic Elevation"},
),
"rock_type__id": (
("time", "y", "x"),
np.empty((nts, mg2.shape[0], mg2.shape[1])),
{"units": "-", "long_name": "Rock Type ID Code"},
),
},
coords={
"x": (("x"), mg2.x_of_node.reshape(mg2.shape)[0, :], {"units": "meters"}),
"y": (("y"), mg2.y_of_node.reshape(mg2.shape)[:, 1], {"units": "meters"}),
"time": (
("time"),
dt * np.arange(nts) / 1e6,
{"units": "millions of years since model start", "standard_name": "time"},
),
},
)
half_nts = int(nts / 2)
dz_ad2 = np.zeros(mg2.size("node"))
dz_ad2[mg2.core_nodes] = U * dt
lith2.dz_advection = dz_ad2
lith2.rock_id = 0
for i in range(half_nts):
fa2.run_one_step()
sp2.run_one_step(dt=dt)
ld2.run_one_step(dt=dt)
z2 += dz_ad2
lith2.run_one_step()
for of in out_fields:
ds2[of][i, :, :] = mg2["node"][of].reshape(mg2.shape)
mg2.imshow("topographic__elevation", cmap="viridis")
volcanic_deposits = np.zeros(mg2.size("node"))
da_big_enough = mg2["node"]["drainage_area"] > 5e4
topo_difference_from_top = (
mg2["node"]["topographic__elevation"].max() - mg2["node"]["topographic__elevation"]
)
volcanic_deposits[da_big_enough] = 0.25 * topo_difference_from_top[da_big_enough]
volcanic_deposits[mg2.boundary_nodes] = 0.0
z2 += volcanic_deposits
lith2.rock_id = 1
lith2.run_one_step()
mg2.imshow(volcanic_deposits)
for i in range(half_nts, nts):
fa2.run_one_step()
sp2.run_one_step(dt=dt)
ld2.run_one_step(dt=dt)
dz_ad2 = np.zeros(mg2.size("node"))
dz_ad2[mg2.core_nodes] = U * dt
z2 += dz_ad2
lith2.dz_advection = dz_ad2
lith2.rock_id = 0
lith2.run_one_step()
for of in out_fields:
ds2[of][i, :, :] = mg2["node"][of].reshape(mg2.shape)
mg2.imshow("topographic__elevation", cmap="viridis")
hvds_topo2 = hv.Dataset(ds2.topographic__elevation)
hvds_rock2 = hv.Dataset(ds2.rock_type__id)
%opts Image style(interpolation='bilinear', cmap='viridis') plot[colorbar=True]
topo2 = hvds_topo2.to(hv.Image, ["x", "y"])
rock2 = hvds_rock2.to(hv.Image, ["x", "y"])
topo2 + rock2
# if you wanted to output to visualize in something like ParaView, the following commands can be used
# ds.to_netcdf('anticline.nc')
# ds2.to_netcdf('inversion.nc')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Next we make our layer elevations. We will make 20 layers that are 5 meters thick. Note that here, as with most Landlab components, there are no default units. At the anchor point, half of the layers will be above the ground ('layer_elevations' will have negative values) and half will be below the ground ('layer_elevations' have positive values).
Step3: Next we create an array that represents our rock type ID values. We will create alternating layers of four types of rock by making an array with alternating 0s 1s 2s and 3s with the np.tile function.
Step4: Our dictionary containing rock property attributes has the following form
Step5: 'K_sp' is the property that we want to track through the layered rock, 0, 1, 2, 3 are the rock type IDs, and 0.0003, 0.0001, 0.0002, and 0.0004 are the corresponding 'K_sp' values for those four rock types.
Step6: Finally we construct our LithoLayers component by passing the correct arguments.
Step7: LithoLayers will make sure that the model grid has at-node grid fields with the layer attribute names. In this case, this means that the model grid will now include a grid field called 'K_sp' and a field called 'rock_type__id'. We can plot these with the Landlab imshow function.
Step8: As you can see, we have layers that strike East-South-East. Since we can only see the surface expression of the layers, we can't infer the dip direction or magnitude from the plot alone.
Step9: We can re-plot the value of 'K_sp'. We will see that the location of the surface expression of the rock layers has changed. As we expect, the location has changed in a way that is consistent with layers dipping to the NNE.
Step10: Anytime material is added, LithoLayers or Lithology needs to know the type of rock that has been added. LithoLayers and Lithology do not assume to know the correct rock type ID and thus require that the user specify it with the rock_id keyword argument. In the run_one_step function, both components will check to see if any deposition has occured. If deposition occurs and this argument is not passed, then an error will be raised.
Step11: When we plot the value of the rock type ID at the surface, we find that it is now all purple, the color of rock type zero.
Step12: The value passed to the rock_id keyword argument can be either a single value (as in the second to last example) or an array of length number-of-nodes. This option permits a user to indicate that more than one type of rock is deposited in a single time step.
Step13: As you can see this results in the value of rock type at the surface being about half rock type 1 and about half rock type 2. Next we will create an xarray dataset that has 3D information about our Lithology to help visualize the layers in space. We will use the rock_cube_to_xarray method of the LithoLayers component.
Step14: The slider allows us to change the depth below the topographic surface.
Step15: Hopefully this gives you a sense of how LithoLayers works. The next two blocks of code have all the steps we just worked through in one place.
Step16: You can also make a cross section of this new LithoLayers component.
Step17: Part 2
Step18: Next we set all the parameters for LithoLayers. Here we have two types of rock with different erodabilities.
Step19: There are three functional forms that you can choose between. Here we define each of them.
Step20: The default option is to make an anticline, but you can comment/uncomment lines to choose a different functional form.
Step21: Now that we've created LithoLayers, model grid fields for each of the LithoLayers attributes exist and have been set to the values of the rock exposed at the surface.
Step22: As you can see (in the default anticline option) we have concentric elipses of stronger and weaker rock.
Step23: Before we run the model we will also instantiate an xarray dataset used to store the output of our model through time for visualization.
Step24: We can print the data set to get some basic information about it.
Step25: We can also print a single variable to get more detailed information about it.
Step26: Next, we run the model. In each time step we first run the FlowAccumulator to direct flow and accumulatate drainage area. Then the FastscapeEroder erodes the topography based on the stream power equation using the erodability value in the field 'K_sp'. We create an uplift field that uplifts only the model grid's core nodes. After uplifting these core nodes, we update LithoLayers. Importantly, we must tell the LithoLayers how it has been advected upward by uplift using the dz_advection keyword argument.
Step27: Now that the model has run, let's start by plotting the resulting topography.
Step28: The layers of rock clearly influence the form of topography.
Step29: Next we specify that we want two images, one showing rock type and one showing topographic elevation. A slider bar shows us model time in millions of years.
Step30: We can see the form of the anticline advecting through the topography. Cool!
Step31: After the first half of run time, let's look at the topography.
Step32: We can see that we have developed ridges and valleys as we'd expect from a model with stream power erosion and linear diffusion.
Step33: We should expect that the locations of our valleys and ridges change as the river system encounters the much stronger volcanic rock.
Step34: Now that the model has run, let's plot the final elevation
Step35: And now a HoloView Plot that lets us explore the time evolution of the topography
|
429 | <ASSISTANT_TASK:>
Python Code:
%run "../Functions/1. Game sessions.ipynb"
import unidecode
accented_string = "Enormément"
# accented_string is of type 'unicode'
unaccented_string = unidecode.unidecode(accented_string)
unaccented_string
# unaccented_string contains 'Enormement' and is of type 'str'
_rmDF = rmdf1522
userId = '8829514a-cb9f-47fb-aaeb-3167776f1062'
#userId = getRandomRedMetricsGUID(_rmDF)
#def getUserSessions( _rmDF, userId):
result = _rmDF.loc[:,['userId','sessionId']][_rmDF['userId']==userId]['sessionId'].drop_duplicates().dropna(how='any')
result
_sessionIndex = randint(0,len(result)-1)
_guid = result.iloc[_sessionIndex]
_guid
userId
from random import randint
uniqueUsers = rmdf1522['userId'].dropna().unique()
userCount = len(uniqueUsers)
testlocalplayerguid = '0'
while (not isGUIDFormat(testlocalplayerguid)):
userIndex = randint(0,userCount-1)
testlocalplayerguid = uniqueUsers[userIndex]
testlocalplayerguid
sessionscount = rmdf1522["sessionId"].nunique()
sessionscount
platforms = rmdf1522["customData.platform"].unique()
platforms
testGUID = '"4dbc2f43-421c-4e23-85d4-f17723ff8c66"'
# includewithoutusers=True will count sessions that do not have any userId attached
getSessionsCount( rmdf1522, testGUID)
sessionsList = getUserSessions(rmdf1522, testGUID)
sessionsList
sessionsList = rmdf1522[rmdf1522['type']=='start']
sessionsList = sessionsList.drop('type', 1)
sessionsList = sessionsList.dropna(how='any')
userSessionsList = sessionsList[sessionsList['userId']==testGUID]
userSessionsList
#print(testGUID)
sessionsList = getUserSessions(rmdf1522, testGUID)
#sessionsList = getAllSessions(rmdf1522, testGUID.replace('"',''))
#print(type(sessionsList))
sessionsList.shape[0]
allSessions = rmdf1522.loc[:,['userId', 'sessionId']].drop_duplicates()
allSessions.head()
allSessions.groupby('userId').size().reset_index(name='counts').sort_values(by='counts', ascending=False).head(10)
#getUserSessionsCounts(getNormalizedRedMetricsCSV(part152)).head(10)
allSessions.groupby('userId').agg(['count']).head() #.sort_values(by='sessionId', ascending=False).head(10)
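# summary statistics of the sessions-per-user distribution (same grouping as above)
allSessions.groupby('userId').size().describe()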
#df2 = pd.concat([df151, rmdf1522])
#df2.head(2)
#print(df2.columns)
#df2columns = df2.columns.values
#type(df2columns)
#df2columns
#newColumns = np.concatenate((minimalInitializationColumns, df2columns))
#newColumns
#df2 = getNormalizedRedMetricsCSV(df)
getRandomSessionGUID()
_userId = '"e8fed737-7c65-49c8-bf84-f8ae71c094f8"'
type(rmdf1522['userId'].dropna().unique()), type(getUserSessions( rmdf1522, _userId ))
_userId = 'e8fed737-7c65-49c8-bf84-f8ae71c094f8'
_uniqueSessions = getUserSessions( rmdf1522, _userId )
len(_uniqueSessions)
_uniqueSessions
#_userId = ''
_userId = '"e8fed737-7c65-49c8-bf84-f8ae71c094f8"'
#def getRandomSessionGUID( _userId = '' ):
rmId = _userId
if( not(isGUIDFormat(_userId))):
rmId = getRandomRedMetricsGUID()
_uniqueSessions = getUserSessions( rmdf1522, rmId )
_sessionsCount = len(_uniqueSessions)
_guid = ""
_sessionIndex = randint(0,_sessionsCount-1)
_guid = _uniqueSessions.iloc[_sessionIndex]
_guid
rmId
_sessionIndex
_sessionsCount
randint(0,0)
_uniqueSessions
getRandomSessionGUID()
userId = testGUID
userId = getRandomRedMetricsGUID()
#print('----------------------uid='+str(uid)+'----------------------')
sessions = getUserSessions(rmdf1522, userId)
firstGameTime = pd.to_datetime('2050-12-31T12:59:59.000Z', utc=True)
for session in sessions:
#print('-----------------------------------------session='+str(session))
timedEvents = rmdf1522[rmdf1522['sessionId']==session]
timedEvents = timedEvents.dropna(subset=['section'])
if(len(timedEvents) > 0):
timedEvents['userTime'] = timedEvents['userTime'].map(lambda t: pd.to_datetime(t, utc=True))
earliest = timedEvents['userTime'].min()
if(earliest < firstGameTime):
firstGameTime = earliest
#else:
#print('no event with section')
#print('-----------------------------------------')
print("firstGameTime=" + str(firstGameTime))
rmdf1522[rmdf1522['userId']==userId]
sessions = getUserSessions(rmdf1522, userId)
sessions
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tests
Step2: getUserSessions tinkering
Step3: getTranslatedForm tinkering - from 0.4 GF correct answers
Step4: print("part100="+str(part100.head(1)))
Step5: print("part100="+str(part100.columns))
Step6: getRandomSessionGUID tinkering
Step7: getFirstEventDate tinkering
|
430 | <ASSISTANT_TASK:>
Python Code:
with open('data/inflammation-01.csv', 'r') as f:
snippet = f.readlines()[:3]
print(*snippet)
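# The with-block above works because file objects implement the context-manager protocol:
# __enter__ is called on entry and __exit__ (which closes the file) on exit.
f = open('data/inflammation-01.csv', 'r')
hasattr(f, '__enter__'), hasattr(f, '__exit__')
f.close()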
import numpy as np
data = np.loadtxt(fname='data/inflammation-01.csv', delimiter=',') # Comma-separated...
print(data)
type(data)
data.shape
", ".join(dir(data))
print(data * 2)
data[0:3, 0:3]
data[:3,:3]
# Use this cell!
data.mean(), data.max(), data.min()
data[3].max() # Max inflammation for 4th patient
data[:,1].max() # Max infl for 2nd day
data.max(axis=0)
data.max(axis=0).shape
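# For comparison, axis=1 collapses the other (day) dimension instead,
# giving one value per patient rather than one per day:
data.mean(axis=1).shape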
# Use this cell!
# Switch on the joy
% matplotlib inline
import matplotlib
pretty_pic = matplotlib.pyplot.imshow(data)
matplotlib.pyplot.show(pretty_pic)
# Use this cell!
import numpy as np
from matplotlib import pyplot
data = np.loadtxt(fname='data/inflammation-01.csv', delimiter=',')
pyplot.figure(figsize=(5.0, 3.0))
pyplot.xlabel('Day')
pyplot.ylabel('Inflammation')
pyplot.plot(data.mean(axis=0), label='Average')
pyplot.plot(data.max(axis=0), label='Max')
pyplot.plot(data.min(axis=0), label='Min')
pyplot.legend()
pyplot.show()
exec(In[42]) # Cheeky way to reshow output... forget immediately.
# This is the whole figure, possibly
# with multiple subplots
fig = pyplot.figure(figsize=(5.0, 3.0))
# This is a specific set of axes
axes = fig.add_subplot(1, 1, 1)
axes.set_xlabel('Day')
axes.set_ylabel('Inflammation')
axes.plot(data.mean(axis=0), label='Average')
axes.plot(data.max(axis=0), label='Max')
axes.plot(data.min(axis=0), label='Min')
axes.legend()
pyplot.show()
fig = pyplot.figure(figsize=(10.0, 3.0))
axes = [] # Blank list
for i in range(1, 4):
ax = fig.add_subplot(1, 3, i)
ax.set_xlabel('Day')
axes.append(ax)
axes[0].set_ylabel('Average')
axes[1].set_ylabel('Max')
axes[2].set_ylabel('Min')
axes[0].plot(data.mean(axis=0))
axes[1].plot(data.max(axis=0))
axes[2].plot(data.min(axis=0))
fig.tight_layout()
pyplot.show(fig)
import bokeh.plotting as bplot
from bokeh.io import output_notebook
output_notebook()
fig = bplot.figure()
days = np.arange(data.shape[1])
fig.line(days, data.mean(axis=0))
fig.xaxis.axis_label = "Day"
bplot.show(fig)
# Here's cell for you...
# RMV
# This is the whole figure, possibly
# with multiple subplots
fig = bplot.figure()
days = np.arange(data.shape[1])
# This is a specific set of axes
fig.xaxis.axis_label = 'Day'
fig.yaxis.axis_label = 'Inflammation'
fig.line(days, data.mean(axis=0), legend='Average', color='green')
fig.line(days, data.max(axis=0), legend='Max', color='blue')
fig.line(days, data.min(axis=0), legend='Min', color='red')
bplot.show(fig)
from bokeh.models import ColumnDataSource
from bokeh.io import push_notebook
# Start out with days vs average
initial_coordinates = {'x': days, 'y': data.mean(axis=0)}
source = ColumnDataSource(initial_coordinates)
# Define a callback to update the plot when we
# pick something else
def update_plot_statistic(statistic):
if statistic == "Average":
source.data['y'] = data.mean(axis=0)
elif statistic == "Max":
source.data['y'] = data.max(axis=0)
elif statistic == "Min":
source.data['y'] = data.min(axis=0)
push_notebook()
fig = bplot.figure()
days = np.arange(data.shape[1])
fig.xaxis.axis_label = 'Day'
fig.yaxis.axis_label = 'Inflammation'
fig.line(initial_coordinates['x'], initial_coordinates['y'], source=source)
bplot.show(fig)
from ipywidgets import interact
interact(update_plot_statistic, statistic=["Average", "Max", "Min"])
# Use this cell for the plot
# And this for the one line `interact` call afterwards
# RMV
# Start out with days vs average
initial_coordinates = {'x': days, 'y': data.mean(axis=0)}
source = ColumnDataSource(initial_coordinates)
# Define a callback to update the plot when we
# pick something else
def update_plot(N):
source.data['x'] = np.arange(N)
push_notebook()
fig = bplot.figure()
days = np.arange(data.shape[1])
fig.xaxis.axis_label = 'Day'
fig.yaxis.axis_label = 'Inflammation'
fig.line(initial_coordinates['x'], initial_coordinates['y'], source=source)
bplot.show(fig)
interact(update_plot, N=(1, 60, 1))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This construct, a with statement, addresses the age-old problem of cleaning up file descriptors. In general, a with context expects the object being used to have some open and close routines that will be called at the entrance and exit of the block, respectively. Note that we don't have scoping here - the snippet variable exists outside the block, making it easy to load data in a short block and handle it later.
Step2: While this has kindly been rendered for us as if it were a list of lists, in reality it is something much more useful...
Step3: This type is numpy's N-dimensional matrix class. This means we get a wide swathe of attributes and methods applicable to matrices, and a whole load of functions inside numpy and other libraries that will happily take it as an argument. One particularly useful property is shape
Step4: This tells us that there are 60 patients (rows), each with 40 days of inflammation measurements (columns).
Step5: A lot of the magic methods here, those with double underscores on either side, allow Python to treat this object more like a built-in. For instance, the __neg__ method will get called when we enter -data. If we try data1 < data2, then data1.__lt__ will be called with data2 as an argument, expecting a boolean return. The behaviour when the user writes data.previously_unknown_attr is defined by the __getattr__ method - this need not be an error. In some ways, you can think of this as an expansion of the concept of operator overloading.
Step6: This works because numpy has overridden the __mul__ method.
Step7: ...or more succinctly...
Step8: A few more practical uses of nparray
Step9: This could also be written data[3,
Step10: Suppose we want the max for every day...
Step11: As you can see, the second day does indeed have max of 1.0. Notice that it returns an array (this is a superclass of ndarray), so we can do...
Step12: This shouldn't be surprising - we take the average over all patients for each of 40 days, so we end up with a 40 element array.
Step13: RIGHT
Step14: Now for inline plotting we can use the matplotlib module, the go-to for Python plotting (but not in the Dijkstra sense)...
Step15: MMMM. Pretty. Red shows greater inflammation, and what we may have guessed from the bits of CSV we saw, inflammation starts low, grows and reduces. Patients along the side, days along the bottom.
Step16: Reproducible Plots
Step17: Now if I want to modify some part... I can do so and have the updated plot straight-away
Step18: Not particularly more complicated, but more flexible... say we have a series of plots...
Step19: Challenge
Step20: If this works, you should see an icon like a rainbow-coloured camera shutter.
Step21: Notice the syntax is quite similar to matplotlib but a little different, so make sure you don't get them mixed up. For example, plot has become specifically line and we now explicitly provide the days along the bottom. Note that numpy.arange is just like range but it returns a numpy array.
Step22: Don't forget to put up your star!
Step23: Interactive plots
Step24: First off, note that we do not have to tell the function to import the global variable; we can just use it. Now, examine the function - this is a callback that we will use to update the plot when we change a drop-down menu choice of function.
Step25: Now we need a widget to use this...
Step26: This is a very basic approach - the widget tool guesses you want a drop-down because you pass a list. Alternatively, you could pass a tuple with two floats as limits and get a slider back.
Step27: Don't forget your star!
|
431 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from parcels import FieldSet, ParticleSet, JITParticle, AdvectionRK4
from datetime import timedelta, datetime
filenames = {'U': "GlobCurrent_example_data/20*.nc",
'V': "GlobCurrent_example_data/20*.nc"}
variables = {'U': 'eastward_eulerian_current_velocity',
'V': 'northward_eulerian_current_velocity'}
dimensions = {'lat': 'lat',
'lon': 'lon',
'time': 'time'}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)
pset = ParticleSet.from_line(fieldset=fieldset, size=5, pclass=JITParticle,
start=(31, -31), finish=(32, -31), time=datetime(2002, 1, 1))
pset.show()
pset.execute(AdvectionRK4, runtime=timedelta(days=7), dt=timedelta(minutes=5))
pset.show()
pset.show(land=False)
pset.show(domain={'N':-31, 'S':-35, 'E':33, 'W':26})
pset.show(field=fieldset.U)
pset.show(field=fieldset.U, show_time=datetime(2002, 1, 10, 2))
pset.show(field=fieldset.U, show_time=datetime(2002, 1, 10, 2), with_particles=False)
pset.show(field='vector')
pset.show(field='vector', vmax=3.0, domain={'N':-31, 'S':-39, 'E':33, 'W':18})
try: # Within a try/pass for unit testing on machines without cartopy installed
import cartopy
pset.show(field='vector', vmax=3.0, domain={'N':-31, 'S':-39, 'E':33, 'W':18}, projection=cartopy.crs.Robinson())
except:
pass
pset.show(field='vector', vmax=3.0, domain={'N':-31, 'S':-39, 'E':33, 'W':18}, land=True, savefile='particles')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We then instantiate a FieldSet with the velocity field data from the GlobCurrent dataset.
Step2: Next, we instantiate a ParticeSet composed of JITParticles
Step3: Given this ParticleSet, we can now explore the different features of the show() method. To start, let's simply call show() with no arguments.
Step4: Then, let's advect the particles starting on January 1, 2002 for a week.
Step5: If we call show() again, we will see that the particles have been advected
Step6: To plot without the continents on the same plot, add land=False.
Step7: To set the domain of the plot, we specify the domain argument. The format domain expects a dictionary with entries {'S', 'N', 'E', 'W'} for South, North, East and West extent, respectively. Note that the plotted domain is found by interpolating the user-specified domain onto the velocity grid. For instance,
Step8: We can also easily display a scalar contour plot of a single component of the velocity vector field. This is done by setting the field argument equal to the desired scalar velocity field.
Step9: To plot the scalar U velocity field at a different date and time, we set the argument show_time equal to a datetime or timedelta object or simply the number of seconds since the time origin. For instance, let's view the U field on January, 10, 2002 at 2 PM.
Step10: Note that the particle locations do not change, but remain at the locations corresponding to the end of the last integration. To remove them from the plot, we set the argument with_particles equal to False.
Step11: By setting the field argument equal to vector, we can display the velocity in full vector form.
Step12: The normalized vector field is colored by speed. To control the maximum speed value on the colorbar, set the vmax argument equal to the desired value.
Step13: We can change the projection of the plot by providing one of the projections from cartopy. For example, to plot on a Robinson projection , we use projection=cartopy.crs.Robinson(). Note that not all projections support gridlines, so these may not be shown.
Step14: If we want to save the file rather than show it, we set the argument savefile equal to the 'path/to/save/file'.
|
432 | <ASSISTANT_TASK:>
Python Code:
# Examples are given for numpy. This code also setups ipython/jupyter
# so that numpy arrays in the output are displayed as images
import numpy
from utils import display_np_arrays_as_images
display_np_arrays_as_images()
ims = numpy.load('./resources/test_images.npy', allow_pickle=False)
# There are 6 images of shape 96x96 with 3 color channels packed into tensor
print(ims.shape, ims.dtype)
# display the first image (whole 4d tensor can't be rendered)
ims[0]
# second image in a batch
ims[1]
# we'll use three operations
from einops import rearrange, reduce, repeat
# rearrange, as its name suggests, rearranges elements
# below we swapped height and width.
# In other words, transposed first two axes (dimensions)
rearrange(ims[0], 'h w c -> w h c')
# einops allows seamlessly composing batch and height to a new height dimension
# We just rendered all images by collapsing to 3d tensor!
rearrange(ims, 'b h w c -> (b h) w c')
# or compose a new dimension of batch and width
rearrange(ims, 'b h w c -> h (b w) c')
# resulting dimensions are computed very simply
# length of newly composed axis is a product of components
# [6, 96, 96, 3] -> [96, (6 * 96), 3]
rearrange(ims, 'b h w c -> h (b w) c').shape
# we can compose more than two axes.
# let's flatten 4d array into 1d, resulting array has as many elements as the original
rearrange(ims, 'b h w c -> (b h w c)').shape
# decomposition is the inverse process - represent an axis as a combination of new axes
# several decompositions possible, so b1=2 is to decompose 6 to b1=2 and b2=3
rearrange(ims, '(b1 b2) h w c -> b1 b2 h w c ', b1=2).shape
# finally, combine composition and decomposition:
rearrange(ims, '(b1 b2) h w c -> (b1 h) (b2 w) c ', b1=2)
# slightly different composition: b1 is merged with width, b2 with height
# ... so letters are ordered by w then by h
rearrange(ims, '(b1 b2) h w c -> (b2 h) (b1 w) c ', b1=2)
# move part of width dimension to height.
# we should call this width-to-height as image width shrunk by 2 and height doubled.
# but all pixels are the same!
# Can you write reverse operation (height-to-width)?
rearrange(ims, 'b h (w w2) c -> (h w2) (b w) c', w2=2)
# compare with the next example
rearrange(ims, 'b h w c -> h (b w) c')
# order of axes in composition is different
# rule is just as for digits in the number: leftmost digit is the most significant,
# while neighboring numbers differ in the rightmost axis.
# you can also think of this as lexicographic sort
rearrange(ims, 'b h w c -> h (w b) c')
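# a tiny numeric check of that ordering rule (separate from the image examples):
x = numpy.arange(6).reshape(2, 3)     # [[0, 1, 2], [3, 4, 5]]
rearrange(x, 'a b -> (a b)')          # [0, 1, 2, 3, 4, 5] -- 'a' is the most significant axis
rearrange(x, 'a b -> (b a)')          # [0, 3, 1, 4, 2, 5] -- 'b' is the most significant axis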
# what if b1 and b2 are reordered before composing to width?
rearrange(ims, '(b1 b2) h w c -> h (b1 b2 w) c ', b1=2) # produces 'einops'
rearrange(ims, '(b1 b2) h w c -> h (b2 b1 w) c ', b1=2) # produces 'eoipns'
# average over batch
reduce(ims, 'b h w c -> h w c', 'mean')
# the previous is identical to familiar:
ims.mean(axis=0)
# but is so much more readable
# Example of reducing of several axes
# besides mean, there are also min, max, sum, prod
reduce(ims, 'b h w c -> h w', 'min')
# this is mean-pooling with 2x2 kernel
# image is split into 2x2 patches, each patch is averaged
reduce(ims, 'b (h h2) (w w2) c -> h (b w) c', 'mean', h2=2, w2=2)
# max-pooling is similar
# result is not as smooth as for mean-pooling
reduce(ims, 'b (h h2) (w w2) c -> h (b w) c', 'max', h2=2, w2=2)
# yet another example. Can you compute result shape?
reduce(ims, '(b1 b2) h w c -> (b2 h) (b1 w)', 'mean', b1=2)
# rearrange can also take care of lists of arrays with the same shape
x = list(ims)
print(type(x), 'with', len(x), 'tensors of shape', x[0].shape)
# that's how we can stack inputs
# "list axis" becomes first ("b" in this case), and we left it there
rearrange(x, 'b h w c -> b h w c').shape
# but new axis can appear in the other place:
rearrange(x, 'b h w c -> h w c b').shape
# that's equivalent to numpy stacking, but written more explicitly
numpy.array_equal(rearrange(x, 'b h w c -> h w c b'), numpy.stack(x, axis=3))
# ... or we can concatenate along axes
rearrange(x, 'b h w c -> h (b w) c').shape
# which is equivalent to concatenation
numpy.array_equal(rearrange(x, 'b h w c -> h (b w) c'), numpy.concatenate(x, axis=1))
x = rearrange(ims, 'b h w c -> b 1 h w 1 c') # functionality of numpy.expand_dims
print(x.shape)
print(rearrange(x, 'b 1 h w 1 c -> b h w c').shape) # functionality of numpy.squeeze
# compute max in each image individually, then show a difference
x = reduce(ims, 'b h w c -> b () () c', 'max') - ims
rearrange(x, 'b h w c -> h (b w) c')
# repeat along a new axis. New axis can be placed anywhere
repeat(ims[0], 'h w c -> h new_axis w c', new_axis=5).shape
# shortcut
repeat(ims[0], 'h w c -> h 5 w c').shape
# repeat along w (existing axis)
repeat(ims[0], 'h w c -> h (repeat w) c', repeat=3)
# repeat along two existing axes
repeat(ims[0], 'h w c -> (2 h) (2 w) c')
# order of axes matters as usual - you can repeat each element (pixel) 3 times
# by changing order in parenthesis
repeat(ims[0], 'h w c -> h (w repeat) c', repeat=3)
repeated = repeat(ims, 'b h w c -> b h new_axis w c', new_axis=2)
reduced = reduce(repeated, 'b h new_axis w c -> b h w c', 'min')
assert numpy.array_equal(ims, reduced)
# interweaving pixels of different pictures
# all letters are observable
rearrange(ims, '(b1 b2) h w c -> (h b1) (w b2) c ', b1=2)
# interweaving along vertical for couples of images
rearrange(ims, '(b1 b2) h w c -> (h b1) (b2 w) c', b1=2)
# interweaving lines for couples of images
# exercise: achieve the same result without einops in your favourite framework
reduce(ims, '(b1 b2) h w c -> h (b2 w) c', 'max', b1=2)
# color can be also composed into dimension
# ... while image is downsampled
reduce(ims, 'b (h 2) (w 2) c -> (c h) (b w)', 'mean')
# disproportionate resize
reduce(ims, 'b (h 4) (w 3) c -> (h) (b w)', 'mean')
# spilt each image in two halves, compute mean of the two
reduce(ims, 'b (h1 h2) w c -> h2 (b w)', 'mean', h1=2)
# split in small patches and transpose each patch
rearrange(ims, 'b (h1 h2) (w1 w2) c -> (h1 w2) (b w1 h2) c', h2=8, w2=8)
# stop me someone!
rearrange(ims, 'b (h1 h2 h3) (w1 w2 w3) c -> (h1 w2 h3) (b w1 h2 w3) c', h2=2, w2=2, w3=2, h3=2)
rearrange(ims, '(b1 b2) (h1 h2) (w1 w2) c -> (h1 b1 h2) (w1 b2 w2) c', h1=3, w1=3, b2=3)
# patterns can be arbitrarily complicated
reduce(ims, '(b1 b2) (h1 h2 h3) (w1 w2 w3) c -> (h1 w1 h3) (b1 w2 h2 w3 b2) c', 'mean',
h2=2, w1=2, w3=2, h3=2, b2=2)
# subtract background in each image individually and normalize
# pay attention to () - this is composition of 0 axis, a dummy axis with 1 element.
im2 = reduce(ims, 'b h w c -> b () () c', 'max') - ims
im2 /= reduce(im2, 'b h w c -> b () () c', 'max')
rearrange(im2, 'b h w c -> h (b w) c')
# pixelate: first downscale by averaging, then upscale back using the same pattern
averaged = reduce(ims, 'b (h h2) (w w2) c -> b h w c', 'mean', h2=6, w2=8)
repeat(averaged, 'b h w c -> (h h2) (b w w2) c', h2=6, w2=8)
rearrange(ims, 'b h w c -> w (b h) c')
# let's bring color dimension as part of horizontal axis
# at the same time horizontal axis is downsampled by 2x
reduce(ims, 'b (h h2) (w w2) c -> (h w2) (b w c)', 'mean', h2=3, w2=3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load a batch of images to play with
Step2: Composition of axes
Step3: Decomposition of axis
Step4: Order of axes matters
Step5: Meet einops.reduce
Step6: Stack and concatenate
Step7: Addition or removal of axes
Step8: Repeating elements
Step9: Note
Step10: Fancy examples in random order
|
433 | <ASSISTANT_TASK:>
Python Code:
PATH='data/aclImdb/'
TRN_PATH = 'train/all/'
VAL_PATH = 'test/all/'
TRN = f'{PATH}{TRN_PATH}'
VAL = f'{PATH}{VAL_PATH}'
%ls {PATH}
trn_files = !ls {TRN}
trn_files[:10]
review = !cat {TRN}{trn_files[6]}
review[0]
!find {TRN} -name '*.txt' | xargs cat | wc -w
!find {VAL} -name '*.txt' | xargs cat | wc -w
spacy_tok = spacy.load('en')
' '.join([sent.string.strip() for sent in spacy_tok(review[0])])
TEXT = data.Field(lower=True, tokenize="spacy")
bs=64; bptt=70
FILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH)
md = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=10)
pickle.dump(TEXT, open(f'{PATH}models/TEXT.pkl','wb'))
len(md.trn_dl), md.nt, len(md.trn_ds), len(md.trn_ds[0].text)
# 'itos': 'int-to-string'
TEXT.vocab.itos[:12]
# 'stoi': 'string to int'
TEXT.vocab.stoi['the']
md.trn_ds[0].text[:12]
TEXT.numericalize([md.trn_ds[0].text[:12]])
next(iter(md.trn_dl))
em_sz = 200 # size of each embedding vector
nh = 500 # number of hidden activations per layer
nl = 3 # number of layers
opt_fn = partial(optim.Adam, betas=(0.7, 0.99))
learner = md.get_model(opt_fn, em_sz, nh, nl,
dropouti=0.05, dropout=0.05, wdrop=0.1, dropoute=0.02, dropouth=0.05)
learner.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
learner.clip=0.3
learner.fit(3e-3, 4, wds=1e-6, cycle_len=1, cycle_mult=2)
learner.save_encoder('adam1_enc')
learner.load_encoder('adam1_enc')
learner.fit(3e-3, 1, wds=1e-6, cycle_len=10)
learner.save_encoder('adam3_10_enc')
learner.load_encoder('adam3_10_enc')
math.exp(4.165)
pickle.dump(TEXT, open(f'{PATH}models/TEXT.pkl','wb'))
m=learner.model
ss=""". So, it wasn't quite was I was expecting, but I really liked it anyway! The best"""
s = [TEXT.preprocess(ss)]
t=TEXT.numericalize(s)
' '.join(s[0])
# Set batch size to 1
m[0].bs=1
# Turn off dropout
m.eval()
# Reset hidden state
m.reset()
# Get predictions from model
res,*_ = m(t)
# Put the batch size back to what it was
m[0].bs=bs
nexts = torch.topk(res[-1], 10)[1]
[TEXT.vocab.itos[o] for o in to_np(nexts)]
print(ss,"\n")
for i in range(50):
n=res[-1].topk(2)[1]
n = n[1] if n.data[0]==0 else n[0]
print(TEXT.vocab.itos[n.data[0]], end=' ')
res,*_ = m(n[0].unsqueeze(0))
print('...')
TEXT = pickle.load(open(f'{PATH}models/TEXT.pkl','rb'))
IMDB_LABEL = data.Field(sequential=False)
splits = torchtext.datasets.IMDB.splits(TEXT, IMDB_LABEL, 'data/')
t = splits[0].examples[0]
t.label, ' '.join(t.text[:16])
md2 = TextData.from_splits(PATH, splits, bs)
m3 = md2.get_model(opt_fn, 1500, bptt, emb_sz=em_sz, n_hid=nh, n_layers=nl,
dropout=0.1, dropouti=0.4, wdrop=0.5, dropoute=0.05, dropouth=0.3)
m3.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
m3.load_encoder(f'adam3_10_enc')
m3.clip=25.
lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2])
m3.freeze_to(-1)
m3.fit(lrs/2, 1, metrics=[accuracy])
m3.unfreeze()
m3.fit(lrs, 1, metrics=[accuracy], cycle_len=1)
m3.fit(lrs, 7, metrics=[accuracy], cycle_len=2, cycle_save_name='imdb2')
m3.load_cycle('imdb2', 4)
accuracy_np(*m3.predict_with_targs())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look inside the training folder...
Step2: ...and at an example review.
Step3: Sounds like I'd really enjoy Zombiegeddon...
Step4: Before we can analyze text, we must first tokenize it. This refers to the process of splitting a sentence into an array of words (or more generally, into an array of tokens).
Step5: We use Pytorch's torchtext library to preprocess our data, telling it to use the wonderful spacy library to handle tokenization.
Step6: fastai works closely with torchtext. We create a ModelData object for language modeling by taking advantage of LanguageModelData, passing it our torchtext field object, and the paths to our training, test, and validation sets. In this case, we don't have a separate test set, so we'll just use VAL_PATH for that too.
Step7: After building our ModelData object, it automatically fills the TEXT object with a very important attribute
Step8: Here are the number of batches; number of unique tokens in the vocab; number of tokens in the training set; and number of sentences
Step9: This is the start of the mapping from integer IDs to unique tokens.
Step10: Note that in a LanguageModelData object there is only one item in each dataset
Step11: torchtext will handle turning these words into integer IDs for us automatically.
Step12: Our LanguageModelData object will create batches with 64 columns (that's our batch size), and varying sequence lengths of around 80 tokens (that's our bptt parameter - backprop through time).
Step13: Train
Step14: Researchers have found that large amounts of momentum (which we'll learn about later) don't work well with these kinds of RNN models, so we create a version of the Adam optimizer with less momentum than its default of 0.9.
Step15: fastai uses a variant of the state of the art AWD LSTM Language Model developed by Stephen Merity. A key feature of this model is that it provides excellent regularization through Dropout. There is no simple way known (yet!) to find the best values of the dropout parameters below - you just have to experiment...
Step16: As you can see below, I gradually tuned the language model in a few stages. I possibly could have trained it further (it wasn't yet overfitting), but I didn't have time to experiment more. Maybe you can see if you can train it to a better accuracy! (I used lr_find to find a good learning rate, but didn't save the output in this notebook. Feel free to try running it yourself now.)
Step17: In the sentiment analysis section, we'll just need half of the language model - the encoder, so we save that part.
Step18: Language modeling accuracy is generally measured using the metric perplexity, which is simply exp() of the loss function we used.
Step20: Test
Step21: We haven't yet added methods to make it easy to test a language model, so we'll need to manually go through the steps.
Step22: Let's see what the top 10 predictions were for the next word after our short text
Step23: ...and let's see if our model can generate a bit more text all by itself!
Step24: Sentiment
Step25: sequential=False tells torchtext that a text field should not be tokenized (in this case, we just want to store the 'positive' or 'negative' single label).
Step26: fastai can create a ModelData object directly from torchtext splits.
Step27: Because we're fine-tuning a pretrained model, we'll use differential learning rates, and also increase the max gradient for clipping, to allow the SGDR to work better.
|
434 | <ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import lxmls.readers.sentiment_reader as srs
scr = srs.SentimentCorpus("books")
import lxmls.classifiers.multinomial_naive_bayes as mnbb
mnb = mnbb.MultinomialNaiveBayes()
params_nb_sc = mnb.train(scr.train_X,scr.train_y)
y_pred_train = mnb.test(scr.train_X,params_nb_sc)
acc_train = mnb.evaluate(scr.train_y, y_pred_train)
y_pred_test = mnb.test(scr.test_X,params_nb_sc)
acc_test = mnb.evaluate(scr.test_y, y_pred_test)
print("Multinomial Naive Bayes Amazon Sentiment Accuracy train: %f test: %f"%(acc_train,acc_test))
%matplotlib inline
import lxmls.readers.simple_data_set as sds
sd = sds.SimpleDataSet(
nr_examples=100,
g1=[[-1,-1],1],
g2=[[1,1],1],
balance=0.5,
split=[0.5,0,0.5]
)
import lxmls.classifiers.perceptron as percc
perc = percc.Perceptron()
params_perc_sd = perc.train(sd.train_X,sd.train_y)
y_pred_train = perc.test(sd.train_X,params_perc_sd)
acc_train = perc.evaluate(sd.train_y, y_pred_train)
y_pred_test = perc.test(sd.test_X,params_perc_sd)
acc_test = perc.evaluate(sd.test_y, y_pred_test)
print("Perceptron Simple Dataset Accuracy train: %f test: %f"%(acc_train, acc_test))
fig, axis = sd.plot_data("osx")
fig, axis = sd.add_line(fig, axis, params_perc_sd, "Perceptron", "blue")
import lxmls.classifiers.mira as mirac
mira = mirac.Mira()
mira.regularizer = 1.0 # This is lambda
params_mira_sd = mira.train(sd.train_X,sd.train_y)
y_pred_train = mira.test(sd.train_X,params_mira_sd)
acc_train = mira.evaluate(sd.train_y, y_pred_train)
y_pred_test = mira.test(sd.test_X,params_mira_sd)
acc_test = mira.evaluate(sd.test_y, y_pred_test)
print("Mira Simple Dataset Accuracy train: %f test: %f"%(acc_train, acc_test))
fig, axis = sd.add_line(fig, axis, params_mira_sd, "Mira","green")
fig
import lxmls.classifiers.max_ent_batch as mebc
me_lbfgs = mebc.MaxEntBatch()
me_lbfgs.regularizer = 1.0
params_meb_sd = me_lbfgs.train(sd.train_X,sd.train_y)
y_pred_train = me_lbfgs.test(sd.train_X,params_meb_sd)
acc_train = me_lbfgs.evaluate(sd.train_y, y_pred_train)
y_pred_test = me_lbfgs.test(sd.test_X,params_meb_sd)
acc_test = me_lbfgs.evaluate(sd.test_y, y_pred_test)
print(
"Max-Ent batch Simple Dataset Accuracy train: %f test: %f" %
(acc_train,acc_test)
)
fig, axis = sd.add_line(fig, axis, params_meb_sd, "Max-Ent-Batch","orange")
fig
params_meb_sc = me_lbfgs.train(scr.train_X,scr.train_y)
y_pred_train = me_lbfgs.test(scr.train_X,params_meb_sc)
acc_train = me_lbfgs.evaluate(scr.train_y, y_pred_train)
y_pred_test = me_lbfgs.test(scr.test_X,params_meb_sc)
acc_test = me_lbfgs.evaluate(scr.test_y, y_pred_test)
print(
"Max-Ent Batch Amazon Sentiment Accuracy train: %f test: %f" %
(acc_train, acc_test)
)
import lxmls.classifiers.max_ent_online as meoc
me_sgd = meoc.MaxEntOnline()
me_sgd.regularizer = 1.0
params_meo_sc = me_sgd.train(scr.train_X,scr.train_y)
y_pred_train = me_sgd.test(scr.train_X,params_meo_sc)
acc_train = me_sgd.evaluate(scr.train_y, y_pred_train)
y_pred_test = me_sgd.test(scr.test_X,params_meo_sc)
acc_test = me_sgd.evaluate(scr.test_y, y_pred_test)
print(
"Max-Ent Online Amazon Sentiment Accuracy train: %f test: %f" %
(acc_train, acc_test)
)
import lxmls.classifiers.svm as svmc
svm = svmc.SVM()
svm.regularizer = 1.0 # This is lambda
params_svm_sd = svm.train(sd.train_X,sd.train_y)
y_pred_train = svm.test(sd.train_X,params_svm_sd)
acc_train = svm.evaluate(sd.train_y, y_pred_train)
y_pred_test = svm.test(sd.test_X,params_svm_sd)
acc_test = svm.evaluate(sd.test_y, y_pred_test)
print("SVM Online Simple Dataset Accuracy train: {} test: {}".format(acc_train,acc_test))
fig,axis = sd.add_line(fig,axis,params_svm_sd,"SVM","orange")
params_svm_sc = svm.train(scr.train_X,scr.train_y)
y_pred_train = svm.test(scr.train_X,params_svm_sc)
acc_train = svm.evaluate(scr.train_y, y_pred_train)
y_pred_test = svm.test(scr.test_X,params_svm_sc)
acc_test = svm.evaluate(scr.test_y, y_pred_test)
print("SVM Online Amazon Sentiment Accuracy train: {} test: {}".format(acc_train,acc_test))
fig, axis = sd.add_line(fig, axis, params_svm_sd, "SVM", "yellow")
fig
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This will load the data in a bag-of-words representation where rare words (occurring less than 5 times in the training data) are removed.
Step2: Observe that words that were not observed at training time cause problems at test time. Why? To solve this problem, apply a simple add-one smoothing technique.
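A minimal sketch of add-one (Laplace) smoothing for the class-conditional word probabilities (the count matrix and its (n_words, n_classes) shape are illustrative assumptions, not variables from the lab code):
import numpy as np

def laplace_smoothed_likelihood(word_counts_per_class):
    # word_counts_per_class: assumed (n_words, n_classes) matrix of raw counts.
    # Adding 1 to every count removes zero probabilities for words unseen in a class.
    n_words = word_counts_per_class.shape[0]
    return (word_counts_per_class + 1.0) / (word_counts_per_class.sum(axis=0, keepdims=True) + n_words)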
Step3: Run the perceptron algorithm on the simple dataset previously generated and report its train and test set accuracy
Step4: Change the code to save the intermediate weight vectors, and plot them every five iterations. What do you observe?
Step5: Compare the results achieved and separating hyperplanes found.
Step6: Exercise 1.4
Step7: Train a maximum entropy model using L-BFGS, on the Amazon dataset (try different values of $\lambda$) and report training and test set accuracy. What do you observe?
Step8: Now, fix $\lambda$ = 1.0 and train with SGD (you might try to adjust the initial step). Compare the objective values obtained during training with those obtained with L-BFGS. What do you observe?
Step9: Exercise 1.5
Step10: Compare the results achieved and separating hyperplanes found.
|
435 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import IPython
import sys
from music21 import *
import numpy as np
from grammar import *
from qa import *
from preprocess import *
from music_utils import *
from data_utils import *
from keras.models import load_model, Model
from keras.layers import Dense, Activation, Dropout, Input, LSTM, Reshape, Lambda, RepeatVector
from keras.initializers import glorot_uniform
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras import backend as K
IPython.display.Audio('./data/30s_seq.mp3')
X, Y, n_values, indices_values = load_music_utils()
print('shape of X:', X.shape)
print('number of training examples:', X.shape[0])
print('Tx (length of sequence):', X.shape[1])
print('total # of unique values:', n_values)
print('Shape of Y:', Y.shape)
n_a = 64
reshapor = Reshape((1, 78)) # Used in Step 2.B of djmodel(), below
LSTM_cell = LSTM(n_a, return_state = True) # Used in Step 2.C
densor = Dense(n_values, activation='softmax') # Used in Step 2.D
# GRADED FUNCTION: djmodel
def djmodel(Tx, n_a, n_values):
    """
    Implement the model

    Arguments:
    Tx -- length of the sequence in a corpus
    n_a -- the number of activations used in our model
    n_values -- number of unique values in the music data

    Returns:
    model -- a keras model with the
    """
# Define the input of your model with a shape
X = Input(shape=(Tx, n_values))
# Define s0, initial hidden state for the decoder LSTM
a0 = Input(shape=(n_a,), name='a0')
c0 = Input(shape=(n_a,), name='c0')
a = a0
c = c0
### START CODE HERE ###
# Step 1: Create empty list to append the outputs while you iterate (≈1 line)
outputs = None
# Step 2: Loop
for t in range(Tx):
# Step 2.A: select the "t"th time step vector from X.
x = None
# Step 2.B: Use reshapor to reshape x to be (1, n_values) (≈1 line)
x = None
# Step 2.C: Perform one step of the LSTM_cell
a, _, c = None
# Step 2.D: Apply densor to the hidden state output of LSTM_Cell
out = None
# Step 2.E: add the output to "outputs"
None
# Step 3: Create model instance
model = None
### END CODE HERE ###
return model
model = djmodel(Tx = 30 , n_a = 64, n_values = 78)
opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
m = 60
a0 = np.zeros((m, n_a))
c0 = np.zeros((m, n_a))
model.fit([X, a0, c0], list(Y), epochs=100)
# GRADED FUNCTION: music_inference_model
def music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 100):
    """
    Uses the trained "LSTM_cell" and "densor" from model() to generate a sequence of values.

    Arguments:
    LSTM_cell -- the trained "LSTM_cell" from model(), Keras layer object
    densor -- the trained "densor" from model(), Keras layer object
    n_values -- integer, number of unique values
    n_a -- number of units in the LSTM_cell
    Ty -- integer, number of time steps to generate

    Returns:
    inference_model -- Keras model instance
    """
# Define the input of your model with a shape
x0 = Input(shape=(1, n_values))
# Define s0, initial hidden state for the decoder LSTM
a0 = Input(shape=(n_a,), name='a0')
c0 = Input(shape=(n_a,), name='c0')
a = a0
c = c0
x = x0
### START CODE HERE ###
# Step 1: Create an empty list of "outputs" to later store your predicted values (≈1 line)
outputs = None
# Step 2: Loop over Ty and generate a value at every time step
for t in range(None):
# Step 2.A: Perform one step of LSTM_cell (≈1 line)
a, _, c = None
# Step 2.B: Apply Dense layer to the hidden state output of the LSTM_cell (≈1 line)
out = None
# Step 2.C: Append the prediction "out" to "outputs". out.shape = (None, 78) (≈1 line)
None
# Step 2.D: Select the next value according to "out", and set "x" to be the one-hot representation of the
# selected value, which will be passed as the input to LSTM_cell on the next step. We have provided
# the line of code you need to do this.
x = None
# Step 3: Create model instance with the correct "inputs" and "outputs" (≈1 line)
inference_model = None
### END CODE HERE ###
return inference_model
inference_model = music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 50)
x_initializer = np.zeros((1, 1, 78))
a_initializer = np.zeros((1, n_a))
c_initializer = np.zeros((1, n_a))
# GRADED FUNCTION: predict_and_sample
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
c_initializer = c_initializer):
    """
    Predicts the next value of values using the inference model.

    Arguments:
    inference_model -- Keras model instance for inference time
    x_initializer -- numpy array of shape (1, 1, 78), one-hot vector initializing the values generation
    a_initializer -- numpy array of shape (1, n_a), initializing the hidden state of the LSTM_cell
    c_initializer -- numpy array of shape (1, n_a), initializing the cell state of the LSTM_cell

    Returns:
    results -- numpy-array of shape (Ty, 78), matrix of one-hot vectors representing the values generated
    indices -- numpy-array of shape (Ty, 1), matrix of indices representing the values generated
    """
### START CODE HERE ###
# Step 1: Use your inference model to predict an output sequence given x_initializer, a_initializer and c_initializer.
pred = None
# Step 2: Convert "pred" into an np.array() of indices with the maximum probabilities
indices = None
# Step 3: Convert indices to one-hot vectors, the shape of the results should be (1, )
results = None
### END CODE HERE ###
return results, indices
results, indices = predict_and_sample(inference_model, x_initializer, a_initializer, c_initializer)
print("np.argmax(results[12]) =", np.argmax(results[12]))
print("np.argmax(results[17]) =", np.argmax(results[17]))
print("list(indices[12:18]) =", list(indices[12:18]))
out_stream = generate_music(inference_model)
IPython.display.Audio('./data/30s_trained_model.mp3')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 - Problem statement
Step2: We have taken care of the preprocessing of the musical data to render it in terms of musical "values." You can informally think of each "value" as a note, which comprises a pitch and a duration. For example, if you press down a specific piano key for 0.5 seconds, then you have just played a note. In music theory, a "value" is actually more complicated than this--specifically, it also captures the information needed to play multiple notes at the same time. For example, when playing a music piece, you might press down two piano keys at the same time (playing multiple notes at the same time generates what's called a "chord"). But we don't need to worry about the details of music theory for this assignment. For the purpose of this assignment, all you need to know is that we will obtain a dataset of values, and will learn an RNN model to generate sequences of values.
Step3: You have just loaded the following
Step4: Here's how you can create a Keras model with multiple inputs and outputs. If you're building an RNN where even at test time entire input sequence $x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, \ldots, x^{\langle T_x \rangle}$ were given in advance, for example if the inputs were words and the output was a label, then Keras has simple built-in functions to build the model. However, for sequence generation, at test time we don't know all the values of $x^{\langle t\rangle}$ in advance; instead we generate them one at a time using $x^{\langle t\rangle} = y^{\langle t-1 \rangle}$. So the code will be a bit more complicated, and you'll need to implement your own for-loop to iterate over the different time steps.
Step6: Each of reshapor, LSTM_cell and densor are now layer objects, and you can use them to implement djmodel(). In order to propagate a Keras tensor object X through one of these layers, use layer_object(X) (or layer_object([X,Y]) if it requires multiple inputs.). For example, reshapor(X) will propagate X through the Reshape((1,78)) layer defined above.
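For instance, one iteration of such a loop could be wired up roughly as follows (a sketch reusing the template's names X, t, a and c; it illustrates calling shared layer objects and is not presented as the official solution):
x = Lambda(lambda z: z[:, t, :])(X)           # select the "t"-th time-step vector
x = reshapor(x)                               # reshape to (batch, 1, n_values)
a, _, c = LSTM_cell(x, initial_state=[a, c])  # one step of the shared LSTM cell
out = densor(a)                               # shared softmax layer over the n_values classes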
Step7: Run the following cell to define your model. We will use Tx=30, n_a=64 (the dimension of the LSTM activations), and n_values=78. This cell may take a few seconds to run.
Step8: You now need to compile your model to be trained. We will use Adam and a categorical cross-entropy loss.
Step9: Finally, let's initialize a0 and c0 for the LSTM's initial state to be zero.
Step10: Let's now fit the model! We will turn Y to a list before doing so, since the cost function expects Y to be provided in this format (one list item per time-step). So list(Y) is a list with 30 items, where each of the list items is of shape (60,78). Let's train for 100 epochs. This will take a few minutes.
Step12: You should see the model loss going down. Now that you have trained a model, let's go on to the final section to implement an inference algorithm, and generate some music!
Step13: Run the cell below to define your inference model. This model is hard coded to generate 50 values.
Step14: Finally, this creates the zero-valued vectors you will use to initialize x and the LSTM state variables a and c.
Step16: Exercise
Step17: Expected Output
Step18: To listen to your music, click File->Open... Then go to "output/" and download "my_music.midi". Either play it on your computer with an application that can read midi files if you have one, or use one of the free online "MIDI to mp3" conversion tools to convert this to mp3.
|
436 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from parcels import Variable, Field, FieldSet, ParticleSet, ScipyParticle, AdvectionRK4, plotTrajectoriesFile
import numpy as np
from datetime import timedelta as delta
import netCDF4
import matplotlib.pyplot as plt
# Velocity fields
fname = r'GlobCurrent_example_data/*.nc'
filenames = {'U': fname, 'V': fname}
variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'}
dimensions = {'U': {'lat': 'lat', 'lon': 'lon', 'time': 'time'},
'V': {'lat': 'lat', 'lon': 'lon', 'time': 'time'},
}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)
# In order to assign the same grid to the tracer field, it is convenient to load a single velocity file
fname1 = r'GlobCurrent_example_data/20030101000000-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc'
filenames1 = {'U': fname1, 'V': fname1}
field_for_size = FieldSet.from_netcdf(filenames1, variables, dimensions) # this field has the same variables and dimensions as the other velocity fields
# Adding the tracer field to the FieldSet
dimsC = [len(field_for_size.U.lat),len(field_for_size.U.lon)] # it has to have the same dimensions as the velocity fields
dataC = np.zeros([dimsC[0],dimsC[1]])
fieldC = Field('C', dataC, grid=field_for_size.U.grid, interp_method='nearest') # the new Field will be called C, for tracer Concentration. For mass conservation, interp_method='nearest'
fieldset.add_field(fieldC) # C field added to the velocity FieldSet
fieldset.C.to_write = True # enabling the writing of Field C during execution
fieldset.C.show() # our new C field has been added to the FieldSet
fieldset.add_constant('a', 10)
fieldset.add_constant('b', .2)
fieldset.add_constant('weight', .01)
class VectorParticle(ScipyParticle):
c = Variable('c', dtype=np.float32, initial=100.) # particle concentration c is initialized with a non-zero value
def Interaction(particle, fieldset, time):
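    # Exchange tracer mass between the particle and the nearest C-field grid cell,
    # using the discretized transfer term a*C_field - b*c_particle computed below.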
deltaC = (fieldset.a*fieldset.C[particle]-fieldset.b*particle.c) # the exchange is obtained as a discretized mass transfer equation
xi, yi = particle.xi[fieldset.C.igrid], particle.yi[fieldset.C.igrid],
if abs(particle.lon - fieldset.C.grid.lon[xi+1]) < abs(particle.lon - fieldset.C.grid.lon[xi]):
xi += 1
if abs(particle.lat - fieldset.C.grid.lat[yi+1]) < abs(particle.lat - fieldset.C.grid.lat[yi]):
yi += 1
particle.c += deltaC
fieldset.C.data[0, yi, xi] += -deltaC*fieldset.weight # weight, defined as a constant for the FieldSet, acts here as a conversion factor between c_particle and C_field
fieldset.C.grid.time[0] = time # updating Field C time
def WriteInitial(particle, fieldset, time): # will be used to store the initial conditions of fieldset.C
fieldset.C.grid.time[0] = time
pset = ParticleSet(fieldset=fieldset, pclass=VectorParticle, lon=[24.5], lat=[-34.8]) # for simplicity, we'll track a single particle here
pset.show(field=fieldset.C) # Initial particle location and the tracer field C
output_file = pset.ParticleFile(name=r'interaction.nc', outputdt=delta(days=1))
pset.execute(WriteInitial, dt=0., output_file=output_file)
pset.execute(AdvectionRK4 + pset.Kernel(Interaction), # the particle will FIRST be transported by currents and THEN interact with the field
dt=delta(days=1),
runtime=delta(days=24), # we are going to track the particle and save its trajectory and tracer concentration for 24 days
output_file=output_file)
output_file.close()
pset_traj = netCDF4.Dataset(r'interaction.nc')
print(pset_traj['c'][:])
plotTrajectoriesFile('interaction.nc');
c_results = fieldset.C.data[0,:,:].copy() # Copying the final field data in a new array
c_results[[field_for_size.U.data==0][0][0]]= np.nan # using a mask for fieldset.C.data on land
c_results[c_results==0] = np.nan # masking the field where its value is zero -- areas that have not been modified by the particle, for clearer plotting
try: # Works if Cartopy is installed
import cartopy
import cartopy.crs as ccrs
extent = [10, 33, -37, -29]
X = fieldset.U.lon
Y = fieldset.U.lat
plt.figure(figsize=(12, 6))
ax = plt.axes(projection=ccrs.Mercator())
ax.set_extent(extent)
ax.add_feature(cartopy.feature.OCEAN, facecolor='lightgrey')
ax.add_feature(cartopy.feature.LAND, edgecolor='black', facecolor='floralwhite')
gl=ax.gridlines(xlocs = np.linspace(10,34,13) , ylocs=np.linspace(-29,-37,9),draw_labels=True)
gl.right_labels = False
gl.bottom_labels = False
xx, yy = np.meshgrid(X,Y)
results = ax.pcolormesh(xx,yy,(c_results),transform=ccrs.PlateCarree(),vmin=0,)
cbar=plt.colorbar(mappable = results, ax=ax)
cbar.ax.text(.8,.070,'$C_{field}$ concentration', rotation=270, fontsize=12)
except:
print('Please install the Cartopy package.')
x_centers, y_centers = np.meshgrid(fieldset.U.lon-np.diff(fieldset.U.lon[:2])/2, fieldset.U.lat-np.diff(fieldset.U.lat[:2])/2)
fig,ax = plt.subplots(1,1,figsize=(10,7),constrained_layout=True)
ax.set_facecolor('lightgrey') # For visual coherence with the plot above
fieldplot=ax.pcolormesh(x_centers[-28:-17,22:41],y_centers[-28:-17,22:41],c_results[-28:-18,22:40], vmin=0, vmax=0.2,cmap='viridis')
# Zoom on the area of interest
field_cbar = plt.colorbar(fieldplot,ax=ax)
field_cbar.ax.text(.6,.070,'$C_{field}$ concentration', rotation=270, fontsize=12)
particle = plt.scatter(pset_traj['lon'][:].data[0,:],pset_traj['lat'][:].data[0,:], c=pset_traj['c'][:].data[0,:],vmin=0, s=100, edgecolor='white')
particle_cbar = plt.colorbar(particle,ax=ax, location = 'top')
particle_cbar.ax.text(40,300,'$c_{particle}$ concentration', fontsize=12);
fig, ax = plt.subplots(5,5, figsize=(30,20))
daycounter = 1
for i in range(len(ax)):
for j in range(len(ax)):
data = netCDF4.Dataset(r'interaction_00'+ '%02d' % daycounter+'C.nc')
c_results = data['C'][0,0,:,:].data.copy() # copying the final field data in a new array
c_results[[field_for_size.U.data==0][0][0]]= np.nan # using a mask for fieldset.C.data on land
c_results[c_results==0] = np.nan # masking the field where its value is zero -- areas that have not been modified by the particle, for clearer plotting
ax[i,j].set_facecolor('lightgrey') # For visual coherence with the plots above
fieldplot=ax[i,j].pcolormesh(x_centers[-28:-17,22:41],y_centers[-28:-17,22:41],c_results[-28:-18,22:40], vmin=0, vmax=0.2,cmap='viridis')
particle = ax[i,j].scatter(pset_traj['lon'][:].data[0,daycounter-1],pset_traj['lat'][:].data[0,daycounter-1], c=pset_traj['c'][:].data[0,daycounter-1],vmin=0, vmax=100, s=100, edgecolor='white')
# plotting particle location at current time step -- daycounter-1 due to different indexing
ax[i,j].set_title('Day '+ str(daycounter-1))
daycounter +=1 # next day
fig.subplots_adjust(right=0.8)
fig.subplots_adjust(top=0.8)
cbar_ax = fig.add_axes([0.82, 0.12, 0.03, 0.7])
fig.colorbar(fieldplot, cax=cbar_ax)
cbar_ax.tick_params(labelsize=18)
cbar_ax.text(.4,.08,'$C_{field}$ concentration', fontsize=25, rotation=270)
cbar_ax1 = fig.add_axes([0.1, .85, .7, 0.04])
fig.colorbar(particle, cax=cbar_ax1, orientation = 'horizontal')
cbar_ax1.tick_params(labelsize=18)
cbar_ax1.text(42,170,'$c_{particle}$ concentration', fontsize=25);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this specific example, particles will be advected by surface ocean velocities stored in netCDF files in the folder GlobCurrent_example_data. We will store these in a FieldSet object, and then add a Field to it to represent the tracer field. This latter field will be initialized with zeroes, as we assume that this tracer is absent on the ocean surface and released by particles only. Note that, in order to conserve mass, it is important to set interp_method='nearest' for the tracer Field.
Step2: Some global parameters have to be defined, such as $a$ and $b$ of Eq.1, and a weight that works as a conversion factor from $\Delta c_{particle}$ to $C_{field}$.
Step3: We will now define a new particle class. A VectorParticle is a ScipyParticle having a Variable to store the current tracer concentration c associated with it. As in this case we want our particles to release a tracer into a clean field, we will initialize c with an arbitrary value of 100.
Step4: Three things are worth noticing in the code above
Step5: Now we are going to execute the advection of the particle and the simultaneous release of the tracer it carries. We will thus add the interactionKernel defined above to the built-in Kernel AdvectionRK4.
Step6: We can see that $c_{particle}$ has been saved along with particle trajectory, as expected.
Step7: But what about fieldset.C? We can see that it has been accordingly modified during particle motion. Using fieldset.C we can access the field as resulting at the end of the run, with no information about the previous time steps.
Step8: When looking at tracer concentrations, we see that $c_{particle}$ decreases along its trajectory (right to left), as it is releasing the tracer it carries. Accordingly, values of $C_{field}$ provided by particle interaction progressively reduce along the particle's route.
Step9: Finally, to see the C field in time we have to load the .nc files produced during the run. In the following plots, particle location and field values are shown at each time step.
|
437 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from alibi.explainers import AnchorTabular
from alibi.datasets import fetch_adult
from minio import Minio
from minio.error import ResponseError
from joblib import dump, load
import dill
import time
import json
from subprocess import run, Popen, PIPE
from alibi_detect.utils.data import create_outlier_batch
MINIO_HOST="minio-service.kubeflow:9000"
MINIO_ACCESS_KEY="minio"
MINIO_SECRET_KEY="minio123"
MINIO_MODEL_BUCKET="seldon"
INCOME_MODEL_PATH="sklearn/income/model"
EXPLAINER_MODEL_PATH="sklearn/income/explainer"
OUTLIER_MODEL_PATH="sklearn/income/outlier"
DEPLOY_NAMESPACE="admin"
def get_minio():
return Minio(MINIO_HOST,
access_key=MINIO_ACCESS_KEY,
secret_key=MINIO_SECRET_KEY,
secure=False)
minioClient = get_minio()
buckets = minioClient.list_buckets()
for bucket in buckets:
print(bucket.name, bucket.creation_date)
if not minioClient.bucket_exists(MINIO_MODEL_BUCKET):
minioClient.make_bucket(MINIO_MODEL_BUCKET)
adult = fetch_adult()
adult.keys()
data = adult.data
target = adult.target
feature_names = adult.feature_names
category_map = adult.category_map
from alibi.utils.data import gen_category_map
np.random.seed(0)
data_perm = np.random.permutation(np.c_[data, target])
data = data_perm[:,:-1]
target = data_perm[:,-1]
idx = 30000
X_train,Y_train = data[:idx,:], target[:idx]
X_test, Y_test = data[idx+1:,:], target[idx+1:]
ordinal_features = [x for x in range(len(feature_names)) if x not in list(category_map.keys())]
ordinal_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = list(category_map.keys())
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', ordinal_transformer, ordinal_features),
('cat', categorical_transformer, categorical_features)])
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=50)
model=Pipeline(steps=[("preprocess",preprocessor),("model",clf)])
model.fit(X_train,Y_train)
def predict_fn(x):
return model.predict(x)
#predict_fn = lambda x: clf.predict(preprocessor.transform(x))
print('Train accuracy: ', accuracy_score(Y_train, predict_fn(X_train)))
print('Test accuracy: ', accuracy_score(Y_test, predict_fn(X_test)))
dump(model, 'model.joblib')
print(get_minio().fput_object(MINIO_MODEL_BUCKET, f"{INCOME_MODEL_PATH}/model.joblib", 'model.joblib'))
model.predict(X_train)
explainer = AnchorTabular(predict_fn, feature_names, categorical_names=category_map)
explainer.fit(X_train, disc_perc=[25, 50, 75])
with open("explainer.dill", "wb") as dill_file:
dill.dump(explainer, dill_file)
dill_file.close()
print(get_minio().fput_object(MINIO_MODEL_BUCKET, f"{EXPLAINER_MODEL_PATH}/explainer.dill", 'explainer.dill'))
model.predict(X_train)
idx = 0
class_names = adult.target_names
print('Prediction: ', class_names[explainer.predict_fn(X_test[idx].reshape(1, -1))[0]])
explanation = explainer.explain(X_test[idx], threshold=0.95)
print('Anchor: %s' % (' AND '.join(explanation['names'])))
print('Precision: %.2f' % explanation['precision'])
print('Coverage: %.2f' % explanation['coverage'])
from alibi_detect.od import IForest
od = IForest(
threshold=0.,
n_estimators=200,
)
od.fit(X_train)
np.random.seed(0)
perc_outlier = 5
threshold_batch = create_outlier_batch(X_train, Y_train, n_samples=1000, perc_outlier=perc_outlier)
X_threshold, y_threshold = threshold_batch.data.astype('float'), threshold_batch.target
#X_threshold = (X_threshold - mean) / stdev
print('{}% outliers'.format(100 * y_threshold.mean()))
od.infer_threshold(X_threshold, threshold_perc=100-perc_outlier)
print('New threshold: {}'.format(od.threshold))
threshold = od.threshold
X_outlier = [[300, 4, 4, 2, 1, 4, 4, 0, 0, 0, 600, 9]]
od.predict(
X_outlier
)
from alibi_detect.utils.saving import save_detector, load_detector
from os import listdir
from os.path import isfile, join
filepath="ifoutlier"
save_detector(od, filepath)
onlyfiles = [f for f in listdir(filepath) if isfile(join(filepath, f))]
for filename in onlyfiles:
print(filename)
print(get_minio().fput_object(MINIO_MODEL_BUCKET, f"{OUTLIER_MODEL_PATH}/{filename}", join(filepath, filename)))
secret = f"""apiVersion: v1
kind: Secret
metadata:
name: seldon-init-container-secret
namespace: {DEPLOY_NAMESPACE}
type: Opaque
stringData:
AWS_ACCESS_KEY_ID: {MINIO_ACCESS_KEY}
AWS_SECRET_ACCESS_KEY: {MINIO_SECRET_KEY}
AWS_ENDPOINT_URL: http://{MINIO_HOST}
USE_SSL: "false"
with open("secret.yaml","w") as f:
f.write(secret)
run("cat secret.yaml | kubectl apply -f -", shell=True)
sa = f"""apiVersion: v1
kind: ServiceAccount
metadata:
name: minio-sa
namespace: {DEPLOY_NAMESPACE}
secrets:
- name: seldon-init-container-secret
with open("sa.yaml","w") as f:
f.write(sa)
run("kubectl apply -f sa.yaml", shell=True)
model_yaml=f"""apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: income-classifier
namespace: {DEPLOY_NAMESPACE}
spec:
predictors:
- componentSpecs:
graph:
implementation: SKLEARN_SERVER
modelUri: s3://{MINIO_MODEL_BUCKET}/{INCOME_MODEL_PATH}
envSecretRefName: seldon-init-container-secret
name: classifier
logger:
mode: all
explainer:
type: AnchorTabular
modelUri: s3://{MINIO_MODEL_BUCKET}/{EXPLAINER_MODEL_PATH}
envSecretRefName: seldon-init-container-secret
name: default
replicas: 1
with open("model.yaml","w") as f:
f.write(model_yaml)
run("kubectl apply -f model.yaml", shell=True)
run(f"kubectl rollout status -n {DEPLOY_NAMESPACE} deploy/$(kubectl get deploy -l seldon-deployment-id=income-classifier -o jsonpath='{{.items[0].metadata.name}}' -n {DEPLOY_NAMESPACE})", shell=True)
run(f"kubectl rollout status -n {DEPLOY_NAMESPACE} deploy/$(kubectl get deploy -l seldon-deployment-id=income-classifier -o jsonpath='{{.items[1].metadata.name}}' -n {DEPLOY_NAMESPACE})", shell=True)
payload='{"data": {"ndarray": [[53,4,0,2,8,4,4,0,0,0,60,9]]}}'
cmd=f"""curl -d '{payload}' \
http://income-classifier-default.{DEPLOY_NAMESPACE}:8000/api/v1.0/predictions \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
print(raw)
payload='{"data": {"ndarray": [[53,4,0,2,8,4,4,0,0,0,60,9]]}}'
cmd=f"""curl -d '{payload}' \
http://income-classifier-default-explainer.{DEPLOY_NAMESPACE}:9000/api/v1.0/explain \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
print(raw)
outlier_yaml=f"""apiVersion: serving.knative.dev/v1
kind: Service
metadata:
name: income-outlier
namespace: {DEPLOY_NAMESPACE}
spec:
template:
metadata:
annotations:
autoscaling.knative.dev/minScale: "1"
spec:
containers:
- image: seldonio/alibi-detect-server:1.2.2-dev_alibidetect
imagePullPolicy: IfNotPresent
args:
- --model_name
- adultod
- --http_port
- '8080'
- --protocol
- seldon.http
- --storage_uri
- s3://{MINIO_MODEL_BUCKET}/{OUTLIER_MODEL_PATH}
- --reply_url
- http://default-broker
- --event_type
- io.seldon.serving.inference.outlier
- --event_source
- io.seldon.serving.incomeod
- OutlierDetector
envFrom:
- secretRef:
name: seldon-init-container-secret
with open("outlier.yaml","w") as f:
f.write(outlier_yaml)
run("kubectl apply -f outlier.yaml", shell=True)
trigger_outlier_yaml=f"""apiVersion: eventing.knative.dev/v1alpha1
kind: Trigger
metadata:
name: income-outlier-trigger
namespace: {DEPLOY_NAMESPACE}
spec:
filter:
sourceAndType:
type: io.seldon.serving.inference.request
subscriber:
ref:
apiVersion: serving.knative.dev/v1alpha1
kind: Service
name: income-outlier
with open("outlier_trigger.yaml","w") as f:
f.write(trigger_outlier_yaml)
run("kubectl apply -f outlier_trigger.yaml", shell=True)
run(f"kubectl rollout status -n {DEPLOY_NAMESPACE} deploy/$(kubectl get deploy -l serving.knative.dev/service=income-outlier -o jsonpath='{{.items[0].metadata.name}}' -n {DEPLOY_NAMESPACE})", shell=True)
event_display=f"""apiVersion: apps/v1
kind: Deployment
metadata:
name: event-display
namespace: {DEPLOY_NAMESPACE}
spec:
replicas: 1
selector:
matchLabels: &labels
app: event-display
template:
metadata:
labels: *labels
spec:
containers:
- name: helloworld-go
# Source code: https://github.com/knative/eventing-contrib/tree/master/cmd/event_display
image: gcr.io/knative-releases/knative.dev/eventing-contrib/cmd/event_display@sha256:f4628e97a836c77ed38bd3b6fd3d0b06de4d5e7db6704772fe674d48b20bd477
---
kind: Service
apiVersion: v1
metadata:
name: event-display
namespace: {DEPLOY_NAMESPACE}
spec:
selector:
app: event-display
ports:
- protocol: TCP
port: 80
targetPort: 8080
---
apiVersion: eventing.knative.dev/v1alpha1
kind: Trigger
metadata:
name: income-outlier-display
namespace: {DEPLOY_NAMESPACE}
spec:
broker: default
filter:
attributes:
type: io.seldon.serving.inference.outlier
subscriber:
ref:
apiVersion: v1
kind: Service
name: event-display
with open("event_display.yaml","w") as f:
f.write(event_display)
run("kubectl apply -f event_display.yaml", shell=True)
run(f"kubectl rollout status -n {DEPLOY_NAMESPACE} deploy/event-display -n {DEPLOY_NAMESPACE}", shell=True)
def predict():
payload='{"data": {"ndarray": [[300, 4, 4, 2, 1, 4, 4, 0, 0, 0, 600, 9]]}}'
    cmd=f"""curl -d '{payload}' \
http://income-classifier-default.{DEPLOY_NAMESPACE}:8000/api/v1.0/predictions \
-H "Content-Type: application/json"
    """
    ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
print(raw)
def get_outlier_event_display_logs():
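    # Scrape the event-display pod logs and return the latest parsed outlier payload, or None if none has arrived yet.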
cmd=f"kubectl logs $(kubectl get pod -l app=event-display -o jsonpath='{{.items[0].metadata.name}}' -n {DEPLOY_NAMESPACE}) -n {DEPLOY_NAMESPACE}"
ret = Popen(cmd, shell=True,stdout=PIPE)
res = ret.stdout.read().decode("utf-8").split("\n")
data= []
for i in range(0,len(res)):
if res[i] == 'Data,':
j = json.loads(json.loads(res[i+1]))
if "is_outlier"in j["data"].keys():
data.append(j)
if len(data) > 0:
return data[-1]
else:
return None
j = None
while j is None:
predict()
print("Waiting for outlier logs, sleeping")
time.sleep(2)
j = get_outlier_event_display_logs()
print(j)
print("Outlier",j["data"]["is_outlier"]==[1])
run(f"kubectl delete sdep income-classifier -n {DEPLOY_NAMESPACE}", shell=True)
run(f"kubectl delete ksvc income-outlier -n {DEPLOY_NAMESPACE}", shell=True)
run(f"kubectl delete sa minio-sa -n {DEPLOY_NAMESPACE}", shell=True)
run(f"kubectl delete secret seldon-init-container-secret -n {DEPLOY_NAMESPACE}", shell=True)
run(f"kubectl delete deployment event-display -n {DEPLOY_NAMESPACE}", shell=True)
run(f"kubectl delete svc event-display -n {DEPLOY_NAMESPACE}", shell=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train Model
Step2: Note that for your own datasets you can use our utility function gen_category_map to create the category map
Step3: Define shuffled training and test set
Step4: Create feature transformation pipeline
Step5: Categorical features
Step6: Combine and fit
Step7: Train Random Forest model
Step8: Define predict function
Step9: Train Explainer
Step10: Discretize the ordinal features into quartiles
Step11: Get Explanation
Step12: We set the precision threshold to 0.95. This means that predictions on observations where the anchor holds will be the same as the prediction on the explained instance at least 95% of the time.
Step13: Train Outlier Detector
Step17: Deploy Seldon Core Model
Step19: Make a prediction request
Step21: Make an explanation request
Step24: Deploy Outlier Detector
Step26: Deploy KNative Eventing Event Display
Step28: Test Outlier Detection
Step29: Clean Up Resources
|
438 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --quiet --upgrade --pre tensorflow tensorflow-datasets
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.experimental import dtensor
def configure_virtual_cpus(ncpu):
phy_devices = tf.config.list_physical_devices('CPU')
tf.config.set_logical_device_configuration(
phy_devices[0],
[tf.config.LogicalDeviceConfiguration()] * ncpu)
configure_virtual_cpus(8)
tf.config.list_logical_devices('CPU')
devices = [f'CPU:{i}' for i in range(8)]
tf.keras.backend.experimental.enable_tf_random_generator()
tf.keras.utils.set_random_seed(1337)
mesh = dtensor.create_mesh([("batch", 8)], devices=devices)
example_weight_layout = dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh) # or
example_weight_layout = dtensor.Layout.replicated(mesh, rank=2)
example_data_layout = dtensor.Layout(['batch', dtensor.UNSHARDED], mesh) # or
example_data_layout = dtensor.Layout.batch_sharded(mesh, 'batch', rank=2)
unsharded_layout_2d = dtensor.Layout.replicated(mesh, 2)
unsharded_layout_1d = dtensor.Layout.replicated(mesh, 1)
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128,
activation='relu',
name='d1',
kernel_layout=unsharded_layout_2d,
bias_layout=unsharded_layout_1d),
tf.keras.layers.Dense(10,
name='d2',
kernel_layout=unsharded_layout_2d,
bias_layout=unsharded_layout_1d)
])
for weight in model.weights:
print(f'Weight name: {weight.name} with layout: {weight.layout}')
break
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
  """Normalizes images: `uint8` -> `float32`."""
  return tf.cast(image, tf.float32) / 255., label
batch_size = 128
ds_train = ds_train.map(
normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(batch_size)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.batch(batch_size)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
@tf.function
def train_step(model, x, y, optimizer, metrics):
with tf.GradientTape() as tape:
logits = model(x, training=True)
# tf.reduce_sum sums the batch sharded per-example loss to a replicated
# global loss (scalar).
loss = tf.reduce_sum(tf.keras.losses.sparse_categorical_crossentropy(
y, logits, from_logits=True))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for metric in metrics.values():
metric.update_state(y_true=y, y_pred=logits)
loss_per_sample = loss / len(x)
results = {'loss': loss_per_sample}
return results
@tf.function
def eval_step(model, x, y, metrics):
logits = model(x, training=False)
loss = tf.reduce_sum(tf.keras.losses.sparse_categorical_crossentropy(
y, logits, from_logits=True))
for metric in metrics.values():
metric.update_state(y_true=y, y_pred=logits)
loss_per_sample = loss / len(x)
results = {'eval_loss': loss_per_sample}
return results
def pack_dtensor_inputs(images, labels, image_layout, label_layout):
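  # Split the global batch into one slice per local device and pack the slices into DTensors with the requested image/label layouts.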
num_local_devices = image_layout.mesh.num_local_devices()
images = tf.split(images, num_local_devices)
labels = tf.split(labels, num_local_devices)
images = dtensor.pack(images, image_layout)
labels = dtensor.pack(labels, label_layout)
return images, labels
optimizer = tf.keras.dtensor.experimental.optimizers.Adam(0.01, mesh=mesh)
metrics = {'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(mesh=mesh)}
eval_metrics = {'eval_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(mesh=mesh)}
num_epochs = 3
image_layout = dtensor.Layout.batch_sharded(mesh, 'batch', rank=4)
label_layout = dtensor.Layout.batch_sharded(mesh, 'batch', rank=1)
for epoch in range(num_epochs):
print("============================")
print("Epoch: ", epoch)
for metric in metrics.values():
metric.reset_state()
step = 0
results = {}
pbar = tf.keras.utils.Progbar(target=None, stateful_metrics=[])
for input in ds_train:
images, labels = input[0], input[1]
images, labels = pack_dtensor_inputs(
images, labels, image_layout, label_layout)
results.update(train_step(model, images, labels, optimizer, metrics))
for metric_name, metric in metrics.items():
results[metric_name] = metric.result()
pbar.update(step, values=results.items(), finalize=False)
step += 1
pbar.update(step, values=results.items(), finalize=True)
for metric in eval_metrics.values():
metric.reset_state()
for input in ds_test:
images, labels = input[0], input[1]
images, labels = pack_dtensor_inputs(
images, labels, image_layout, label_layout)
results.update(eval_step(model, images, labels, eval_metrics))
for metric_name, metric in eval_metrics.items():
results[metric_name] = metric.result()
for metric_name, metric in results.items():
print(f"{metric_name}: {metric.numpy()}")
class SubclassedModel(tf.keras.Model):
def __init__(self, name=None):
super().__init__(name=name)
self.feature = tf.keras.layers.Dense(16)
self.feature_2 = tf.keras.layers.Dense(24)
self.dropout = tf.keras.layers.Dropout(0.1)
def call(self, inputs, training=None):
x = self.feature(inputs)
x = self.dropout(x, training=training)
return self.feature_2(x)
layout_map = tf.keras.dtensor.experimental.LayoutMap(mesh=mesh)
layout_map['feature.*kernel'] = dtensor.Layout.batch_sharded(mesh, 'batch', rank=2)
layout_map['feature.*bias'] = dtensor.Layout.batch_sharded(mesh, 'batch', rank=1)
with tf.keras.dtensor.experimental.layout_map_scope(layout_map):
subclassed_model = SubclassedModel()
dtensor_input = dtensor.copy_to_mesh(tf.zeros((16, 16)), layout=unsharded_layout_2d)
# Trigger the weights creation for subclass model
subclassed_model(dtensor_input)
print(subclassed_model.feature.kernel.layout)
layout_map = tf.keras.dtensor.experimental.LayoutMap(mesh=mesh)
layout_map['feature.*kernel'] = dtensor.Layout.batch_sharded(mesh, 'batch', rank=2)
layout_map['feature.*bias'] = dtensor.Layout.batch_sharded(mesh, 'batch', rank=1)
with tf.keras.dtensor.experimental.layout_map_scope(layout_map):
inputs = tf.keras.Input((16,), batch_size=16)
x = tf.keras.layers.Dense(16, name='feature')(inputs)
x = tf.keras.layers.Dropout(0.1)(x)
output = tf.keras.layers.Dense(32, name='feature_2')(x)
model = tf.keras.Model(inputs, output)
print(model.layers[1].kernel.layout)
with tf.keras.dtensor.experimental.layout_map_scope(layout_map):
model = tf.keras.Sequential([
tf.keras.layers.Dense(16, name='feature', input_shape=(16,)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(32, name='feature_2')
])
print(model.layers[2].kernel.layout)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using DTensors with Keras
Step2: Next, import tensorflow and tensorflow.experimental.dtensor, and configure TensorFlow to use 8 virtual CPUs.
Step3: Deterministic pseudo-random number generators
Step4: Creating a Data Parallel Mesh
Step5: As each device runs a full replica of the model, the model variables shall be fully replicated across the mesh (unsharded). As an example, a fully replicated Layout for a rank-2 weight on this Mesh would be as follows
Step6: A layout for a rank-2 data tensor on this Mesh would be sharded along the first dimension (sometimes known as batch_sharded),
Step7: Create Keras layers with layout
Step8: You can check the layout information by examining the layout property on the weights.
Step10: Load a dataset and build input pipeline
Step11: Define the training logic for the model
Step12: Metrics and Optimizers
Step13: Train the model
Step14: Specify Layout for existing model code
Step15: There are 4 weights in this model, which are the kernel and bias for the two Dense layers. Each of them is mapped based on the object path.
Step16: The model weights are created on the first call, so call the model with a DTensor input and confirm the weights have the expected layouts.
Step17: With this, you can quickly map the Layout to your models without updating any of your existing code.
|
439 | <ASSISTANT_TASK:>
Python Code:
import rockbag as rb
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
mpl.rcParams["figure.facecolor"] = "white"
mpl.rcParams["axes.facecolor"] = "white"
mpl.rcParams["savefig.facecolor"] = "white"
import numpy as np
# stub list of files for 2012 09
def get_files():
files = ["/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120901_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120902_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120903_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120904_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120905_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120906_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120907_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120908_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120909_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120910_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120911_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120912_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120913_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120914_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120915_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120916_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120917_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120918_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120919_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120920_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120921_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120922_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120923_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120924_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120925_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120926_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120927_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120928_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120929_f17_v01_n.bin",
"/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/daily/2012/nt_20120930_f17_v01_n.bin"]
return files
files = get_files()
def load_month(files):
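    # Stack the daily grids along a third axis to form a (rows, cols, n_days) cube.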
cube = np.dstack(tuple([file_data(f) for f in files]))
return cube
def file_data(file):
a = rb.suckgrid(file, gpd='N3B', header=300)
return a
z = load_month(get_files())
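# Per-pixel mean and standard deviation across the month of daily grids (time is axis 2).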
average = z.mean(axis=2)
stddev = z.std(axis=2)
np.any(z == 255)
np.any((average > 100) & (average < 250))
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(stddev)
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(average)
month_file = '/projects/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/monthly/nt_201209_f17_v01_n.bin'
gsfc_month_data = rb.suckgrid(month_file, gpd='N3B', header=300)
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(gsfc_month_data)
np.min(gsfc_month_data[gsfc_month_data > 0])
np.min(average[average > 0])
gsfc_month_data
diff = average - gsfc_month_data
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(np.abs(diff) > .6, cmap="Blues")
(np.abs(diff) > .6).sum()
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(np.abs(diff) > .5, cmap="Blues")
(np.abs(diff) > .5).sum()
with mpl.rc_context(rc={'figure.figsize': (10,9), 'axes.grid':False}, ):
plt.imshow(np.abs(diff) > .2, cmap="Blues")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: any missing data in the set?
Step2: total points different by more than .5
|
440 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
titanic=pd.read_csv('./titanic_clean_data.csv')
cols_to_norm=['Age','Fare']
col_norms=['Age_z','Fare_z']
titanic[col_norms]=titanic[cols_to_norm].apply(lambda x: (x-x.mean())/x.std())
#titanic['cabin_clean']=(pd.notnull(titanic.Cabin))
from sklearn.cross_validation import train_test_split, KFold, cross_val_score, StratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, f_classif
titanic['Gender']=titanic['Sex'].replace(to_replace=[True,False],value=['M','F'])
titanic['Parch_ind']=titanic.Parch>=1
titanic=pd.merge(titanic, pd.get_dummies(titanic['Gender'].str.cat(titanic['Pclass'].astype(str),sep='_')),\
on=titanic['PassengerId'])
titanic=pd.merge(titanic, pd.get_dummies(titanic['Gender'].str.cat(titanic['Parch_ind'].astype(str),sep='_')),\
on=titanic['PassengerId'])
titanic=pd.merge(titanic, pd.get_dummies(titanic['Gender'].str.cat(titanic['Age_cut'].astype(str),sep='_')),\
on=titanic['PassengerId'])
titanic_target=titanic.Survived.values
features=['Sex','SibSp','Parch','Pclass_1','Pclass_2','Pclass_3','Emb_C','Emb_Q','Emb_S',\
'Emb_nan','Age_ct_C','Age_ct_A','Age_ct_S', 'Sp_ct','Age_z','Fare_z',\
'Ti_Dr', 'Ti_Master', 'Ti_Mil', 'Ti_Miss', 'Ti_Mr', 'Ti_Mrs', 'Ti_Other', 'Ti_Rev',\
'Fl_AB', 'Fl_CD', 'Fl_EFG', 'Fl_nan',\
'F_1', 'F_2', 'F_3', 'M_1', 'M_2', 'M_3', 'F_False', 'F_True', 'M_False', 'M_True',\
'F_A', 'F_C', 'M_A', 'M_C', 'M_S']
titanic_features=titanic[features].values
titanic_features, ensemble_features, titanic_target, ensemble_target= \
train_test_split(titanic_features,
titanic_target,
test_size=.1,
random_state=7132016)
score=0
for x in range(10,43):
for y in np.linspace(.1,.5,5):
var_filter=SelectKBest(f_classif)
clf=svm.SVC(kernel='rbf')
pipe_svm = Pipeline([('anova', var_filter), ('svc', clf)])
pipe_svm.set_params(anova__k=x, svc__C=y)
score_test = cross_validation.cross_val_score(pipe_svm, titanic_features, titanic_target, n_jobs=1, \
cv=StratifiedKFold(titanic_target, n_folds=10, shuffle=True, random_state=7132016))
if score_test.mean()>score:
score=score_test.mean()
k_out=x
C_out=y
print k_out
print C_out
print score
model=pipe_svm.set_params(anova__k=k_out, svc__C=C_out).fit(titanic_features, titanic_target)
test_data=pd.read_csv('./test.csv')
test_data.Sex.replace(['male','female'],[True,False], inplace=True)
test_data.Age= test_data.groupby(['Sex','Pclass'])[['Age']].transform(lambda x: x.fillna(x.mean()))
test_data.Fare= titanic.groupby(['Pclass'])[['Fare']].transform(lambda x: x.fillna(x.mean()))
titanic_class=pd.get_dummies(test_data.Pclass,prefix='Pclass',dummy_na=False)
test_data=pd.merge(test_data,titanic_class,on=test_data['PassengerId'])
test_data=pd.merge(test_data,pd.get_dummies(test_data.Embarked, prefix='Emb', dummy_na=True), on=test_data['PassengerId'])
titanic['Floor']=titanic['Cabin'].str.extract('^([A-Z])', expand=False)
titanic['Floor'].replace(to_replace='T',value=np.NaN ,inplace=True)
titanic=pd.merge(titanic,pd.get_dummies(titanic.Floor, prefix="Fl", dummy_na=True),on=titanic['PassengerId'])
test_data['Age_cut']=pd.cut(test_data['Age'],[0,17.9,64.9,99], labels=['C','A','S'])
test_data=pd.merge(test_data,pd.get_dummies(test_data.Age_cut, prefix="Age_ct", dummy_na=False),on=test_data['PassengerId'])
test_data['Title']=test_data['Name'].str.extract(', (.*)\.', expand=False)
test_data['Title'].replace(to_replace='Mrs\. .*',value='Mrs', inplace=True, regex=True)
test_data.loc[test_data.Title.isin(['Col','Major','Capt']),['Title']]='Mil'
test_data.loc[test_data.Title=='Mlle',['Title']]='Miss'
test_data.loc[test_data.Title=='Mme',['Title']]='Mrs'
test_data['Title_ct']=test_data.groupby(['Title'])['Title'].transform('count')
test_data.loc[test_data.Title_ct<5,['Title']]='Other'
test_data=pd.merge(test_data,pd.get_dummies(test_data.Title, prefix='Ti',dummy_na=False), on=test_data['PassengerId'])
test_data['NameTest']=test_data.Name
test_data['NameTest'].replace(to_replace=" \(.*\)",value="",inplace=True, regex=True)
test_data['NameTest'].replace(to_replace=", M.*\.",value=", ",inplace=True, regex=True)
cols_to_norm=['Age','Fare']
col_norms=['Age_z','Fare_z']
test_data['Age_z']=(test_data.Age-titanic.Age.mean())/titanic.Age.std()
test_data['Fare_z']=(test_data.Fare-titanic.Fare.mean())/titanic.Fare.std()
#test_data[col_norms]=test_data[cols_to_norm].apply(lambda x: (x-titanic.x.mean())/titanic.x.std())
test_data['cabin_clean']=(pd.notnull(test_data.Cabin))
test_data['Gender']=test_data['Sex'].replace(to_replace=[True,False],value=['M','F'])
test_data['Parch_ind']=test_data.Parch>=1
#pd.get_dummies(str.cat(titanic[['Gender','Pclass']], sep='_'))
test_data=pd.merge(test_data, pd.get_dummies(test_data['Gender'].str.cat(test_data['Pclass'].astype(str),sep='_')), on=test_data['PassengerId'])
test_data=pd.merge(test_data, pd.get_dummies(test_data['Gender'].str.cat(test_data['Parch_ind'].astype(str),sep='_')), on=test_data['PassengerId'])
test_data=pd.merge(test_data, pd.get_dummies(test_data['Gender'].str.cat(test_data['Age_cut'].astype(str),sep='_')), on=test_data['PassengerId'])
name_list=pd.concat([titanic[['PassengerId','NameTest']],test_data[['PassengerId','NameTest']]])
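# Count how many passengers across train and test share the same stripped-down name, minus one to exclude the passenger themselves; used as a spouse-count feature.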
name_list['Sp_ct']=name_list.groupby('NameTest')['NameTest'].transform('count')-1
test_data=pd.merge(test_data,name_list[['PassengerId','Sp_ct']],on='PassengerId',how='left')
def add_cols(var_check,df):
if var_check not in df.columns.values:
df[var_check]=0
for x in features:
add_cols(x, test_data)
features=['Sex','SibSp','Parch','Pclass_1','Pclass_2','Pclass_3','Emb_C','Emb_Q','Emb_S',\
'Emb_nan','Age_ct_C','Age_ct_A','Age_ct_S', 'Sp_ct','Age_z','Fare_z',\
'Ti_Dr', 'Ti_Master', 'Ti_Mil', 'Ti_Miss', 'Ti_Mr', 'Ti_Mrs', 'Ti_Other', 'Ti_Rev',\
'Fl_AB', 'Fl_CD', 'Fl_EFG', 'Fl_nan',\
'F_1', 'F_2', 'F_3', 'M_1', 'M_2', 'M_3', 'F_False', 'F_True', 'M_False', 'M_True',\
'F_A', 'F_C', 'M_A', 'M_C', 'M_S']
test_features=test_data[features].values
predictions=model.predict(ensemble_features)
ensemble_svm=pd.DataFrame({'svm_pred':predictions})
ensemble_svm.to_csv('./ensemble_svm.csv', index=False)
predictions=model.predict(test_features)
test_data['Survived']=predictions
kaggle=test_data[['PassengerId','Survived']]
kaggle.to_csv('./kaggle_titanic_submission_svm.csv', index=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our key parameters here are the penalty term, and the best k features from the univariate analysis
Step2: 22
Step3: Prep the Kaggle test data, as well as the ensembling test data
|
441 | <ASSISTANT_TASK:>
Python Code:
def swap(a, b):
a, b = b, a
x, y = 1, 2
print("Before swap, x = %d and y = %d." % (x, y))
swap(x, y)
print("After swap, x = %d and y = %d." % (x, y))
def add_function_of_integers(func, upto):
total = 0
for n in range(upto + 1):
total = total + func(n)
return total
def identity(n):
return n
N = 10
print("Sum of integers up to %d is %d." %
(N, add_function_of_integers(identity, N)))
def square(n):
return n * n
print("Sum of the squares of integers up to %d is %d." %
(N, add_function_of_integers(square, N)))
N = 3
print("Sum of cubes of integers up to %d is %d." %
(N, add_function_of_integers(lambda x : x**3, N)))
#def cube(x):
# return x**3
cube = lambda x : x**3
cube(10)
add_two_numbers = lambda x, y : x + y
add_two_numbers(10, 20)
# To find the sum 1 + 1 + 1 ... + 1
print("Sum of the %d ones is %d." %
(N + 1, add_function_of_integers(lambda x : 1, N)))
n_squares = []
for i in range(N + 1):
n_squares.append(square(i))
print(n_squares)
whole_numbers = range(N + 1)
n_cubes = [i * i * i for i in whole_numbers]
n_cubes
N = 6
# Express dice rolls as (first die face, second die face)
cartesian_product = [(a, b)
for a in range(1, 7)
for b in range(1, 7)]
print(cartesian_product)
odd_sum_cases = [(a, b)
for a in range(1, 7)
for b in range(1, 7)
if (a + b) % 2 == 1]
print(odd_sum_cases)
list_of_numbers = range(11)
odd_squares = [i * i
for i in list_of_numbers
if i % 2 == 1]
print(odd_squares)
# Using map
list_of_squares = list(map(lambda x : x * x,
list_of_numbers))
print(list_of_squares)
# We will use filter on the dice pairs to print
# only those that sum to an even number
even_sum = filter(lambda x :
(x[0] + x[1]) % 2 == 0,
cartesian_product)
for die_rolls in even_sum:
print("%d and %d sum to even!" %
(die_rolls[0], die_rolls[1]))
print("Let's try printing it again!")
for die_rolls in even_sum:
print("%d and %d sum to even!" %
(die_rolls[0], die_rolls[1]))
pairs_of_numbers = [(a, b) for a in range(1, 21)
for b in range(1, 21)]
pairs_of_numbers = list(filter(lambda x: x[0] <= x[1],
pairs_of_numbers))
print(pairs_of_numbers)
numbers_with_cube_sums = [(a[0]**3 + a[1]**3,a)
for a in pairs_of_numbers]
print(numbers_with_cube_sums)
#dict_of_cube_sums = {}
#for i in numbers_with_cube_sums:
# if i[0] in dict_of_cube_sums:
# dict_of_cube_sums[i[0]].append(i[1])
# else:
# dict_of_cube_sums[i[0]] = [i[1]]
dict_of_cube_sums = {}
[dict_of_cube_sums.setdefault(i[0], [])
for i in numbers_with_cube_sums]
[dict_of_cube_sums[i[0]].append(i[1])
for i in numbers_with_cube_sums]
print(dict_of_cube_sums)
# Find the elements which have more than one element
# in their list
ramanujan_candidates = list(
filter(lambda i : len(i[1]) > 1,
dict_of_cube_sums.items()))
ramanujan_candidates
def add_n(n):
def adder(a):
return a + n
return adder
add10 = add_n(10)
print(add10(23))
add3 = add_n(3)
print(add3(23))
def logger(func):
def inner(*args, **kwargs):
print("Arguments: %s %s" % (args, kwargs))
return func(*args, **kwargs)
return inner
def add_two_numbers(x, y):
return x + y
print("Adding %d and %d gives %d" % (1, 3,
add_two_numbers(1, 3)))
logged_add_two_numbers = logger(add_two_numbers)
mysum = logged_add_two_numbers(1, 3)
print(mysum)
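# Not in the original notebook: the logger wrapper above is exactly what Python's
# decorator syntax automates. A small illustrative sketch of the same idea:
@logger
def multiply_two_numbers(x, y):
    return x * y

print(multiply_two_numbers(2, 5))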
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Moral
Step2: Anonymous functions
Step3: List comprehensions
Step4: Example
Step5: Now we form a new list with the sum and the actual tuples linked together in a tuple
Step6: Finally, we create a dictionary with the cube as the key and the (a, b) tuple pairs in a list as the values
Step7: Functions within functions
|
442 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_linnerud
linnerud = load_linnerud()
chinups = linnerud.data[:,0]
plt.hist(chinups, histtype = "step", lw = 3)
plt.hist(chinups, bins = 5, histtype="step", lw = 3)
plt.hist(chinups, align = "left", histtype="step", lw = 3)
bins = np.append(np.sort(chinups)[::5], np.max(chinups))
plt.hist(chinups, bins = bins, histtype = "step", normed = True, lw = 3)
plt.hist(chinups, histtype = 'step')
# this is the code for the rug plot
plt.plot(chinups, np.zeros_like(chinups), '|', color='k', ms = 25, mew = 4)
# execute this cell
from sklearn.neighbors import KernelDensity
def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):
kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)
kde_skl.fit(data[:, np.newaxis])
log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)
return np.exp(log_pdf)
grid = np.arange(0 + 1e-4,20,0.01)
PDFtophat = kde_sklearn(chinups, grid, bandwidth = 0.1, kernel = 'tophat')
plt.plot(grid, PDFtophat)
PDFtophat1 = kde_sklearn(chinups, grid, bandwidth = 1, kernel = 'tophat')
plt.plot(grid, PDFtophat1, 'MediumAquaMarine', lw = 3, label = "bw = 1")
PDFtophat5 = kde_sklearn(chinups, grid, bandwidth = 5, kernel = 'tophat')
plt.plot(grid, PDFtophat5, 'Tomato', lw = 3, label = "bw = 5")
plt.legend()
PDFgaussian = kde_sklearn(chinups, grid, bandwidth = 1, kernel = 'gaussian')
plt.plot(grid, PDFgaussian, 'DarkOrange', lw = 3, label = "gaussian")
PDFepanechnikov = kde_sklearn(chinups, grid, bandwidth = 2, kernel = 'epanechnikov')
plt.plot(grid, PDFepanechnikov, 'SlateGrey', lw = 3, label = "epanechnikov")
plt.legend(loc = 2)
x = np.arange(0, 6*np.pi, 0.1)
y = np.cos(x)
plt.plot(x,y, lw = 2)
plt.xlabel('X')
plt.ylabel('Y')
plt.xlim(0, 6*np.pi)
import seaborn as sns
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y, lw = 2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_xlim(0, 6*np.pi)
sns.set_style("ticks")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y, lw = 2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_xlim(0, 6*np.pi)
# default color palette
current_palette = sns.color_palette()
sns.palplot(current_palette)
# set palette to colorblind
sns.set_palette("colorblind")
current_palette = sns.color_palette()
sns.palplot(current_palette)
iris = sns.load_dataset("iris")
iris
# note - hist, kde, and rug all set to True, set to False to turn them off
with sns.axes_style("dark"):
sns.distplot(iris['petal_length'], bins=20, hist=True, kde=True, rug=True)
plt.scatter(iris['petal_length'], iris['petal_width'])
plt.xlabel("petal length (cm)")
plt.ylabel("petal width (cm)")
with sns.axes_style("darkgrid"):
xexample = np.random.normal(loc = 0.2, scale = 1.1, size = 10000)
yexample = np.random.normal(loc = -0.1, scale = 0.9, size = 10000)
plt.scatter(xexample, yexample)
# hexbin w/ bins = "log" returns the log of counts/bin
# mincnt = 1 displays only hexpix with at least 1 source present
with sns.axes_style("darkgrid"):
plt.hexbin(xexample, yexample, bins = "log", cmap = "viridis", mincnt = 1)
plt.colorbar()
with sns.axes_style("darkgrid"):
sns.kdeplot(xexample, yexample,shade=False)
sns.jointplot(x=iris['petal_length'], y=iris['petal_width'])
sns.jointplot(x=iris['petal_length'], y=iris['petal_width'], kind = 'kde', shade = 'False')
sns.pairplot(iris[["sepal_length", "sepal_width", "petal_length", "petal_width"]])
sns.pairplot(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"],
hue = "species", diag_kind = 'kde')
g = sns.PairGrid(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"],
hue = "species", diag_sharey=False)
g.map_lower(sns.kdeplot)
g.map_upper(plt.scatter, edgecolor='white')
g.map_diag(sns.kdeplot, lw=3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1) Density Estimation
Step2: Problem 1a
Step3: Already with this simple plot we see a problem - the choice of bin centers and number of bins suggest that there is a 0% probability that middle aged men can do 10 chinups. Intuitively this seems incorrect, so lets examine how the histogram changes if we change the number of bins or the bin centers.
Step4: These small changes significantly change the output PDF. With fewer bins we get something closer to a continuous distribution, while shifting the bin centers reduces the probability to zero at 9 chinups.
Step5: Ending the lie
Step6: Of course, even rug plots are not a perfect solution. Many of the chinup measurements are repeated, and those instances cannot be easily isolated above. One (slightly) better solution is to vary the transparency of the rug "whiskers" using alpha = 0.3 in the whiskers plot call. But this too is far from perfect.
Step7: Problem 1e
Step8: In this representation, each "block" has a height of 0.25. The bandwidth is too narrow to provide any overlap between the blocks. This choice of kernel and bandwidth produces an estimate that is essentially a histogram with a large number of bins. It gives no sense of continuity for the distribution. Now, we examine the difference (relative to histograms) upon changing the the width (i.e. kernel) of the blocks.
Step9: It turns out blocks are not an ideal representation for continuous data (see discussion on histograms above). Now we will explore the resulting PDF from other kernels.
Step10: So, what is the optimal choice of bandwidth and kernel? Unfortunately, there is no hard and fast rule, as every problem will likely have a different optimization. Typically, the choice of bandwidth is far more important than the choice of kernel. In the case where the PDF is likely to be gaussian (or close to gaussian), then Silverman's rule of thumb can be used
Step11: Seaborn
Step12: We can see that the plot has significantly changed styles.
Step13: The folks behind seaborn have thought a lot about color palettes, which is a good thing. Remember - the choice of color for plots is one of the most essential aspects of visualization. A poor choice of colors can easily mask interesting patterns or suggest structure that is not real. To learn more about what is available, see the seaborn color tutorial.
Step14: which we will now change to colorblind, which is clearer to those that are colorblind.
Step15: Now that we have covered the basics of seaborn (and the above examples truly only scratch the surface of what is possible), we will explore the power of seaborn for higher dimension data sets. We will load the famous Iris data set, which measures 4 different features of 3 different types of Iris flowers. There are 150 different flowers in the data set.
Step16: Now that we have a sense of the data structure, it is useful to examine the distribution of features. Above, we went to great pains to produce histograms, KDEs, and rug plots. seaborn handles all of that effortlessly with the distplot function.
Step17: Of course, this data set lives in a 4D space, so plotting more than univariate distributions is important (and as we will see tomorrow this is particularly useful for visualizing classification results). Fortunately, seaborn makes it very easy to produce handy summary plots.
Step18: Of course, when there are many many data points, scatter plots become difficult to interpret. As in the example below
Step19: Here, we see that there are many points, clustered about the origin, but we have no sense of the underlying density of the distribution. 2D histograms, such as plt.hist2d(), can alleviate this problem. I prefer to use plt.hexbin() which is a little easier on the eyes (though note - these histograms are just as subject to the same issues discussed above).
Step20: While the above plot provides a significant improvement over the scatter plot by providing a better sense of the density near the center of the distribution, the binedge effects are clearly present. An even better solution, like before, is a density estimate, which is easily built into seaborn via the kdeplot function.
Step21: This plot is much more appealing (and informative) than the previous two. For the first time we can clearly see that the distribution is not actually centered on the origin. Now we will move back to the Iris data set.
Step22: But! Histograms and scatter plots can be problematic as we have discussed many times before.
Step23: That is much nicer than what was presented above. However - we still have a problem in that our data live in 4D, but we are (mostly) limited to 2D projections of that data. One way around this is via the seaborn version of a pairplot, which plots the distribution of every variable in the data set against each other. (Here is where the integration with pandas DataFrames becomes so powerful.)
Step24: For data sets where we have classification labels, we can even color the various points using the hue option, and produce KDEs along the diagonal with diag_type = 'kde'.
Step25: Even better - there is an option to create a PairGrid which allows fine tuned control of the data as displayed above, below, and along the diagonal. In this way it becomes possible to avoid having symmetric redundancy, which is not all that informative. In the example below, we will show scatter plots and contour plots simultaneously.
|
443 | <ASSISTANT_TASK:>
Python Code:
%%bash
cd ~/Downloads
wget https://s3.amazonaws.com/ed-college-choice-public/CollegeScorecard_Raw_Data.zip
unzip CollegeScorecard_Raw_Data.zip
!ls ~/Downloads/CollegeScorecard_Raw_Data
import pandas as pd
df = pd.read_csv('~/Downloads/CollegeScorecard_Raw_Data/MERGED2011_PP.csv', na_values=['PrivacySuppressed'])
len(df)
len(df.columns)
ddict = pd.read_csv('~/Downloads/CollegeScorecard_Raw_Data/CollegeScorecardDataDictionary-09-12-2015.csv')
ddict['dev-category'].unique()
pd.options.display.max_colwidth = 87
ddict[ddict['dev-category'] == 'earnings'].set_index('VARIABLE NAME')['NAME OF DATA ELEMENT']
ddict[ddict['VARIABLE NAME'] == 'mn_earn_wne_male1_p10'].values
df['diffp10'] = df.mn_earn_wne_male1_p10 - df.mn_earn_wne_male0_p10
df = df.sort(columns=['diffp10'], ascending=False)
for name in ['Massachusetts Institute of Technology', 'Stanford University']:
print name, df[df.INSTNM == name].diffp10.values[0]
df[['INSTNM','diffp10','mn_earn_wne_male0_p10','mn_earn_wne_male1_p10']].set_index('INSTNM').head(30)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: let's explore
Step2: for some reason 2011 was the last year for which there is earning information
Step3: the number of schools covered
Step4: for each there are lots of columns to read
Step5: but there is a dictionary just for exploring what each column is
Step6: the columns are grouped
Step7: there are many columns in the earning category
Step8: and here is the field for male earnings 10 years after finishing school. Yes, females are male0
Step9: compute the difference between women and men
Step10: results
|
444 | <ASSISTANT_TASK:>
Python Code:
# Answer
# Answer
# Answer
# Answer
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize the data
Step2: Well.. Train the model
Step3: Show some quantitative results
|
445 | <ASSISTANT_TASK:>
Python Code:
!pip install -q amplpy ampltools pandas bokeh
MODULES=['ampl', 'gurobi']
from ampltools import cloud_platform_name, ampl_notebook
from amplpy import AMPL, register_magics
if cloud_platform_name() is None:
ampl = AMPL() # Use local installation of AMPL
else:
ampl = ampl_notebook(modules=MODULES) # Install AMPL and use it
register_magics(ampl_object=ampl) # Evaluate %%ampl_eval cells with ampl.eval()
import pandas as pd
from bokeh.layouts import row
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
output_notebook()
from amplpy import AMPL, Environment, DataFrame
ampl = AMPL()
ampl.eval('option version;')
ampl.setOption('solver', 'gurobi')
ampl.eval('''
set NUTR;
set FOOD;
param cost {FOOD} > 0;
param f_min {FOOD} >= 0;
param f_max {j in FOOD} >= f_min[j];
param n_min {NUTR} >= 0;
param n_max {i in NUTR} >= n_min[i];
param amt {NUTR,FOOD} >= 0;
var Buy {j in FOOD} >= f_min[j], <= f_max[j];
minimize Total_Cost: sum {j in FOOD} cost[j] * Buy[j];
subject to Diet {i in NUTR}:
n_min[i] <= sum {j in FOOD} amt[i,j] * Buy[j] <= n_max[i];
''')
foods = ['BEEF', 'CHK', 'FISH', 'HAM', 'MCH', 'MTL', 'SPG', 'TUR']
nutrients = ['A', 'C', 'B1', 'B2', 'NA', 'CAL']
ampl.getSet('FOOD').setValues(foods)
ampl.getSet('NUTR').setValues(nutrients)
ampl.setData(DataFrame(
index=[('FOOD', foods)],
columns=[
('cost', [3.59, 2.59, 2.29, 2.89, 1.89, 1.99, 1.99, 2.49]),
('f_min', [2, 2, 2, 2, 2, 2, 2, 2]),
('f_max', [10, 10, 10, 10, 10, 10, 10, 10])
]
))
df = pd.DataFrame({
'n_min': [700, 700, 700, 700, 0, 16000],
'n_max': [20000, 20000, 20000, 20000, 50000, 24000]
},
index=nutrients
)
ampl.setData(DataFrame.fromPandas(df))
amounts = [
[ 60, 8, 8, 40, 15, 70, 25, 60],
[ 20, 0, 10, 40, 35, 30, 50, 20],
[ 10, 20, 15, 35, 15, 15, 25, 15],
[ 15, 20, 10, 10, 15, 15, 15, 10],
[928, 2180, 945, 278, 1182, 896, 1329, 1397],
[295, 770, 440, 430, 315, 400, 379, 450]
]
df = DataFrame(('NUTR', 'FOOD'), 'amt')
df.setValues({
(nutrient, food): amounts[i][j]
for i, nutrient in enumerate(nutrients)
for j, food in enumerate(foods)
})
ampl.setData(df)
ampl.solve()
ampl.getVariable('Buy').getValues().toPandas()
totalcost = ampl.getObjective('Total_Cost')
print("Objective is:", totalcost.value())
cost = ampl.getParameter('cost')
cost.setValues({'BEEF': 5.01, 'HAM': 4.55})
print("Increased costs of beef and ham.")
ampl.solve()
print("New objective value:", totalcost.value())
Buy = ampl.getVariable('Buy')
print("Buy['BEEF'].val = {}".format(Buy['BEEF'].value()))
diet = ampl.getConstraint('Diet')
for nutr in nutrients:
print("Diet['{}'].dual = {}".format(nutr, diet[nutr].dual()))
rows = [tuple(row) for row in Buy.getValues()]
factors = [index for index, value in rows]
x = [value for index, value in rows]
dot = figure(
title="Categorical Dot Plot", tools='', toolbar_location=None,
y_range=factors, x_range=[0,12]
)
dot.segment(0, factors, x, factors, line_width=2, line_color='green')
dot.circle(x, factors, size=15, fill_color='orange', line_color='green', line_width=3)
show(dot)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Google Colab & Kaggle integration
Step2: Quick start
Step 1
Step3: Import Bokeh (do not run if you do not have Bokeh installed)
Step4: For Jupyter Notebooks only (do not run if you are not using Bokeh and Jupyter)
Step5: Step 2
Step6: Step 3
Step7: If the AMPL installation directory is not in the system search path, you should create the AMPL object as follows instead
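A minimal sketch of that alternative constructor, assuming the amplpy Environment class (it is already imported in the code above); the path below is a placeholder for your actual AMPL installation directory
from amplpy import AMPL, Environment
ampl = AMPL(Environment('/full/path/to/ampl/installation'))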
Step8: Step 5
Step9: Note
Step10: Define AMPL sets from python lists
Step11: Define data using an amplpy DataFrame
Step12: Define data using a Pandas DataFrame
Step13: Define data using a python dictionary
Step14: Step 7
Step15: Step 8
Step16: Step 9
Step17: Step 10
Step18: Step 11
Step19: Step 12
Step20: Step 13
Step21: Step 14
Step22: Step 15
|
446 | <ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
characters_per_batch = n_steps * n_seqs
n_batches = len(arr) // characters_per_batch
# Keep only enough characters to make full batches
arr = arr[:n_batches*characters_per_batch]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y = arr[:, n+1:n+1+n_steps]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell outputs
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop]*num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
seq_output = tf.concat(lstm_output, axis=1)
# Reshape seq_output to a 2D tensor with lstm_size columns
x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
    logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
    y_one_hot = tf.one_hot(targets, num_classes)
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
    # Softmax cross entropy loss, averaged over all steps and sequences
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
    loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 10 # Sequences per batch
num_steps = 50 # Number of sequence steps per batch
lstm_size = 128 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.01 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
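    # Keep only the top_n most probable characters, renormalize, and sample one of them.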
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
447 | <ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
events_1 = mne.read_events(fname, include=1)
events_1_2 = mne.read_events(fname, include=[1, 2])
events_not_4_32 = mne.read_events(fname, exclude=[4, 32])
print(events_1[:5], '\n\n---\n\n', events_1_2[:5], '\n\n')
for ind, before, after in events_1[:5]:
print("At sample %d stim channel went from %d to %d"
% (ind, before, after))
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
mne.viz.plot_events(events_1, axes=axs[0], show=False)
axs[0].set(title="restricted to event 1")
mne.viz.plot_events(events_1_2, axes=axs[1], show=False)
axs[1].set(title="restricted to event 1 or 2")
mne.viz.plot_events(events_not_4_32, axes=axs[2], show=False)
axs[2].set(title="keep all but 4 and 32")
plt.setp([ax.get_xticklabels() for ax in axs], rotation=45)
plt.tight_layout()
plt.show()
mne.write_events('example-eve.fif', events_1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading events
Step2: Events objects are essentially numpy arrays with three columns
Step3: Plotting events
Step4: Writing events
|
448 | <ASSISTANT_TASK:>
Python Code:
products = pd.read_csv('amazon_baby_subset.csv')
products = products.fillna({'review':''}) # fill in N/A's in the review column
def remove_punctuation(text):
import string
return text.translate(None, string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
products.head(3)
with open('important_words.json') as important_words_file:
important_words = json.load(important_words_file)
print important_words[:3]
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products['perfect'][:3]
with open('module-4-assignment-train-idx.json') as train_data_file:
train_data_idx = json.load(train_data_file)
with open('module-4-assignment-validation-idx.json') as validation_data_file:
validation_data_idx = json.load(validation_data_file)
print train_data_idx[:3]
print validation_data_idx[:3]
print len(train_data_idx)
print len(validation_data_idx)
train_data = products.iloc[train_data_idx]
train_data.head(2)
validation_data = products.iloc[validation_data_idx]
validation_data.head(2)
def get_numpy_data(dataframe, features, label):
dataframe['constant'] = 1
features = ['constant'] + features
features_frame = dataframe[features]
feature_matrix = features_frame.as_matrix()
label_sarray = dataframe[label]
label_array = label_sarray.as_matrix()
return(feature_matrix, label_array)
feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')
feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment')
print feature_matrix_train.shape
print feature_matrix_valid.shape
'''
feature_matrix: N * D(intercept term included)
coefficients: D * 1
predictions: N * 1
produces probabilistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
# Take dot product of feature_matrix and coefficients
# YOUR CODE HERE
score = np.dot(feature_matrix, coefficients) # N * 1
# Compute P(y_i = +1 | x_i, w) using the link function
# YOUR CODE HERE
predictions = 1.0/(1+np.exp(-score))
# return predictions
return predictions
def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant):
# Compute the dot product of errors and feature
## YOUR CODE HERE
    # errors: N * 1
    # feature: N * 1
    # derivative: 1
    # coefficient: 1
derivative = np.dot(np.transpose(errors), feature)
# add L2 penalty term for any feature that isn't the intercept.
if not feature_is_constant:
## YOUR CODE HERE
derivative -= 2 * l2_penalty * coefficient
return derivative
def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty):
indicator = (sentiment==+1)
scores = np.dot(feature_matrix, coefficients)
# scores.shape (53072L, 1L)
# indicator.shape (53072L,)
# lp = np.sum((indicator-1)*scores - np.log(1. + np.exp(-scores))) - l2_penalty*np.sum(coefficients[1:]**2)
lp = np.sum((np.transpose(np.array([indicator]))-1)*scores - np.log(1. + np.exp(-scores))) - l2_penalty*np.sum(coefficients[1:]**2)
return lp
# coefficients: D * 1
def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter):
coefficients = np.array(initial_coefficients) # make sure it's a numpy array
for itr in xrange(max_iter):
# Predict P(y_i = +1|x_i,w) using your predict_probability() function
## YOUR CODE HERE
predictions = predict_probability(feature_matrix, coefficients)
# Compute indicator value for (y_i = +1)
indicator = (sentiment==+1)
# Compute the errors as indicator - predictions
errors = np.transpose(np.array([indicator])) - predictions
for j in xrange(len(coefficients)): # loop over each coefficient
is_intercept = (j == 0)
# Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].
# Compute the derivative for coefficients[j]. Save it in a variable called derivative
## YOUR CODE HERE
derivative = feature_derivative_with_L2(errors, feature_matrix[:,j], coefficients[j], l2_penalty, is_intercept)
# add the step size times the derivative to the current coefficient
## YOUR CODE HERE
coefficients[j] += step_size*derivative
# Checking whether log likelihood is increasing
if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty)
print 'iteration %*d: log likelihood of observed labels = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, lp)
return coefficients
initial_coefficients = np.zeros((194,1))
step_size = 5e-6
max_iter = 501
coefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 0, max_iter)
coefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 4, max_iter)
coefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 10, max_iter)
coefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 1e2, max_iter)
coefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 1e3, max_iter)
coefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train , sentiment_train , initial_coefficients, step_size, 1e5, max_iter)
coefficients_0_penalty_without_intercept = list(coefficients_0_penalty[1:]) # exclude intercept
word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients_0_penalty_without_intercept)]
word_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)
positive_words = []
for i in range(5):
positive_words.append(word_coefficient_tuples[:5][i][0])
positive_words
negative_words = []
for i in range(5):
negative_words.append(word_coefficient_tuples[-5:][i][0])
negative_words
table = pd.DataFrame(data=[coefficients_0_penalty.flatten(), coefficients_4_penalty.flatten(), coefficients_10_penalty.flatten(), coefficients_1e2_penalty.flatten(), coefficients_1e3_penalty.flatten(), coefficients_1e5_penalty.flatten()],
index=[0, 4, 10, 100.0, 1000.0, 100000.0],
columns=['(intercept)'] + important_words)
table.head(2)
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
cmap_positive = plt.get_cmap('Reds')
cmap_negative = plt.get_cmap('Blues')
xx = l2_penalty_list
plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')
table_positive_words = table[positive_words]
table_negative_words = table[negative_words]
#del table_positive_words['word']
#del table_negative_words['word']
for i, value in enumerate(positive_words):
color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))
plt.plot(xx, table_positive_words[value].as_matrix().flatten(),
'-', label=positive_words[i], linewidth=4.0, color=color)
for i, value in enumerate(negative_words):
color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))
plt.plot(xx, table_negative_words[value].as_matrix().flatten(),
'-', label=negative_words[i], linewidth=4.0, color=color)
plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)
plt.axis([1, 1e5, -1, 2])
plt.title('Coefficient path')
plt.xlabel('L2 penalty ($\lambda$)')
plt.ylabel('Coefficient value')
plt.xscale('log')
plt.rcParams.update({'font.size': 18})
plt.tight_layout()
make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list=[0, 4, 10, 1e2, 1e3, 1e5])
# feature_matrix: N * D
# coefficients: D * 1
# predictions: N * 1
training_accuracy = []
for coefficient in [coefficients_0_penalty, coefficients_4_penalty, coefficients_10_penalty, coefficients_1e2_penalty, coefficients_1e3_penalty, coefficients_1e5_penalty]:
predictions = predict_probability(feature_matrix_train, coefficient)
correct_num = np.sum((np.transpose(predictions.flatten())> 0.5) == (np.array(sentiment_train)>0))
total_num = len(sentiment_train)
#print "correct_num: {}, total_num: {}".format(correct_num, total_num)
training_accuracy.append(correct_num * 1./ total_num)
#print accuracy
l2_penalty_list=[x for x in range(6)]
plt.plot(l2_penalty_list, training_accuracy,'ro')
plt.title('training_accuracy')
plt.xlabel('L2 penalty ($\lambda$)')
plt.ylabel('training_accuracy')
plt.show()
[0, 4, 10, 1e2, 1e3, 1e5][training_accuracy.index(max(training_accuracy))]
training_accuracy[training_accuracy.index(max(training_accuracy))]
vali_accuracy = []
for coefficient in [coefficients_0_penalty, coefficients_4_penalty, coefficients_10_penalty, coefficients_1e2_penalty, coefficients_1e3_penalty, coefficients_1e5_penalty]:
predictions = predict_probability(feature_matrix_valid, coefficient)
correct_num = np.sum((np.transpose(predictions.flatten())> 0.5) == (np.array(sentiment_valid)>0))
total_num = len(sentiment_valid)
#print "correct_num: {}, total_num: {}".format(correct_num, total_num)
vali_accuracy.append(correct_num * 1./ total_num)
#print accuracy
l2_penalty_list=[x for x in range(6)]
plt.plot(l2_penalty_list, vali_accuracy,'ro')
plt.title('vali_accuracy')
plt.xlabel('L2 penalty ($\lambda$)')
plt.ylabel('vali_accuracy')
plt.show()
[0, 4, 10, 1e2, 1e3, 1e5][vali_accuracy.index(max(vali_accuracy))]
vali_accuracy[vali_accuracy.index(max(vali_accuracy))]
plt.plot(l2_penalty_list, training_accuracy,'g')
plt.plot(l2_penalty_list, vali_accuracy,'r')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. data transformations
Step2: 3. Compute word counts (only for important_words)
Step3: 4. Show 'perfect' word counts
Step4: Train-Validation split
Step5: 6. Convert train_data and validation_data into multi-dimensional arrays.
Step6: Building on logistic regression with no L2 penalty assignment
Step8: Adding L2 penalty
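As a worked note on what feature_derivative_with_L2 computes: the per-coefficient derivative of the L2-penalized log likelihood is $\sum_i h_j(x_i)\big(\mathbf{1}[y_i = +1] - P(y_i = +1|x_i,w)\big) - 2\lambda w_j$, with the $-2\lambda w_j$ term omitted for the intercept.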
Step9: 1. Quiz question
Step10: 2. Quiz question
Step11: Explore effects of L2 regularization
Step12: Compare coefficients
Step13: 14. observe the effect of increasing L2 penalty on the 10 words
Step15: 3. Quiz Question
Step16: 4. Quiz question
Step17: 5. Quiz question
|
449 | <ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from pyensae.datasource import download_data
files = download_data("td2a_eco_exercices_de_manipulation_de_donnees.zip",
url="https://github.com/sdpython/ensae_teaching_cs/raw/master/_doc/notebooks/td2a_eco/data/")
files
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Données
|
450 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import math
import numpy as np
import matplotlib.pyplot as plt
##import seaborn as sbn
##from scipy import *
x = .5
print x
x_vector = np.array([1,2,3])
print x_vector
print type(x_vector)
c_list = [1,2]
print "The list:",c_list
print "Has length:", len(c_list)
c_vector = np.array(c_list)
print "The vector:", c_vector
print "Has shape:",c_vector.shape
z = [5,6]
print "This is a list, not an array:",z
print type(z)
A = np.array([[0, 1, 2], [5, 6, 7]])
print A.shape
print type(A)
v = np.array([1,2,3,4])
print v.shape
print len(v)
A
result = A + 3
#or
result = 3 + A
print result
B = np.random.randn(2,2)
print B
A = np.array([[1,0], [0,1]])
A
A*B
A * 3
# Let's redefine A and C to demonstrate matrix multiplication:
A = np.arange(6).reshape((3,2))
C = np.random.randn(2,2)
print A.shape
print C.shape
print A.dot(C)
print np.dot(A,C)
# What would happen to
C.dot(A)
# note, we need a square matrix (# rows = # cols), use C:
C_inverse = np.linalg.inv(C)
print C_inverse
print C.dot(C_inverse)
print "Is identical to:"
print C_inverse.dot(C)
A = np.arange(6).reshape((6,1))
B = np.arange(6).reshape((1,6))
A.dot(B)
B.dot(A)
A = np.arange(6).reshape((3,2))
B = np.arange(8).reshape((2,4))
print "A is"
print A
print "The Transpose of A is"
print A.T
print B.T.dot(A.T)
print "Is identical to:"
print (A.dot(B)).T
B.shape
B[0, 3]
A = np.arange(12).reshape((3,4))
A
A[2,:].shape
A[:,1].reshape(1,3).shape
a = np.arange(10)
s = slice(2,7,2)
print a[s]
a = np.arange(10)
b = a[2:7:2]
print b
a = np.arange(10)
b = a[5]
print b
a = np.arange(10)
print a
print a[2:5]
import numpy as np
a = np.arange(10)
print a[2:5]
a = np.array([[1,2,3],[3,4,5],[4,5,6]])
print a
# slice items starting from index
print 'Now we will slice the array from the index a[1:]'
print a[1:]
# array to begin with
a = np.array([[1,2,3],[3,4,5],[4,5,6]])
print 'Our array is:'
print a
print '\n'
# this returns array of items in the second column
print 'The items in the second column are:'
print a[...,1]
print '\n'
# Now we will slice all items from the second row
print 'The items in the second row are:'
print a[1,...]
print '\n'
# Now we will slice all items from column 1 onwards
print 'The items column 1 onwards are:'
print a[...,1:]
A = np.random.rand(5,5)*10
print A
print (A < 5)
print A[A < 5]
A[A<5] = 0
A
A[A>=5] = 1
A
np.ones((10,5), int)
np.zeros((10,5), int)
np.eye(5, dtype="int")
v1 = np.random.rand(100)
v2 = np.random.randn(100)
plt.plot(range(v1.shape[0]), v1, '.')
plt.plot(range(v2.shape[0]), v2, '.')
plt.hist(v1)
;
v2 = np.random.randn(10000)
plt.hist(v2, bins=100)
;
v3 = np.random.beta(3,2, 1000)
plt.hist(v3, bins=100)
;
ls -l HW03/
%%sh
./HW03/preprocess_data.sh HW03/Camera.csv HW03/Camera_cleaned.csv
head HW03/Camera_cleaned.csv
DATA = np.genfromtxt('HW03/Camera_cleaned.csv', delimiter=';')
DATA.shape
np.max(DATA[1:,2])
np.nanargmin(DATA[1:,2])
### Pure iterative Python ###
points = [[9,2,8],[4,7,2],[3,4,4],[5,6,9],[5,0,7],[8,2,7],[0,3,2],[7,3,0],[6,1,1],[2,9,6]]
qPoint = [4,5,3]
minIdx = -1
minDist = -1
for idx, point in enumerate(points): # iterate over all points
print "index is %d, point is %s" % (idx, point)
dist = sum([(dp-dq)**2 for dp,dq in zip(point,qPoint)])**0.5 # compute the euclidean distance for each point to q
if dist < minDist or minDist < 0: # if necessary, update minimum distance and index of the corresponding point
minDist = dist
minIdx = idx
print 'Nearest point to q: ', points[minIdx]
zip(point, qPoint)
# # # Equivalent NumPy vectorization # # #
import numpy as np
points = np.array([[9,2,8],[4,7,2],[3,4,4],[5,6,9],[5,0,7],[8,2,7],[0,3,2],[7,3,0],[6,1,1],[2,9,6]])
qPoint = np.array([4,5,3])
minIdx = np.argmin(np.linalg.norm(points-qPoint,axis=1)) # compute all euclidean distances at once and return the index of the smallest one
print 'Nearest point to q: ', points[minIdx]
print points.shape
print qPoint.shape
print points
print qPoint
print points-qPoint
from numpy.linalg import norm
norm(points-qPoint)
1.0-points[0,:].dot(qPoint)/(norm(points[0,:])*norm(qPoint))
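# Not part of the original cell: the same cosine computation, vectorized over every
# point at once (reusing the points/qPoint arrays and the norm import above).
cos_sim = points.dot(qPoint) / (norm(points, axis=1) * norm(qPoint))
print 'Most similar point to q: ', points[np.argmax(cos_sim)]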
n = 100 # number of samples
Xr = np.random.rand(n)*99.0
y = -7.3 + 2.5*Xr + np.random.randn(n)*27.0
plt.plot(Xr, y, "o", alpha=0.5)
X = np.vstack((np.ones(n), Xr)).T
print X.shape
X[0:10,:]
beta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
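# Closed-form normal equation: beta = (X^T X)^(-1) X^T y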
yhat = X.dot(beta)
yhat.shape
plt.plot(X[:,1], y, "o", alpha=0.5)
plt.plot(X[:,1], yhat, "-", alpha=1, color="red")
n = 100 # number of samples
X1 = np.random.rand(n)*99.0
X2 = np.random.rand(n)*51.0 - 26.8
X3 = np.random.rand(n)*5.0 + 6.1
X4 = np.random.rand(n)*1.0 - 0.5
X5 = np.random.rand(n)*300.0
y_m = -7.3 + 2.5*X1 + -7.9*X2 + 1.5*X3 + 10.0*X4 + 0.13*X5 + np.random.randn(n)*7.0
plt.hist(y_m, bins=20)
;
X_m = np.vstack((np.ones(n), X1, X2, X3, X4, X5)).T
X_m.shape
beta_m = np.linalg.inv(X_m.T.dot(X_m)).dot(X_m.T).dot(y_m)
beta_m
yhat_m = X_m.dot(beta_m)
yhat_m.shape
import math
RSMD = math.sqrt(np.square(yhat_m-y_m).sum()/n)
print RSMD
p = X.shape[1] ## get number of parameters
lam = 10.0
p, lam
beta2 = np.linalg.inv(X.T.dot(X) + lam*np.eye(p)).dot(X.T).dot(y)
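# Ridge closed form: beta = (X^T X + lambda*I)^(-1) X^T y; the L2 penalty shrinks the coefficients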
yhat2 = X.dot(beta2)
RSMD2 = math.sqrt(np.square(yhat2-y).sum()/n)
print RSMD2
##n = float(X.shape[0])
print " RMSE = ", math.sqrt(np.square(yhat-y).sum()/n)
print "Ridge RMSE = ", math.sqrt(np.square(yhat2-y).sum()/n)
plt.plot(X[:,1], y, "o", alpha=0.5)
plt.plot(X[:,1], yhat, "-", alpha=0.7, color="red")
plt.plot(X[:,1], yhat2, "-", alpha=0.7, color="green")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This code sets up Ipython Notebook environments (lines beginning with %), and loads several libraries and functions. The core scientific stack in python consists of a number of free libraries. The ones I have loaded above include NumPy and Matplotlib.
Step2: Vectors and Lists
Step3: We could have done this by defining a python list and converting it to an array
Step4: Matrix Addition and Subtraction
Step5: Adding or subtracting two matrices
Step6: Matrix Multiplication
Step7: Multiplying two matrices
Step8: We will use the numpy dot operator to perform these multiplications. You can use it two ways to yield the same result
Step9: Matrix Division
Step10: Check that $C\times C^{-1} = I$
Step11: Transposing a Matrix
Step12: One important property of transposing a matrix is the transpose of a product of two matrices. Let matrix A be of dimension $N \times M$ and let B be of dimension $M \times P$. Then $(AB)^T = B^T A^T$, which the code verifies numerically by comparing B.T.dot(A.T) with (A.dot(B)).T.
Step13: Mechanics
Step14: Logic, Comparison
Step15: Concatenate, Reshape
Step16: Random Numbers
Step17: Numpy load, save data files
Step18: Similarity
Step19: Example
Step20: Let's add the bias, i.e. a column of $1$s to the explanatory variables
Step21: Closed-form Linear Regression
Step22: Multiple Linear Regression
Step23: Evaluation
Step24: Regularization, Ridge-Regression
|
451 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('C:\Anaconda2\envs\dato-env\Lib\site-packages')
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price') # the [] around 'sqft_living' makes it a list
print example_features[0,:] # this accesses the first row of the data the ':' indicates 'all columns'
print example_output[0] # and the corresponding output
my_weights = np.array([1., 1.]) # the example weights
my_features = example_features[0,] # we'll use the first data point
predicted_value = np.dot(my_features, my_weights)
print predicted_value
def predict_output(feature_matrix, weights):
# assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
# create the predictions vector by using np.dot()
predictions = np.dot(feature_matrix, weights)
return(predictions)
test_predictions = predict_output(example_features, my_weights)
print test_predictions[0] # should be 1181.0
print test_predictions[1] # should be 2571.0
def feature_derivative(errors, feature):
# Assume that errors and feature are both numpy arrays of the same length (number of data points)
# compute twice the dot product of these vectors as 'derivative' and return the value
derivative = 2*np.dot(errors, feature)
return(derivative)
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([0., 0.]) # this makes all the predictions 0
test_predictions = predict_output(example_features, my_weights)
# just like SFrames 2 numpy arrays can be elementwise subtracted with '-':
errors = test_predictions - example_output # prediction errors in this case is just the -example_output
feature = example_features[:,0] # let's compute the derivative with respect to 'constant', the ":" indicates "all rows"
derivative = feature_derivative(errors, feature)
print derivative
print -np.sum(example_output)*2 # should be the same as derivative
print example_output
print errors
print feature
from math import sqrt # recall that the magnitude/length of a vector [g[0], g[1], g[2]] is sqrt(g[0]^2 + g[1]^2 + g[2]^2)
def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
converged = False
weights = np.array(initial_weights) # make sure it's a numpy array
while not converged:
# compute the predictions based on feature_matrix and weights using your predict_output() function
predictions = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = predictions - output
gradient_sum_squares = 0 # initialize the gradient sum of squares
# while we haven't reached the tolerance yet, update each feature's weight
for i in range(len(weights)): # loop over each weight
# Recall that feature_matrix[:, i] is the feature column associated with weights[i]
# compute the derivative for weight[i]:
derivative = feature_derivative(errors, feature_matrix[:, i])
# add the squared value of the derivative to the gradient magnitude (for assessing convergence)
gradient_sum_squares += (derivative**2)
# subtract the step size times the derivative from the current weight
weights[i] -= (step_size * derivative)
# compute the square-root of the gradient sum of squares to get the gradient matnigude:
gradient_magnitude = sqrt(gradient_sum_squares)
if gradient_magnitude < tolerance:
converged = True
return(weights)
train_data,test_data = sales.random_split(.8,seed=0)
# let's test out the gradient descent
simple_features = ['sqft_living']
my_output = 'price'
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
initial_weights = np.array([-47000., 1.])
step_size = 7e-12
tolerance = 2.5e7
test_weight = regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, tolerance)
print test_weight
(test_simple_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
test_predictions = predict_output(test_simple_feature_matrix, test_weight)
print test_predictions
print test_predictions[0]
test_residuals = test_output - test_predictions
test_RSS = (test_residuals * test_residuals).sum()
print test_RSS
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
initial_weights = np.array([-100000., 1., 1.])
step_size = 4e-12
tolerance = 1e9
weight_2 = regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance)
print weight_2
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
test_predictions_2 = predict_output(test_feature_matrix, weight_2)
print test_predictions_2
print test_predictions_2[0]
print test_data['price'][0]
test_residuals_2 = test_output - test_predictions_2
test_RSS_2 = (test_residuals_2**2).sum()
print test_RSS_2
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the other Week 2 notebook. For this notebook, however, we will work with the existing features.
Step3: Now we will write a function that will accept an SFrame, a list of feature names (e.g. ['sqft_living', 'bedrooms']) and a target feature e.g. ('price') and will return two things
Step4: For testing let's use the 'sqft_living' feature and a constant as our features and price as our output
Step5: Predicting output given regression weights
Step6: np.dot() also works when dealing with a matrix and a vector. Recall that the predictions from all the observations is just the RIGHT (as in weights on the right) dot product between the features matrix and the weights vector. With this in mind finish the following predict_output function to compute the predictions for an entire matrix of features given the matrix and the weights
Step7: If you want to test your code run the following cell
Step8: Computing the Derivative
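(As a reminder of the formula that feature_derivative encodes: for a feature column $h_j$, the partial derivative of RSS is $2\sum_i h_j(x_i)\,error_i$, i.e. twice the dot product of the error vector and that feature column.)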
Step9: To test your feature derivative run the following
Step10: Gradient Descent
Step11: Discussion
Step12: A few things to note before we run the gradient descent. Since the gradient is a sum over all the data points and involves a product of an error and a feature the gradient itself will be very large since the features are large (squarefeet) and the output is large (prices). So while you might expect "tolerance" to be small, small is only relative to the size of the features.
Step13: Although the gradient descent is designed for multiple regression, since the constant is now a feature we can use the gradient descent function to estimate the parameters in the simple regression on squarefeet. The following cell sets up the feature_matrix, output, initial weights and step size for the first model
Step14: Next run your gradient descent with the above parameters.
Step15: How do your weights compare to those achieved in week 1 (don't expect them to be exactly the same)?
Step16: Now compute your predictions using test_simple_feature_matrix and your weights from above.
Step17: Quiz Question
Step18: Now that you have the predictions on test data, compute the RSS on the test data set. Save this value for comparison later. Recall that RSS is the sum of the squared errors (difference between prediction and output).
Step19: Running a multiple regression
Step20: Use the above parameters to estimate the model weights. Record these values for your quiz.
Step21: Use your newly estimated weights and the predict_output function to compute the predictions on the TEST data. Don't forget to create a numpy array for these features from the test set first!
Step22: Quiz Question
Step23: What is the actual price for the 1st house in the test data set?
Step24: Quiz Question
|
452 | <ASSISTANT_TASK:>
Python Code:
!ls *fits
import astropy.io.fits as afits
from astropy.wcs import WCS
from astropy.visualization import ZScaleInterval
import matplotlib
%matplotlib notebook
%pylab
f1 = afits.open('wdd7.040920_0452.051_6.fits')
f2 = afits.open('wdd7.080104_0214.1025_6.fits')
f1wcs = WCS(f1[0].header)
f2wcs = WCS(f2[0].header)
zscaler = ZScaleInterval(nsamples=1000, contrast=0.25)
f1d = zscaler(f1[0].data)
f2d = zscaler(f2[0].data)
fig = figure(figsize=(10,4))
ax = fig.add_subplot(1,1,1)
ax.imshow(f1d.T, cmap='Reds')
ax.imshow(f2d.T, cmap='Blues', alpha=0.5)
tight_layout()
xlabel('x')
ylabel('y')
savefig('out/original_misalignment.pdf')
!solve-field -h
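# An illustrative solve-field call (the flags are standard astrometry.net options, but the scale
# bounds and output names here are placeholders rather than the exact command used for this data;
# the commands file printed at the end of this section has the real invocations):
# !solve-field --scale-units arcsecperpix --scale-low 0.1 --scale-high 1.0 --overwrite \
#     --new-fits wdd7.040920_0452.051_6.solved.fits wdd7.040920_0452.051_6.fits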
print(WCS(f1[0].header))
!imhead wdd7.040920_0452.051_6.fits
!wcs-to-tan -h
!cat Register_images_solutions_commands.txt
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: While the images have been de-trended, they still have the original WCS from the telescope. They aren't aligned. You could use ds9 to check this trivially, but lets do it with astropy instead.
Step2: Use the astrometry.net client (solve-field) to determine an accurate WCS solution for this field.
Step3: Options you might want to look at
Step4: or
Step5: Use the above info to solve for the WCS for both images.
Step6: The commands I used are in Register_images_solution.txt and the output I generated in the out/ subdirectory.
|
453 | <ASSISTANT_TASK:>
Python Code:
import re, os, sys, shutil
import shlex, subprocess
import glob
import pandas as pd
import panedr
import numpy as np
import MDAnalysis as mda
import nglview
import matplotlib.pyplot as plt
import parmed as pmd
import py
import scipy
from scipy import stats
from importlib import reload
from thtools import cd
from paratemp import copy_no_overwrite
from paratemp import geometries as gm
from paratemp import coordinate_analysis as ca
import paratemp.para_temp_setup as pts
import paratemp as pt
from gautools import submit_gaussian as subg
from gautools.tools import use_gen_template as ugt
def plot_prop_PT(edict, prop):
fig, axes = plt.subplots(4, 4, figsize=(16,16))
for i in range(16):
ax = axes.flat[i]
edict[i][prop].plot(ax=ax)
fig.tight_layout()
return fig, axes
def plot_e_props(df, labels, nrows=2, ncols=2):
fig, axes = plt.subplots(nrows, ncols, sharex=True)
for label, ax in zip(labels, axes.flat):
df[label].plot(ax=ax)
ax.set_title(label)
fig.tight_layout()
return fig, axes
def plot_rd(univ): # rd = reaction distance
univ.calculate_distances(rd=(20,39))
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
univ.data.rd.plot(ax=axes[0])
univ.data.rd.hist(ax=axes[1], grid=False)
print(f'reaction distance mean: {univ.data.rd.mean():.2f} and sd: {univ.data.rd.std():.2f}')
return fig, axes
def plot_hist_dist(univ, name, indexes=None):
if indexes is not None:
kwargs = {name: indexes}
univ.calculate_distances(**kwargs)
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
univ.data[name].plot(ax=axes[0])
univ.data[name].hist(ax=axes[1], grid=False)
print(f'{name} distance mean: {univ.data[name].mean():.2f} and sd: {univ.data[name].std():.2f}')
def get_solvent_count_solvate(proc):
for line in proc.stdout.split('\n'):
m = re.search(r'(?:atoms\):\s+)(\d+)(?:\s+residues)', line)
if m:
return int(m.group(1))
else:
raise ValueError('Solvent count not found.')
def set_solv_count(n_gro, s_count,
res_name='DCM', prepend='unequal-'):
    """Remove solvent residues from the end of a gro file to match s_count
    This assumes all non-solvent molecules are listed in the input gro
    file before the solvent residues.
    """
bak_name = os.path.join(os.path.dirname(n_gro),
prepend+os.path.basename(n_gro))
copy_no_overwrite(n_gro, bak_name)
with open(n_gro, 'r') as in_gro:
lines = in_gro.readlines()
for line in lines[2:]:
if res_name in line:
non_s_res_count = resid
break
else:
resid = int(line[:5])
res_count = s_count + non_s_res_count
# TODO check reasonability of this number
box = lines.pop()
while True:
line = lines.pop()
if int(line[:5]) > res_count:
continue
elif int(line[:5]) == res_count:
atom_count = line[15:20]
lines.append(line)
break
elif int(line[:5]) < res_count:
raise ValueError("Desired res "
"count is larger than "
"line's resid.\n" +
"res_count: {}\n".format(res_count) +
"line: {}".format(line))
lines[1] = atom_count + '\n'
lines.append(box)
with open(n_gro, 'w') as out_gro:
for line in lines:
out_gro.write(line)
def get_solv_count_top(n_top, res_name='DCM'):
    """Return residue count of specified residue from n_top"""
with open(n_top, 'r') as in_top:
mol_section = False
for line in in_top:
if line.strip().startswith(';'):
pass
elif not mol_section:
if re.search(r'\[\s*molecules\s*\]', line,
flags=re.IGNORECASE):
mol_section = True
else:
if res_name.lower() in line.lower():
return int(line.split()[1])
def set_solv_count_top(n_top, s_count,
res_name='DCM', prepend='unequal-'):
    """Set count of res_name residues in n_top
    This will make a backup copy of the top file with `prepend`
    prepended to the name of the file.
    """
bak_name = os.path.join(os.path.dirname(n_top),
prepend+os.path.basename(n_top))
copy_no_overwrite(n_top, bak_name)
with open(n_top, 'r') as in_top:
lines = in_top.readlines()
with open(n_top, 'w') as out_top:
mol_section = False
for line in lines:
if line.strip().startswith(';'):
pass
elif not mol_section:
if re.search(r'\[\s*molecules\s*\]', line,
flags=re.IGNORECASE):
mol_section = True
else:
if res_name.lower() in line.lower():
line = re.sub(r'\d+', str(s_count), line)
out_top.write(line)
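# Helper note: set_solv_count trims a solvated .gro down to a target number of solvent residues,
# and set_solv_count_top makes the matching edit in a .top file, so a box and its topology can be
# kept consistent. Below, the TS1/TS3 boxes are trimmed with set_solv_count and their topologies
# are rebuilt with the matching DCM count.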
d_charge_params = dict(opt='SCF=tight Test Pop=MK iop(6/33=2) iop(6/42=6) iop(6/50=1)',
func='HF',
basis='6-31G*',
footer='\ng16.gesp\n\ng16.gesp\n\n')
l_scripts = []
s = subg.write_sub_script('01-charges/TS2.com',
executable='g16',
make_xyz='../TS2.pdb',
make_input=True,
ugt_dict={'job_name':'GPX TS2 charges',
'charg_mult':'+1 1',
**d_charge_params})
l_scripts.append(s)
s = subg.write_sub_script('01-charges/R-NO2-CPA.com',
executable='g16',
make_xyz='../R-NO2-CPA.pdb',
make_input=True,
ugt_dict={'job_name':'GPX R-NO2-CPA charges',
'charg_mult':'-1 1',
**d_charge_params})
l_scripts.append(s)
l_scripts
subg.submit_scripts(l_scripts, batch=True, submit=True)
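# The GAFF/ACPYPE parameterization that produces the *.acpype directories loaded below was run
# outside this notebook. For illustration only (the tool names are real, but the exact flags,
# filenames and net charges used for this project are assumptions), a typical sequence from a
# Gaussian ESP file would be:
# !antechamber -i g16.gesp -fi gesp -o GPX-ts.mol2 -fo mol2 -c resp -nc 1 -at gaff
# !acpype -i GPX-ts.mol2 -n 1 -b GPX-ts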
gpx = pmd.gromacs.GromacsTopologyFile('01-charges/GPX-ts.acpype/GPX-ts_GMX.top', xyz='01-charges/GPX-ts.acpype/GPX-ts_GMX.gro')
cpa = pmd.gromacs.GromacsTopologyFile('01-charges/CPA-gesp.acpype/CPA-gesp_GMX.top', xyz='01-charges/CPA-gesp.acpype/CPA-gesp_GMX.gro')
for res in gpx.residues:
if res.name == 'MOL':
res.name = 'GPX'
for res in cpa.residues:
if res.name == 'MOL':
res.name = 'CPA'
struc_comb = gpx + cpa
struc_comb
struc_comb.write('gpx-cpa-dry.top')
struc_comb.save('gpx-cpa-dry.gro')
f_dcm = py.path.local('~/GROMACS-basics/DCM-GAFF/')
f_solvate = py.path.local('02-solvate/')
sep_gro = py.path.local('gpx-cpa-sep.gro')
boxed_gro = f_solvate.join('gpx-cpa-boxed.gro')
box = '3.5 3.5 3.5'
solvent_source = f_dcm.join('dichloromethane-T293.15.gro')
solvent_top = f_dcm.join('dichloromethane.top')
solv_gro = f_solvate.join('gpx-cpa-dcm.gro')
top = py.path.local('../params/gpxTS-cpa-dcm.top')
verbose = True
solvent_counts, key = dict(), 'GPX'
with f_solvate.as_cwd():
## Make box
cl = shlex.split(f'gmx_mpi editconf -f {sep_gro} ' +
f'-o {boxed_gro} -box {box}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_editconf'] = proc.stdout
proc.check_returncode()
## Solvate
cl = shlex.split(f'gmx_mpi solvate -cp {boxed_gro} ' +
f'-cs {solvent_source} -o {solv_gro}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_solvate'] = proc.stdout
proc.check_returncode()
solvent_counts[key] = get_solvent_count_solvate(proc)
if verbose:
print(f'Solvated system into {solv_gro}')
struc_g_c = pmd.load_file('gpx-cpa-dry.top')
struc_dcm = pmd.load_file(str(f_dcm.join('dichloromethane.top')))
struc_g_c_d = struc_g_c + solvent_counts['GPX'] * struc_dcm
struc_g_c_d.save(str(top))
ppl = py.path.local
f_min = ppl('03-minimize/')
f_g_basics = py.path.local('~/GROMACS-basics/')
mdp_min = f_g_basics.join('minim.mdp')
tpr_min = f_min.join('min.tpr')
deffnm_min = f_min.join('min-out')
gro_min = deffnm_min + '.gro'
with f_min.as_cwd():
## Compile tpr
if not tpr_min.exists():
cl = shlex.split(f'gmx_mpi grompp -f {mdp_min} '
f'-c {solv_gro} '
f'-p {top} '
f'-o {tpr_min}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_grompp_em'] = proc.stdout
proc.check_returncode()
if verbose:
print(f'Compiled em tpr to {tpr_min}')
elif verbose:
print(f'em tpr file already exists ({tpr_min})')
## Run minimization
if not gro_min.exists():
cl = shlex.split('gmx_mpi mdrun '
f'-s {tpr_min} '
f'-deffnm {deffnm_min} ')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_mdrun_em'] = proc.stdout
# TODO Get the potential energy from this output
proc.check_returncode()
if verbose:
print(f'Ran {key} em to make {gro_min}')
elif verbose:
print(f'em output gro already exists (gro_min)')
f_equil = ppl('04-equilibrate/')
plumed = f_equil.join('plumed.dat')
mdp_equil = f_g_basics.join('npt-298.mdp')
tpr_equil = f_equil.join('equil.tpr')
deffnm_equil = f_equil.join('equil-out')
gro_equil = deffnm_equil + '.gro'
gro_input = gro_min
with f_equil.as_cwd():
## Compile equilibration
if not tpr_equil.exists():
cl = shlex.split(f'gmx_mpi grompp -f {mdp_equil} '
f'-c {gro_input} '
f'-p {top} '
f'-o {tpr_equil}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_grompp_equil'] = proc.stdout
proc.check_returncode()
if verbose:
print(f'Compiled equil tpr to {tpr_equil}')
elif verbose:
print(f'equil tpr file already exists ({tpr_equil})')
## Run equilibration
if not gro_equil.exists():
cl = shlex.split('gmx_mpi mdrun '
f'-s {tpr_equil} '
f'-deffnm {deffnm_equil} '
f'-plumed {plumed}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_mdrun_equil'] = proc.stdout
proc.check_returncode()
if verbose:
print(f'Ran {key} equil to make {gro_equil}')
elif verbose:
print(f'equil output gro already exists (gro_equil)')
f_pt = ppl('05-PT/')
template = f_pt.join('template-mdp.txt')
index = ppl('index.ndx')
sub_templ = f_g_basics.join('sub-template-128.sub')
d_sub_templ = dict(tpr_base = 'TOPO/npt',
deffnm = 'PT-out',
name = 'GPX-PT',
plumed = plumed,
)
scaling_exponent = 0.025
maxwarn = 0
start_temp = 298.
verbose = True
skip_existing = True
jobs = []
failed_procs = []
for key in ['GPX']:
kwargs = {'template': str(template),
'topology': str(top),
'structure': str(gro_equil),
'index': str(index),
'scaling_exponent': scaling_exponent,
'start_temp': start_temp,
'maxwarn': maxwarn}
with f_pt.as_cwd():
try:
os.mkdir('TOPO')
except FileExistsError:
if skip_existing:
print(f'Skipping {key} because it seems to '
'already be done.\nMoving on...')
continue
with cd('TOPO'):
print(f'Now in {os.getcwd()}\nAttempting to compile TPRs...')
pts.compile_tprs(**kwargs)
print('Done compiling. Moving on...')
print(f'Now in {os.getcwd()}\nWriting submission script...')
with sub_templ.open(mode='r') as templ_f, \
open('gromacs-start-job.sub', 'w') as sub_s:
[sub_s.write(l.format(**d_sub_templ)) for l in templ_f]
print('Done.\nNow submitting job...')
cl = ['qsub', 'gromacs-start-job.sub']
proc = subprocess.run(cl,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
if proc.returncode == 0:
output = proc.stdout
jobs.append(re.search('[0-9].+\)', output).group(0))
print(output, '\nDone.\nMoving to next...')
else:
print('\n\n'+5*'!!!---'+'\n')
print(f'Error with calling qsub on {key}')
print('Command line input was', cl)
print('Check input and try again manually.'
'\nMoving to next anyway...')
failed_procs.append(proc)
print('-----Done-----\nSummary of jobs submitted:')
for job in jobs:
print(job)
e_05s = dict()
for i in range(16):
e_05s[i] = panedr.edr_to_df(f'05-PT/PT-out{i}.edr')
fig, axes = plot_prop_PT(e_05s, 'Pressure')
l_scripts = []
s = subg.write_sub_script('01-charges/TS1.com',
executable='g16',
make_xyz='../TS1protonated.mol2',
make_input=True,
ugt_dict={'job_name':'GPX TS1 charges',
'charg_mult':'+1 1',
**d_charge_params})
l_scripts.append(s)
s = subg.write_sub_script('01-charges/TS3.com',
executable='g16',
make_xyz='../TS3protonated.mol2',
make_input=True,
ugt_dict={'job_name':'GPX TS3 charges',
'charg_mult':'+1 1',
**d_charge_params})
l_scripts.append(s)
s = subg.write_sub_script('01-charges/anti-cat-yamamoto.com',
executable='g16',
make_xyz='../R-Yamamoto-Cat.pdb',
make_input=True,
ugt_dict={'job_name':
'yamamoto catalyst charges',
'charg_mult':'-1 1',
**d_charge_params})
l_scripts.append(s)
l_scripts
subg.submit_scripts(l_scripts, batch=True, submit=True)
ts1 = pmd.gromacs.GromacsTopologyFile(
'01-charges/TS1-gesp.acpype/TS1-gesp_GMX.top',
xyz='01-charges/TS1-gesp.acpype/TS1-gesp_GMX.gro')
ts3 = pmd.gromacs.GromacsTopologyFile(
'01-charges/TS3-gesp.acpype/TS3-gesp_GMX.top',
xyz='01-charges/TS3-gesp.acpype/TS3-gesp_GMX.gro')
ycp = pmd.gromacs.GromacsTopologyFile(
'01-charges/YCP-gesp.acpype/YCP-gesp_GMX.top',
xyz='01-charges/YCP-gesp.acpype/YCP-gesp_GMX.gro')
for res in ts1.residues:
if res.name == 'MOL':
res.name = 'TS1'
for res in ts3.residues:
if res.name == 'MOL':
res.name = 'TS3'
for res in ycp.residues:
if res.name == 'MOL':
res.name = 'YCP'
ts1_en = ts1.copy(pmd.gromacs.GromacsTopologyFile)
ts3_en = ts3.copy(pmd.gromacs.GromacsTopologyFile)
ts1_en.coordinates = - ts1.coordinates
ts3_en.coordinates = - ts3.coordinates
sys_ts1 = ts1 + ycp
sys_ts1_en = ts1_en + ycp
sys_ts3 = ts3 + ycp
sys_ts3_en = ts3_en + ycp
sys_ts1.write('ts1-ycp-dry.top')
sys_ts3.write('ts3-ycp-dry.top')
sys_ts1.save('ts1-ycp-dry.gro')
sys_ts1_en.save('ts1_en-ycp-dry.gro')
sys_ts3.save('ts3-ycp-dry.gro')
sys_ts3_en.save('ts3_en-ycp-dry.gro')
f_dcm = py.path.local('~/GROMACS-basics/DCM-GAFF/')
f_solvate = py.path.local('37-solvate-anti/')
box = '3.7 3.7 3.7'
solvent_source = f_dcm.join('dichloromethane-T293.15.gro')
solvent_top = f_dcm.join('dichloromethane.top')
solv_gro = f_solvate.join('gpx-cpa-dcm.gro')
ts1_top = ppl('../params/ts1-ycp-dcm.top')
ts3_top = ppl('../params/ts3-ycp-dcm.top')
l_syss = ['TS1', 'TS1_en', 'TS3', 'TS3_en']
verbose = True
solvent_counts = dict()
for key in l_syss:
sep_gro = ppl(f'{key.lower()}-ycp-dry.gro')
if not sep_gro.exists():
raise FileNotFoundError(f'{sep_gro} does not exist')
boxed_gro = f'{key.lower()}-ycp-box.gro'
solv_gro = f'{key.lower()}-ycp-dcm.gro'
with f_solvate.ensure_dir().as_cwd():
## Make box
cl = shlex.split(f'gmx_mpi editconf -f {sep_gro} ' +
f'-o {boxed_gro} -box {box}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_editconf'] = proc.stdout
proc.check_returncode()
## Solvate
cl = shlex.split(f'gmx_mpi solvate -cp {boxed_gro} ' +
f'-cs {solvent_source} -o {solv_gro}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_solvate'] = proc.stdout
proc.check_returncode()
solvent_counts[key] = get_solvent_count_solvate(proc)
if verbose:
print(f'Solvated system into {solv_gro}')
# min_solv_count = min(solvent_counts.values())
min_solv_count = 328 # want to match with syn calculations
if min(solvent_counts.values()) < min_solv_count:
raise ValueError('At least one of the structures has <328 DCMs.\n'
'Check and/or make the box larger')
for key in l_syss:
solv_gro = f'{key.lower()}-ycp-dcm.gro'
with f_solvate.as_cwd():
set_solv_count(solv_gro, min_solv_count)
struc_ts1 = pmd.load_file('ts1-ycp-dry.top')
struc_ts3 = pmd.load_file('ts3-ycp-dry.top')
struc_dcm = pmd.load_file(str(f_dcm.join('dichloromethane.top')))
struc_ts1_d = struc_ts1 + min_solv_count * struc_dcm
struc_ts1_d.save(str(ts1_top))
struc_ts3_d = struc_ts3 + min_solv_count * struc_dcm
struc_ts3_d.save(str(ts3_top))
f_min = ppl('38-relax-anti/')
f_min.ensure_dir()
f_g_basics = py.path.local('~/GROMACS-basics/')
mdp_min = f_g_basics.join('minim.mdp')
d_tops = dict(TS1=ts1_top, TS1_en=ts1_top, TS3=ts3_top, TS3_en=ts3_top)
for key in l_syss:
solv_gro = ppl(f'37-solvate-anti/{key.lower()}-ycp-dcm.gro')
tpr_min = f_min.join(f'{key.lower()}-min.tpr')
deffnm_min = f_min.join(f'{key.lower()}-min-out')
gro_min = deffnm_min + '.gro'
top = d_tops[key]
with f_min.as_cwd():
## Compile tpr
if not tpr_min.exists():
cl = shlex.split(f'gmx_mpi grompp -f {mdp_min} '
f'-c {solv_gro} '
f'-p {top} '
f'-o {tpr_min}')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_grompp_em'] = proc.stdout
proc.check_returncode()
if verbose:
print(f'Compiled em tpr to {tpr_min}')
elif verbose:
print(f'em tpr file already exists ({tpr_min})')
## Run minimization
if not gro_min.exists():
cl = shlex.split('gmx_mpi mdrun '
f'-s {tpr_min} '
f'-deffnm {deffnm_min} ')
proc = subprocess.run(cl, universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
outputs[key+'_mdrun_em'] = proc.stdout
# TODO Get the potential energy from this output
proc.check_returncode()
if verbose:
print(f'Ran {key} em to make {gro_min}')
elif verbose:
print(f'em output gro already exists (gro_min)')
f_pt = ppl('38-relax-anti/')
template = ppl('33-SA-NPT-rest-no-LINCS/template-mdp.txt')
index = ppl('../params/index-ycp.ndx')
scaling_exponent = 0.025
maxwarn = 0
start_temp = 298.
nsims = 16
verbose = True
skip_existing = True
jobs = []
failed_procs = []
for key in l_syss:
d_sub_templ = dict(
tpr = f'{key.lower()}-TOPO/npt',
deffnm = f'{key.lower()}-SA-out',
name = f'{key.lower()}-SA',
nsims = nsims,
tpn = 16,
cores = 128,
multi = True,
)
gro_equil = f_min.join(f'{key.lower()}-min-out.gro')
top = d_tops[key]
kwargs = {'template': str(template),
'topology': str(top),
'structure': str(gro_equil),
'index': str(index),
'scaling_exponent': scaling_exponent,
'start_temp': start_temp,
'maxwarn': maxwarn,
'number': nsims,
'grompp_exe': 'gmx_mpi grompp'}
with f_pt.as_cwd():
try:
os.mkdir(f'{key.lower()}-TOPO/')
except FileExistsError:
if (os.path.exists(f'{key.lower()}-TOPO/temperatures.dat') and
skip_existing):
print(f'Skipping {key} because it seems to '
'already be done.\nMoving on...')
continue
with cd(f'{key.lower()}-TOPO/'):
print(f'Now in {os.getcwd()}\nAttempting to compile TPRs...')
pts.compile_tprs(**kwargs)
print('Done compiling. Moving on...')
print(f'Now in {os.getcwd()}\nWriting submission script...')
lp_sub = pt.sim_setup.make_gromacs_sub_script(
f'gromacs-start-{key}-job.sub', **d_sub_templ)
print('Done.\nNow submitting job...')
cl = shlex.split(f'qsub {lp_sub}')
proc = subprocess.run(cl,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
if proc.returncode == 0:
output = proc.stdout
jobs.append(re.search('[0-9].+\)', output).group(0))
print(output, '\nDone.\nMoving to next...')
else:
print('\n\n'+5*'!!!---'+'\n')
print(f'Error with calling qsub on {key}')
print('Command line input was', cl)
print('Check input and try again manually.'
'\nMoving to next anyway...')
failed_procs.append(proc)
print('-----Done-----\nSummary of jobs submitted:')
for job in jobs:
print(job)
e_38s = dict()
for key in l_syss:
deffnm = f'{key.lower()}-SA-out'
e_38s[key] = dict()
d = e_38s[key]
for i in range(16):
d[i] = panedr.edr_to_df(f'38-relax-anti/{deffnm}{i}.edr')
for key in l_syss:
d = e_38s[key]
fig, axes = plot_prop_PT(d, 'Volume')
for key in l_syss:
d = e_38s[key]
fig, ax = plt.subplots()
for key in list(d.keys()):
ax.hist(d[key]['Total Energy'], bins=100)
del d[key]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Common functions
Step5: Get charges
Step6: Parameterize molecule in GAFF with ANTECHAMBER and ACPYPE
Step7: Move molecules
Step8: Minimize
Step9: Equilibrate
Step10: Setup and submit parallel tempering (PT)
Step11: The energies from the simulations can be read in as a pandas DataFrame using panedr and then analyzed or plotted to check on equilibration, convergence, etc.
Step12: Setup for several systems/molecules at once
Step13: Copied over the g16.gesp files and renamed them for each molecule.
Step14: Move molecules
Step15: Minimize
Step16: Made index file (called index-ycp.ndx) with solutes and solvent groups.
Step17: !!! Need to check distance on restraint !!!
Step18: The volumes seem to look okay.
|
454 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v2 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
                    b2 -- bias vector of shape (n_y, 1)
    """
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters = initialize_parameters(2,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
    """
    Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
    """
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
    """
    Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
    cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
    """
### START CODE HERE ### (≈ 1 line of code)
Z = np.dot(W, A) + b
### END CODE HERE ###
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently
    """
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
                the cache of linear_sigmoid_forward() (there is one, indexed L-1)
    """
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)], parameters['b'+str(l)], activation = "relu")
caches.append(cache)
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = linear_activation_forward(A, parameters['W'+str(L)], parameters['b'+str(L)], activation = "sigmoid")
caches.append(cache)
### END CODE HERE ###
assert(AL.shape == (1,X.shape[1]))
return AL, caches
X, parameters = L_model_forward_test_case()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
    """
    Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
    cost -- cross-entropy cost
    """
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = -1./m * np.sum(Y*np.log(AL) + (1-Y)*np.log(1-AL))
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = 1./m * np.dot(dZ, A_prev.T)
db = 1./m * np.sum(dZ, axis=1, keepdims=True)
dA_prev = np.dot(W.T, dZ)
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
    """
    Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
linear_cache, activation_cache = cache
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
return dA_prev, dW, db
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
    """
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
### START CODE HERE ### (approx. 2 lines)
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
### END CODE HERE ###
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads['dA' + str(l + 2)], current_cache, activation = "relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
### END CODE HERE ###
return grads
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dA1 = "+ str(grads["dA1"]))
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
    """
    Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
                  parameters["b" + str(l)] = ...
    """
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*grads["db" + str(l+1)]
### END CODE HERE ###
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
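# A minimal sketch (not one of the graded cells) of how the functions above chain together into a
# full model; the layer sizes and hyperparameters here are illustrative defaults, not prescribed values.
def L_layer_model_sketch(X, Y, layers_dims, learning_rate=0.0075, num_iterations=2500):
    parameters = initialize_parameters_deep(layers_dims)
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)        # [LINEAR->RELU]*(L-1) -> LINEAR -> SIGMOID
        cost = compute_cost(AL, Y)                         # cross-entropy cost
        grads = L_model_backward(AL, Y, caches)            # gradients for every layer
        parameters = update_parameters(parameters, grads, learning_rate)
    return parameters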
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 2 - Outline of the Assignment
Step4: Expected output
Step6: Expected output
Step8: Expected output
Step10: Expected output
Step12: Expected output
Step14: Expected Output
Step16: Expected Output
Step18: Expected output with sigmoid
Step20: Expected Output
|
455 | <ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """Download file from <url>
    :param url: URL to file
    :param file: Local file path
    """
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """Uncompress features and labels from a zip file
    :param file: The zip file to extract the data from
    """
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
# TODO: Implement Min-Max scaling for grayscale image data
out=[]
a=0.1
b=0.9
min_v=0
max_v=255
for k in range(image_data.shape[0]):
        out.append(a + ((image_data[k] - min_v) * (b - a)) / (max_v - min_v))
return out
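# Note: an equivalent vectorized form of the loop above (illustrative) is
#   a + (image_data - min_v) * (b - a) / (max_v - min_v)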
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count,labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 5
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: [Image: image/Mean_Variance_Image.png]
Step6: Checkpoint
Step7: Problem 2
Step8: [Image: image/Learn_Rate_Tune_Image.png]
Step9: Test
|
456 | <ASSISTANT_TASK:>
Python Code:
# Check your Python version
from sys import version_info
if version_info.major != 2 or version_info.minor != 7:
    raise Exception('Please use Python 2.7 to complete this project')
# Import the libraries needed for this project
import numpy as np
import pandas as pd
import visuals as vs
from IPython.display import display # Allows the use of display() for DataFrames
# Show matplotlib plots inline (nicer display in the notebook)
%matplotlib inline
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
# Display a description of the dataset
display(data.describe())
# TODO: Select three indices of data points you wish to sample from the dataset
indices = [20,60,160]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
from sklearn.cross_validation import train_test_split
from sklearn import tree
from sklearn.metrics import r2_score
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
detergents_array = data['Detergents_Paper']
new_data = data.drop(['Detergents_Paper'],axis = 1)
# TODO: Split the data into training and testing sets, using the given feature as the target
X_train, X_test, y_train, y_test = train_test_split(new_data, detergents_array,
                                                    test_size=0.25, random_state=23)
# TODO: Create a DecisionTreeRegressor and train it on the training set
regressor = tree.DecisionTreeRegressor(random_state=29)
regressor.fit(X_train, y_train)
# TODO: Report the score of the prediction using the testing set
score = regressor.score(X_test, y_test)
print "Score is", score
# Produce a scatter matrix for each pair of features in the data
pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# Display the log-transformed sample data
display(log_samples)
# For each feature, find the data points with extremely high or extremely low values
for feature in log_data.keys():
    # TODO: Calculate Q1 (the 25th percentile of the data) for the given feature
    Q1 = np.percentile(log_data[feature],25)
    # TODO: Calculate Q3 (the 75th percentile of the data) for the given feature
    Q3 = np.percentile(log_data[feature],75)
    # TODO: Use the interquartile range to calculate an outlier step (1.5 times the IQR)
    step = (Q3-Q1)*1.5
    # Display the outliers
print "Data points considered outliers for the feature '{}':".format(feature)
display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))])
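# One way (illustrative) to see which points were flagged for more than one feature, using the same
# quartile logic as the loop above:
# from collections import Counter
# flagged = []
# for feature in log_data.keys():
#     Q1, Q3 = np.percentile(log_data[feature], 25), np.percentile(log_data[feature], 75)
#     step = 1.5 * (Q3 - Q1)
#     flagged += list(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))].index)
# print [idx for idx, n in Counter(flagged).items() if n > 1]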
# OPTIONAL: Select the indices of data points you wish to remove
outliers = []
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting good_data with the same number of dimensions as features
pca = PCA(n_components=len(good_data.columns)).fit(good_data)
print log_samples
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
print pca_samples
# Generate a PCA results plot
pca_results = vs.pca_results(good_data, pca)
# Display the sample log-data after applying the PCA transformation
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
# TODO: Apply PCA by fitting good_data with only two dimensions
pca = PCA(n_components=2).fit(good_data)
# TODO: Transform good_data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# Display the sample log-data after applying the two-dimensional PCA transformation
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# Create a biplot
vs.biplot(good_data, reduced_data, pca)
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# TODO: Apply your chosen clustering algorithm to the reduced data
clusterer = KMeans(n_clusters=2, random_state=29).fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.cluster_centers_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the chosen number of clusters
score = silhouette_score(reduced_data, clusterer.labels_, metric='euclidean')
print score
# 2 => 0.419166083203
# 3 => 0.392358689958
# 4 => 0.333872503172
# 5 => 0.333872503172
# 6 => 0.359310196851
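# The commented scores above correspond to different values of n_clusters; an equivalent sweep
# (illustrative) would be:
# for k in range(2, 7):
#     km = KMeans(n_clusters=k, random_state=29).fit(reduced_data)
#     print k, silhouette_score(reduced_data, km.labels_)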
# Display the clustering results from the implementation above
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# TODO: Inverse-transform the cluster centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Read the data that includes the clustering results
cluster_data = pd.read_csv("cluster.csv")
y = cluster_data['Region']
X = cluster_data.drop(['Region'], axis = 1)
# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24)
clf = RandomForestClassifier(random_state=24)
clf.fit(X_train, y_train)
print "Score with the cluster feature:", clf.score(X_test, y_test)
# Remove the cluster feature
X_train = X_train.copy()
X_train.drop(['cluster'], axis=1, inplace=True)
X_test = X_test.copy()
X_test.drop(['cluster'], axis=1, inplace=True)
clf.fit(X_train, y_train)
print "Score without the cluster feature:", clf.score(X_test, y_test)
# Display the clustering results based on the 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data exploration
Step2: Exercise
Step3: Question 1
Step4: Question 2
Step5: Question 3
Step6: Observation
Step7: Exercise
Step8: Question 4
Step9: Question 5
Step10: Exercise: Dimensionality reduction
Step11: Observation
Step12: Visualize a biplot
Step13: Observation
Step14: Question 7
Step15: Exercise
Step16: Question 8
Step17: Answer
Step18: Answer:
|
457 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
# DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
        A new fully connected layer
    """
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
# DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
    """
    Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
        A new convolutional layer
    """
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
# DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, istraining):
Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
# No activation or bias here: batch normalization below supplies the shift, and ReLU is applied after it
layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
layer = tf.layers.batch_normalization(inputs=layer, training=istraining)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, istraining):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None, use_bias=False)
conv_layer = tf.layers.batch_normalization(conv_layer, training=istraining)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
istraining = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, istraining)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, istraining)
# Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, istraining:True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, istraining:False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
istraining:False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
istraining:False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
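# --- Illustrative aside (not part of the original exercise) ---
# tf.layers.batch_normalization keeps population mean/variance as moving averages that are
# only refreshed by the ops collected in tf.GraphKeys.UPDATE_OPS, which is why the training
# op above is created inside tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)).
# A rough equivalent with the newer tf.keras API (an assumption, not the notebook's code):
#   bn = tf.keras.layers.BatchNormalization()
#   out = tf.nn.relu(bn(dense_out, training=is_training))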
def fully_connected(prev_layer, num_units, istraining):
Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
gamma = tf.Variable(tf.ones([num_units]))
beta = tf.Variable(tf.zeros([num_units]))
pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(layer, axes=[0])
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
batch_normalization_out = tf.cond(istraining, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalization_out)
def conv_layer(prev_layer, layer_depth, istraining):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
# bias = tf.Variable(tf.zeros(out_channels))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
# conv_layer = tf.nn.bias_add(conv_layer, bias)
# conv_layer = tf.nn.relu(conv_layer)
gamma = tf.Variable(tf.ones([out_channels]))
beta = tf.Variable(tf.zeros([out_channels]))
pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(conv_layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(conv_layer, pop_mean, pop_variance, beta, gamma, epsilon)
batch_normalization_out = tf.cond(istraining, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalization_out)
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
istraining = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, istraining)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, istraining)
# Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, istraining:True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, istraining:False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels, istraining:False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels, istraining:False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]], istraining:False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
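# Illustrative aside in plain NumPy (an addition, not part of the notebook): the arithmetic
# that tf.nn.batch_normalization performs above, written out for one 2-D batch of activations.
import numpy as np
acts = np.random.randn(64, 100)                  # 64 examples, 100 units
mean, var = acts.mean(axis=0), acts.var(axis=0)  # per-unit batch statistics
gamma, beta, eps = 1.0, 0.0, 1e-3                # scale, shift, numerical-stability term
normalized = gamma * (acts - mean) / np.sqrt(var + eps) + beta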
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
458 | <ASSISTANT_TASK:>
Python Code:
# Import necessary libraries
import matplotlib.pyplot as plt
import os
import re
import shutil
import string
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import losses
# Print the TensorFlow version
print(tf.__version__)
# Download the IMDB dataset
url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
dataset = tf.keras.utils.get_file("aclImdb_v1", url,
untar=True, cache_dir='.',
cache_subdir='')
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
# Explore the dataset
os.listdir(dataset_dir)
train_dir = os.path.join(dataset_dir, 'train')
os.listdir(train_dir)
# Print the file content
sample_file = os.path.join(train_dir, 'pos/1181_9.txt')
with open(sample_file) as f:
print(f.read())
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
# Create the validation set
batch_size = 32
seed = 42
raw_train_ds = tf.keras.utils.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='training',
seed=seed)
# Print few examples
for text_batch, label_batch in raw_train_ds.take(1):
for i in range(3):
print("Review", text_batch.numpy()[i])
print("Label", label_batch.numpy()[i])
print("Label 0 corresponds to", raw_train_ds.class_names[0])
print("Label 1 corresponds to", raw_train_ds.class_names[1])
raw_val_ds = tf.keras.utils.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='validation',
seed=seed)
raw_test_ds = tf.keras.utils.text_dataset_from_directory(
'aclImdb/test',
batch_size=batch_size)
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
return tf.strings.regex_replace(stripped_html,
'[%s]' % re.escape(string.punctuation),
'')
max_features = 10000
sequence_length = 250
# Created the TextVectorization layer
vectorize_layer = # TODO 1 -- Your code goes here(
standardize=custom_standardization,
max_tokens=max_features,
output_mode='int',
output_sequence_length=sequence_length)
# Make a text-only dataset (without labels), then call adapt
train_text = raw_train_ds.map(lambda x, y: x)
vectorize_layer.adapt(train_text)
def vectorize_text(text, label):
text = tf.expand_dims(text, -1)
return vectorize_layer(text), label
# retrieve a batch (of 32 reviews and labels) from the dataset
text_batch, label_batch = next(iter(raw_train_ds))
first_review, first_label = text_batch[0], label_batch[0]
print("Review", first_review)
print("Label", raw_train_ds.class_names[first_label])
print("Vectorized review", vectorize_text(first_review, first_label))
# Print the token (string) that each integer corresponds
print("1287 ---> ",vectorize_layer.get_vocabulary()[1287])
print(" 313 ---> ",vectorize_layer.get_vocabulary()[313])
print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary())))
# Apply the TextVectorization layer you created earlier to the train, validation, and test dataset
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
embedding_dim = 16
# Create your neural network
model = tf.keras.Sequential([
layers.Embedding(max_features + 1, embedding_dim),
layers.Dropout(0.2),
layers.GlobalAveragePooling1D(),
layers.Dropout(0.2),
layers.Dense(1)])
model.summary()
# Configure the model to use an optimizer and a loss function
model.compile(loss=# TODO 2 -- Your code goes here(from_logits=True),
optimizer='adam',
metrics=tf.metrics.BinaryAccuracy(threshold=0.0))
# Train the model
epochs = 10
history = # TODO 3 -- Your code goes here(
train_ds,
validation_data=val_ds,
epochs=epochs)
# Evaluate the model
loss, accuracy = # TODO 4 -- Your code goes here(test_ds)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
history_dict = history.history
history_dict.keys()
# Plot the loss over time
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Plot the accuracy over time
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Export the model
export_model = tf.keras.Sequential([
vectorize_layer,
model,
layers.Activation('sigmoid')
])
# TODO 5 -- Your code goes here(
loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy']
)
# Test it with `raw_test_ds`, which yields raw strings
loss, accuracy = export_model.evaluate(raw_test_ds)
print(accuracy)
examples = [
"The movie was great!",
"The movie was okay.",
"The movie was terrible..."
]
export_model.predict(examples)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sentiment analysis
Step2: The aclImdb/train/pos and aclImdb/train/neg directories contain many text files, each of which is a single movie review. Let's take a look at one of them.
Step3: Load the dataset
Step4: Next, you will use the text_dataset_from_directory utility to create a labeled tf.data.Dataset. tf.data is a powerful collection of tools for working with data.
Step5: As you can see above, there are 25,000 examples in the training folder, of which you will use 80% (or 20,000) for training. As you will see in a moment, you can train a model by passing a dataset directly to model.fit. If you're new to tf.data, you can also iterate over the dataset and print out a few examples as follows.
Step6: Notice the reviews contain raw text (with punctuation and occasional HTML tags like <br/>). You will show how to handle these in the following section.
Step7: Next, you will create a validation and test dataset. You will use the remaining 5,000 reviews from the training set for validation.
Step8: Prepare the dataset for training
Step9: Next, you will create a TextVectorization layer. You will use this layer to standardize, tokenize, and vectorize our data. You set the output_mode to int to create unique integer indices for each token.
Step10: Next, you will call adapt to fit the state of the preprocessing layer to the dataset. This will cause the model to build an index of strings to integers.
Step11: Let's create a function to see the result of using this layer to preprocess some data.
Step12: As you can see above, each token has been replaced by an integer. You can lookup the token (string) that each integer corresponds to by calling .get_vocabulary() on the layer.
Step13: You are nearly ready to train your model. As a final preprocessing step, you will apply the TextVectorization layer you created earlier to the train, validation, and test dataset.
Step14: Configure the dataset for performance
Step15: Create the model
Step16: The layers are stacked sequentially to build the classifier
Step17: Train the model
Step18: Evaluate the model
Step19: This fairly naive approach achieves an accuracy of about 86%.
Step20: There are four entries
Step21: In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy.
Step22: Inference on new data
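The TODO markers in the code above can be completed with standard tf.keras calls; one plausible completion (a hedged sketch, not necessarily the official solution, and note that in older TensorFlow 2.x releases TextVectorization lives under tf.keras.layers.experimental.preprocessing) is:
vectorize_layer = layers.TextVectorization(standardize=custom_standardization, max_tokens=max_features, output_mode='int', output_sequence_length=sequence_length)
model.compile(loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=tf.metrics.BinaryAccuracy(threshold=0.0))
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)
loss, accuracy = model.evaluate(test_ds)
export_model.compile(loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy'])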
|
459 | <ASSISTANT_TASK:>
Python Code:
from lsst.cwfs.instrument import Instrument
from lsst.cwfs.algorithm import Algorithm
from lsst.cwfs.image import Image, readFile
import lsst.cwfs.plots as plots
fieldXY = [1.185,1.185]
I1 = Image(readFile('../tests/testImages/LSST_NE_SN25/z11_0.25_intra.txt'), fieldXY, Image.INTRA)
I2 = Image(readFile('../tests/testImages/LSST_NE_SN25/z11_0.25_extra.txt'), fieldXY, Image.EXTRA)
plots.plotImage(I1.image,'intra')
plots.plotImage(I2.image,'extra')
inst=Instrument('lsst',I1.sizeinPix)
algo=Algorithm('exp',inst,0)
algo.runIt(inst,I1,I2,'offAxis')
print(algo.zer4UpNm)
plots.plotZer(algo.zer4UpNm,'nm')
plots.plotImage(algo.Wconverge,'Final wavefront')
plots.plotImage(algo.Wconverge,'Final wavefront with pupil mask applied', mask=algo.pMask)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the image objects. Input arguments
Step2: Define the instrument. Input arguments
Step3: Define the algorithm being used. Input arguments
Step4: Run it
Step5: Print the Zernikes Zn (n>=4)
Step6: plot the Zernikes Zn (n>=4)
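A small follow-up one might add (illustrative; it assumes algo.zer4UpNm is a NumPy array of Zernike coefficients in nanometres on an orthonormal basis, so the quadrature sum gives the RMS wavefront error):
import numpy as np
rms_wavefront_error = np.sqrt(np.sum(np.square(algo.zer4UpNm)))
print(rms_wavefront_error, "nm RMS")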
|
460 | <ASSISTANT_TASK:>
Python Code:
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
# These arrays will take/put particular indices in the data
take = [slice(None)] * X.ndim
put = [slice(None)] * X.ndim
if ix_delay > 0:
take[-1] = slice(None, -ix_delay)
put[-1] = slice(ix_delay, None)
elif ix_delay < 0:
take[-1] = slice(-ix_delay, None)
put[-1] = slice(None, ix_delay)
X_del[ii][put] = X[take]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
# Simulate this epoch and add random noise
noise_amp = .002
y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
(X_train, X_test, y_train, y_test)]
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores[ii] = rf.score(X_test, y_test)
models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
plt.xticks([], [])
plt.yticks([], [])
plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
alpha=alpha)
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores_lap[ii] = rf.score(X_test, y_test)
models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
scores_lap[ix_best_alpha_lap]),
(ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Laplacian')
ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load audio data
Step2: Create a receptive field
Step3: Simulate a neural response
Step4: Fit a model to recover this receptive field
Step5: Visualize the effects of regularization
Step6: Using different regularization types
Step7: Compare model performance
Step8: Plot the original STRF, and the one that we recovered with modeling.
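For intuition, the heart of the ReceptiveField fit above is ridge regression on a time-lagged design matrix; a bare-bones NumPy sketch (illustrative only, with assumed shapes) is:
import numpy as np
X = np.random.randn(1000, 20 * 26)   # (n_times, n_freqs * n_delays) lagged spectrogram
y = np.random.randn(1000)            # simulated neural response
alpha = 1.0                          # ridge regularization strength
coefs = np.linalg.solve(X.T @ X + alpha * np.eye(X.shape[1]), X.T @ y)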
|
461 | <ASSISTANT_TASK:>
Python Code:
import sys, os
spark_home = os.environ.get("SPARK_HOME", None)
# Add the spark python sub-directory to the path
sys.path.insert(0, spark_home + "/python")
# Add the py4j to the path.
# You may need to change the version number to match your install
sys.path.insert(0, os.path.join(spark_home + "/python/lib/py4j-0.10.4-src.zip"))
# Initialize PySpark to predefine the SparkContext variable 'sc'
filename = spark_home+"/python/pyspark/shell.py"
exec(open(filename).read())
chunkSize = 4000
alpha = 0.25
iterations = 5
train_file = "data/MNIST_train.dat"
test_file = "data/MNIST_test.dat"
sc.appName = "Python Logistic Regression"
print("* LogisticRegression Application *")
print(" # train file: " + train_file)
print(" # test file: " + test_file)
accel = int(input("Select mode (0: SW-only, 1: HW accelerated) : "))
from pyspark.mllib_accel.classification import LogisticRegression
trainRDD = sc.textFile(train_file).coalesce(1)
numClasses = 10
numFeatures = 784
LR = LogisticRegression(numClasses, numFeatures)
weights = LR.train(trainRDD, chunkSize, alpha, iterations, accel)
with open("data/weights.out", "w") as weights_file:
for k in range(0, numClasses):
for j in range(0, numFeatures):
if j == 0:
weights_file.write(str(round(weights[k * numFeatures + j], 5)))
else:
weights_file.write("," + str(round(weights[k * numFeatures + j], 5)))
weights_file.write("\n")
weights_file.close()
testRDD = sc.textFile(test_file)
LR.test(testRDD)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Logistic Regression Application
Step2: HW accelerated vs SW-only
Step3: Instantiate a Logistic Regression model
Step4: Train the LR model
Step5: Test the LR model
|
462 | <ASSISTANT_TASK:>
Python Code:
def reverse_words (S):
#TODO: implement me
pass
from nose.tools import assert_equal
class UnitTest (object):
def testReverseWords(self):
assert_equal(reverse_words('the sun is hot'), 'eht nus si toh')
assert_equal(reverse_words(''), None)
assert_equal(reverse_words('123 456 789'), '321 654 987')
assert_equal(reverse_words('magic'), 'cigam')
print('Success: reverse_words')
def main():
test = UnitTest()
test.testReverseWords()
if __name__=="__main__":
main()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
463 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
import matplotlib
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
Alpha = 3./2.
ab_dist = np.array([2., 3.,3.5, 6., 6.7, 7., 8.2, 14., 18., 20.])
ab_per = ab_dist ** (Alpha)
# a figure of the 1-d projection, the key for SETI.
plt.figure(figsize=(5,4))
plt.plot(ab_dist, ab_per, 'k')
plt.scatter(ab_dist, ab_per,color='k')
plt.xlabel('Home $-$ Beacon Separation (pc)')
plt.ylabel('P$_{orb}$ of Beacon (days)')
plt.ylim(0,100)
plt.savefig('dist_per.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
# figure angle on sky to plot
# theta = np.random.random(len(ab_dist)) * 2. * np.pi
# freeze this random config, i like it
theta = np.array([ 4.52448995, 3.46489278, 0.33872438, 1.6891746 , 2.37611205,
2.72516744, 5.41764719, 4.01860732, 1.72938583, 0.60279578])
x = ab_dist * np.cos(theta)
y = ab_dist * np.sin(theta)
# a figure of the 2-d observed (sky) plane
plt.figure(figsize=(6,5))
plt.axes()
# the central red cicrle
circ = plt.Circle((0,0), radius=1.4, fc='r', zorder=0)
plt.gca().add_patch(circ)
# make the concentric circles
for k in range(5,29,3):
circ = plt.Circle((0,0), radius=k, fc='none', alpha=0.35, color='k')
plt.gca().add_patch(circ)
plt.scatter(x,y, c=ab_per, cmap=cm.viridis_r, s=90, alpha=0.7, edgecolors='k', zorder=2)
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('P$_{orb}$ of Beacon (days)')
plt.savefig('sky_per.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
plt.show()
n_b = 1000 # number of beacon systems
# n_b = 1e6
i_max = 90.
# i_max = 10. # what if we forced beacons to be roughly aligned w/ Galactic plane?
d_min = 2 # min distance from ET home world to place beacons (pc)
d_max = 50 # max distance from ET home world to place beacons (pc)
d_home = 1000 # distance from Earth to ET Home world (in pc)
alpha = 3. / 2. # the coefficient to encode the period-distance relationship - in this case Kepler 3rd law
R_star = 6.955e10 # cm (R_Sun)
R_planet = 7149235 # cm (R_Jupiter)
AU = 1.49598e13 # cm (1 AU)
#__ the part to repeat __
rad = np.random.random(n_b) * (d_max - d_min) + d_min # dist from ET Home to Beacons (pc)
per = rad**Alpha # the period, in days by arbitrary construction
a_AU = (per / 365.)**(2./3.) # the orbital semimajor axis (in AU), assuming solar scaling
incl = np.random.random(n_b) * i_max # orbit plane inclination (deg)
#__
# plt.scatter(a_AU, per, s=90, alpha=0.6)
# plt.xlabel('a (AU)')
# plt.ylabel('Period (days)')
# determine if beacon is "visible", i.e. does it Transit?
b = a_AU * AU * np.sin(incl / 180. * np.pi)
Transit = b < (R_star + R_planet)
no_Transit = b >= (R_star + R_planet)
print(sum(Transit), n_b, float(sum(Transit)) / n_b)
# plt.scatter(a_AU[no_Transit], per[no_Transit], s=20, alpha=0.6, c='blue', lw=0)
# plt.scatter(a_AU[Transit], per[Transit], s=100, alpha=0.6, c='red', lw=0)
# plt.xlabel('a (AU)')
# plt.ylabel('Period (days)')
# plt.xlim(0,2)
# make a plot of fraction of systems that transit as a function of orbital semimajor axis (a)
yy, aa = np.histogram(a_AU[Transit], bins=25, range=(min(a_AU),1))
nn, aa = np.histogram(a_AU[no_Transit], bins=25, range=(min(a_AU),1))
plt.plot((aa[1:] + aa[0:-1])/2., np.array(yy, dtype='float') / nn)
plt.xlabel('a (AU)')
plt.ylabel('Fraction that transit')
# now put beacons in random places in space to illustrate on the sky
theta = np.random.random(n_b) * 2 * np.pi
phi = np.random.random(n_b) * np.pi
x = rad * np.cos(theta)
y = rad * np.sin(theta)
plt.figure(figsize=(5,5))
plt.scatter(x[no_Transit], y[no_Transit], s=10, alpha=0.1)
plt.scatter(x[Transit], y[Transit], s=100, alpha=0.5, c='red')
plt.xlim(-60,60)
plt.ylim(-60,60)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.savefig('3d_model.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
'''
# repeat this 3d toy model 1000 times to get smooth recovery fraction
hist_smooth = np.zeros_like(yy)
num_smooth = np.zeros_like(nn)
num_transit = np.zeros(1000)
for k in range(1000):
rad = np.random.random(n_b) * (d_max - d_min) + d_min # dist from ET Home to Beacons (pc)
per = rad**alpha # the period, in days by arbitrary construction
a_AU = (per / 365.)**(2./3.) # the orbital semimajor axis (in AU), assuming solar scaling
incl = np.random.random(n_b) * i_max # orbit plane inclination (deg)
b = a_AU * AU * np.sin(incl / 180. * np.pi)
Transit = b < (R_star + R_planet)
no_Transit = b >= (R_star + R_planet)
yy, aa = np.histogram(a_AU[Transit], bins=25, range=(0,2))
nn, aa = np.histogram(a_AU[no_Transit], bins=25, range=(0,2))
hist_smooth = hist_smooth + np.array(yy, dtype='float')
num_smooth = num_smooth + np.array(nn, dtype='float')
# plt.plot((aa[1:] + aa[0:-1])/2., np.array(yy, dtype='float') / nn, alpha=0.1, c='k')
num_transit[k] = (float(sum(Transit)) / n_b)
plt.plot((aa[1:] + aa[0:-1])/2., hist_smooth / num_smooth, lw=2);
plt.xlabel('a (AU)');
plt.ylabel('Fraction that transit');
# plt.savefig('recov_fraction.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
print(np.mean(num_transit), np.std(num_transit))
''';
plt.figure(figsize=(5,5))
plt.scatter(x[no_Transit], y[no_Transit], s=10, alpha=0.1)
plt.scatter(x[Transit], y[Transit], s=100, alpha=0.5, c=per[Transit], edgecolors='k', cmap=cm.viridis_r)
plt.xlim(-60,60)
plt.ylim(-60,60)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.savefig('3dcolor.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
Nother = 200
plt.figure(figsize=(5,5))
# plt.scatter(x[no_Transit], y[no_Transit], s=10, alpha=0.1)
plt.scatter(x[Transit], y[Transit], alpha=0.5, c=per[Transit], cmap=cm.viridis_r)
plt.scatter(np.random.random(Nother)*100-50,np.random.random(Nother)*100-50,
c=np.random.random(Nother)*250+5, alpha=.5, s=10)
plt.xlim(-60,60)
plt.ylim(-60,60)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.savefig('3dcolor_bkgd.pdf', dpi=150, bbox_inches='tight', pad_inches=0.25)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The idea is simple
Step2: a simple 3D model
Step3: OK, so our toy model works... but how do we actually detect these beacons among the noise of naturally occuring exoplanets we've detected?
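A tiny illustration of how an observer could read the encoding back off the sky (an assumed follow-up, not in the notebook): invert P = d**1.5 to recover each beacon's distance from the home world.
import numpy as np
observed_periods = np.array([8.0, 52.4, 89.4])      # days, hypothetical detected beacons
home_distance_pc = observed_periods ** (2.0 / 3.0)  # inverse of the alpha = 3/2 relation
print(home_distance_pc)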
|
464 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format='retina'
from __future__ import absolute_import, division, print_function
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import GridSpec
import seaborn as sns
import numpy as np
import pandas as pd
import os, sys
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
sns.set_context("poster", font_scale=1.3)
import missingno as msno
import pandas_profiling
from sklearn.datasets import make_blobs
import time
def save_subgroup(dataframe, g_index, subgroup_name, prefix='raw_'):
save_subgroup_filename = "".join([prefix, subgroup_name, ".csv.gz"])
dataframe.to_csv(save_subgroup_filename, compression='gzip', encoding='UTF-8')
test_df = pd.read_csv(save_subgroup_filename, compression='gzip', index_col=g_index, encoding='UTF-8')
# Test that we recover what we send in
if dataframe.equals(test_df):
print("Test-passed: we recover the equivalent subgroup dataframe.")
else:
print("Warning -- equivalence test!!! Double-check.")
def load_subgroup(filename, index_col=[0]):
return pd.read_csv(filename, compression='gzip', index_col=index_col)
clean_players = load_subgroup("cleaned_players.csv.gz")
players = load_subgroup("raw_players.csv.gz", )
countries = load_subgroup("raw_countries.csv.gz")
referees = load_subgroup("raw_referees.csv.gz")
agg_dyads = pd.read_csv("raw_dyads.csv.gz", compression='gzip', index_col=[0, 1])
agg_dyads.head(10)
# Test if the number of games is equal to the victories + ties + defeats in the dataset
all(agg_dyads['games'] == agg_dyads.victories + agg_dyads.ties + agg_dyads.defeats)
# Sanity check passes
len(agg_dyads.reset_index().set_index('playerShort'))
agg_dyads['totalRedCards'] = agg_dyads['yellowReds'] + agg_dyads['redCards']
agg_dyads.rename(columns={'redCards': 'strictRedCards'}, inplace=True)
agg_dyads.head()
clean_players.head()
agg_dyads.head()
agg_dyads.reset_index().head()
agg_dyads.reset_index().set_index('playerShort').head()
player_dyad = (clean_players.merge(agg_dyads.reset_index().set_index('playerShort'),
left_index=True,
right_index=True))
player_dyad.head()
clean_dyads = (agg_dyads.reset_index()[agg_dyads.reset_index()
.playerShort
.isin(set(clean_players.index))
]).set_index(['refNum', 'playerShort'])
clean_dyads.head()
clean_dyads.shape, agg_dyads.shape, player_dyad.shape
# inspired by https://github.com/mathewzilla/redcard/blob/master/Crowdstorming_visualisation.ipynb
colnames = ['games', 'totalRedCards']
j = 0
out = [0 for _ in range(sum(clean_dyads['games']))]
for index, row in clean_dyads.reset_index().iterrows():
n = row['games']
d = row['totalRedCards']
ref = row['refNum']
player = row['playerShort']
for _ in range(n):
row['totalRedCards'] = 1 if (d-_) > 0 else 0
rowlist=list([ref, player, row['totalRedCards']])
out[j] = rowlist
j += 1
tidy_dyads = pd.DataFrame(out, columns=['refNum', 'playerShort', 'redcard'],).set_index(['refNum', 'playerShort'])
# 3092
tidy_dyads.redcard.sum()
# Notice this is longer than before
clean_dyads.games.sum()
tidy_dyads.shape
# Ok, this is a bit crazy... tear it apart and figure out what each piece is doing if it's not clear
clean_referees = (referees.reset_index()[referees.reset_index()
.refNum.isin(tidy_dyads.reset_index().refNum
.unique())
]).set_index('refNum')
clean_referees.shape, referees.shape
clean_countries = (countries.reset_index()[countries.reset_index()
.refCountry
.isin(clean_referees.refCountry
.unique())
].set_index('refCountry'))
clean_countries.shape, countries.shape
tidy_dyads.head()
tidy_dyads.to_csv("cleaned_dyads.csv.gz", compression='gzip')
tidy_dyads.shape
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: About the Data
Step2: Tidy Dyads and Starting Joins
Step3: Remove records that come from players who don't have a skintone rating
Step4: Disaggregate
|
465 | <ASSISTANT_TASK:>
Python Code:
import scipy as sp
import openpnm as op
import matplotlib.pyplot as plt
%matplotlib inline
wrk = op.Workspace() # Initialize a workspace object
wrk.loglevel=50
net = op.network.CubicDual(shape=[6, 6, 6])
from openpnm.topotools import plot_connections, plot_coordinates
fig1 = plot_coordinates(network=net, pores=net.pores('primary'), c='b')
fig2 = plot_coordinates(network=net, pores=net.pores('primary'), c='b')
fig2 = plot_coordinates(network=net, pores=net.pores('secondary'), fig=fig2, c='r')
fig3 = plot_coordinates(network=net, pores=net.pores('primary'), c='b')
fig3 = plot_coordinates(network=net, pores=net.pores('secondary'), fig=fig3, c='r')
fig3 = plot_connections(network=net, throats=net.throats('primary'), fig=fig3, c='b')
fig4 = plot_coordinates(network=net, pores=net.pores('primary'), c='b')
fig4 = plot_coordinates(network=net, pores=net.pores('secondary'), fig=fig4, c='r')
fig4 = plot_connections(network=net, throats=net.throats('primary'), fig=fig4, c='b')
fig4 = plot_connections(network=net, throats=net.throats('secondary'), fig=fig4, c='r')
fig5 = plot_coordinates(network=net, pores=net.pores('primary'), c='b')
fig5 = plot_coordinates(network=net, pores=net.pores('secondary'), fig=fig5, c='r')
fig5 = plot_connections(network=net, throats=net.throats('primary'), fig=fig5, c='b')
fig5 = plot_connections(network=net, throats=net.throats('secondary'), fig=fig5, c='r')
fig5 = plot_connections(network=net, throats=net.throats('interconnect'), fig=fig5, c='g')
print(net.num_pores('primary'))
print(net.num_pores('secondary'))
print(net.num_throats('primary'))
print(net.num_throats('secondary'))
print(net.num_throats('interconnect'))
geo_pri = op.geometry.GenericGeometry(network=net,
pores=net.pores('primary'),
throats=net.throats('primary'))
geo_sec = op.geometry.GenericGeometry(network=net,
pores=net.pores('secondary'),
throats=net.throats('secondary'))
geo_inter = op.geometry.GenericGeometry(network=net,
throats=net.throats('interconnect'))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's create a CubicDual and visualize it in Paraview
Step2: The resulting network has two sets of pores, labelled as blue and red in the image below. By default, the main cubic lattice is referred to as the 'primary' network which is colored blue, and the interpenetrating dual is referred to as the 'secondary' network shown in red. These names are used to label the pores and throats associated with each network. These names can be changed by sending label_1 and label_2 arguments during initialization. The throats connecting the 'primary' and 'secondary' pores are labelled 'interconnect', and they can be seen as the diagonal connections below.
Step3: Inspection of this image shows that the 'primary' pores are located at expected locations for a cubic network including on the faces of the cube, and 'secondary' pores are located at the interstitial locations. There is one important nuance to note
Step4: Now that this topology is created, the next step would be to create Geometry objects for each network, and an additional one for the 'interconnect' throats
|
466 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([1, 2, 3])
print(repr(a), a.shape, end="\n\n")
b = np.array([(1, 2, 3), (4, 5, 6)])
print(repr(b), b.shape)
print(b.T, end="\n\n") # transpoe uma matriz
print(a + b, end="\n\n") # soma um vetor linha/coluna a todas as linhas/colunas de uma matriz
print(b - a, end="\n\n") # subtrai um vetor linha/coluna a todas as linhas/colunas de uma matriz
# multiplica os elementos de um vetor linha/coluna
# a todos os elementos das linhas/colunas de uma matriz
print(a * b, end="\n\n")
print(a**2, end="\n\n") # eleva os elementos ao quadrado
print(10*np.sin(1)) # seno trigonométrico de 1
print(10*np.sin(a)) # seno trigonométrico de cada elemento de a
b<35
print(b,end="\n\n")
print('Axis 1: %s' % b[0], end="\n\n") # retorna um vetor
print(np.average(b), end="\n\n") # tira a média dos elementos
print(np.average(b, axis=1), end="\n\n") # tira a média dos elementos dos vetores no eixo 1
print(b.sum(), end="\n\n") # retorna as somas dos valores
print(b.sum(axis=1), end="\n\n") # retorna as somas dos valores no eixo 1
print(b.min(), end="\n\n") # retorna o menor valor
print(b.max(), end="\n\n") # retorna o maior valor
print(np.zeros((3, 5)), end="\n\n") # array de zeros com dimensões [3,5]
print(np.ones((2,3,4)), end="\n\n------------\n\n") # array de uns com dimensões [2,3,4]
print(np.full((2, 2), 10), end="\n\n") # array de 10 com dimensões [2,2]
print(np.arange(10, 30, 5), end="\n\n") # valores de 10 a 30 com passo 5
print(np.random.rand(2, 3), end="\n\n") # array cde dimensao [2,3] com valores aleatórios
d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d
d[:, 0] # todas as linhas (:) da primeira coluna (0)
d[:, 1] # todas as linhas (:) da segunda coluna (1)
d[:, 0:2] # todas as linhas (:) das colunas de 0 à 2
d[:, 2] # todas as linhas (:) da terceira coluna (2)
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
np.save('/tmp/x.npy', x)
del(x)
x = np.load('/tmp/x.npy')
print(x)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Its foundation is np.array, which returns the array object on which all the functions are implemented
Step2: The array comes with several operators already implemented
Step3: NumPy provides many mathematical operations, which can be applied to a single value or to an array of values.
Step4: A boolean operation can be applied to every element of an array, returning an array of the same dimensions with the result of the operation
Step5: There are also predefined utility operations on an array
Step6: There are also functions to generate pre-initialized arrays
Step7: We can select ranges of the array, allowing us to retrieve only a portion of it
Step8: NumPy also has functions to save/load arrays to/from files
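A related convenience worth knowing (an illustrative addition): np.savez stores several named arrays in a single .npz file.
import numpy as np
a = np.arange(5)
b = np.linspace(0.0, 1.0, 5)
np.savez('/tmp/arrays.npz', a=a, b=b)
loaded = np.load('/tmp/arrays.npz')
print(loaded['a'], loaded['b'])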
|
467 | <ASSISTANT_TASK:>
Python Code:
try:
# Use the Colab's preinstalled TensorFlow 2.x
%tensorflow_version 2.x
except:
pass
# Install the required packages
!pip install fastavro
!pip install tensorflow-io==0.9.0
# Install the specified package
!pip install google-cloud-bigquery-storage
PROJECT_ID = "<YOUR PROJECT>" #@param {type:"string"}
! gcloud config set project $PROJECT_ID
%env GCLOUD_PROJECT=$PROJECT_ID
# Import necessary libraries
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from six.moves import urllib
import tempfile
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from google.api_core.exceptions import GoogleAPIError
LOCATION = 'us'
# Storage directory
DATA_DIR = os.path.join(tempfile.gettempdir(), 'census_data')
# Download options.
DATA_URL = 'https://storage.googleapis.com/cloud-samples-data/ml-engine/census/data'
TRAINING_FILE = 'adult.data.csv'
EVAL_FILE = 'adult.test.csv'
TRAINING_URL = '%s/%s' % (DATA_URL, TRAINING_FILE)
EVAL_URL = '%s/%s' % (DATA_URL, EVAL_FILE)
DATASET_ID = 'census_dataset'
TRAINING_TABLE_ID = 'census_training_table'
EVAL_TABLE_ID = 'census_eval_table'
CSV_SCHEMA = [
bigquery.SchemaField("age", "FLOAT64"),
bigquery.SchemaField("workclass", "STRING"),
bigquery.SchemaField("fnlwgt", "FLOAT64"),
bigquery.SchemaField("education", "STRING"),
bigquery.SchemaField("education_num", "FLOAT64"),
bigquery.SchemaField("marital_status", "STRING"),
bigquery.SchemaField("occupation", "STRING"),
bigquery.SchemaField("relationship", "STRING"),
bigquery.SchemaField("race", "STRING"),
bigquery.SchemaField("gender", "STRING"),
bigquery.SchemaField("capital_gain", "FLOAT64"),
bigquery.SchemaField("capital_loss", "FLOAT64"),
bigquery.SchemaField("hours_per_week", "FLOAT64"),
bigquery.SchemaField("native_country", "STRING"),
bigquery.SchemaField("income_bracket", "STRING"),
]
UNUSED_COLUMNS = ["fnlwgt", "education_num"]
def create_bigquery_dataset_if_necessary(dataset_id):
# Construct a full Dataset object to send to the API.
client = bigquery.Client(project=PROJECT_ID)
dataset = bigquery.Dataset(bigquery.dataset.DatasetReference(PROJECT_ID, dataset_id))
dataset.location = LOCATION
try:
# Constructs the API request
dataset = # TODO -- Your code goes here
return True
except GoogleAPIError as err:
if err.code != 409: # http_client.CONFLICT
raise
return False
def load_data_into_bigquery(url, table_id):
create_bigquery_dataset_if_necessary(DATASET_ID)
client = bigquery.Client(project=PROJECT_ID)
dataset_ref = client.dataset(DATASET_ID)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
job_config.source_format = bigquery.SourceFormat.CSV
job_config.schema = CSV_SCHEMA
# Constructs the Job to load data into table
load_job = # TODO -- Your code goes here(
url, table_ref, job_config=job_config
)
print("Starting job {}".format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print("Job finished.")
destination_table = client.get_table(table_ref)
print("Loaded {} rows.".format(destination_table.num_rows))
load_data_into_bigquery(TRAINING_URL, TRAINING_TABLE_ID)
load_data_into_bigquery(EVAL_URL, EVAL_TABLE_ID)
%%bigquery --use_bqstorage_api
SELECT * FROM `<YOUR PROJECT>.census_dataset.census_training_table` LIMIT 5
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow_io.bigquery import BigQueryClient
from tensorflow_io.bigquery import BigQueryReadSession
def transform_row(row_dict):
# Trim all string tensors
trimmed_dict = { column:
(tf.strings.strip(tensor) if tensor.dtype == 'string' else tensor)
for (column,tensor) in row_dict.items()
}
# Extract feature column
income_bracket = trimmed_dict.pop('income_bracket')
# Convert feature column to 0.0/1.0
income_bracket_float = tf.cond(tf.equal(tf.strings.strip(income_bracket), '>50K'),
lambda: tf.constant(1.0),
lambda: tf.constant(0.0))
return (trimmed_dict, income_bracket_float)
def read_bigquery(table_name):
tensorflow_io_bigquery_client = BigQueryClient()
read_session = tensorflow_io_bigquery_client.read_session(
"projects/" + PROJECT_ID,
PROJECT_ID, table_name, DATASET_ID,
list(field.name for field in CSV_SCHEMA
if not field.name in UNUSED_COLUMNS),
list(dtypes.double if field.field_type == 'FLOAT64'
else dtypes.string for field in CSV_SCHEMA
if not field.name in UNUSED_COLUMNS),
requested_streams=2)
# Read the rows in parallel streams
dataset = # TODO -- Your code goes here
# Apply transformation to the dataset
transformed_ds = # TODO -- Your code goes here(transform_row)
return transformed_ds
BATCH_SIZE = 32
training_ds = read_bigquery(TRAINING_TABLE_ID).shuffle(10000).batch(BATCH_SIZE)
eval_ds = read_bigquery(EVAL_TABLE_ID).batch(BATCH_SIZE)
def get_categorical_feature_values(column):
query = 'SELECT DISTINCT TRIM({}) FROM `{}`.{}.{}'.format(column, PROJECT_ID, DATASET_ID, TRAINING_TABLE_ID)
client = bigquery.Client(project=PROJECT_ID)
dataset_ref = client.dataset(DATASET_ID)
job_config = bigquery.QueryJobConfig()
query_job = client.query(query, job_config=job_config)
result = query_job.to_dataframe()
return result.values[:,0]
from tensorflow import feature_column
feature_columns = []
# numeric cols
for header in ['capital_gain', 'capital_loss', 'hours_per_week']:
feature_columns.append(feature_column.numeric_column(header))
# categorical cols
for header in ['workclass', 'marital_status', 'occupation', 'relationship',
'race', 'native_country', 'education']:
categorical_feature = feature_column.categorical_column_with_vocabulary_list(
header, get_categorical_feature_values(header))
categorical_feature_one_hot = feature_column.indicator_column(categorical_feature)
feature_columns.append(categorical_feature_one_hot)
# bucketized cols
age = feature_column.numeric_column('age')
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
Dense = tf.keras.layers.Dense
model = tf.keras.Sequential(
[
feature_layer,
Dense(100, activation=tf.nn.relu, kernel_initializer='uniform'),
Dense(75, activation=tf.nn.relu),
Dense(50, activation=tf.nn.relu),
Dense(25, activation=tf.nn.relu),
Dense(1, activation=tf.nn.sigmoid)
])
# Compile Keras model
model.compile(
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model
# TODO -- Your code goes here(training_ds, epochs=5)
# Evaluate the model
loss, accuracy = # TODO -- Your code goes here(eval_ds)
print("Accuracy", accuracy)
sample_x = {
'age' : np.array([56, 36]),
'workclass': np.array(['Local-gov', 'Private']),
'education': np.array(['Bachelors', 'Bachelors']),
'marital_status': np.array(['Married-civ-spouse', 'Married-civ-spouse']),
'occupation': np.array(['Tech-support', 'Other-service']),
'relationship': np.array(['Husband', 'Husband']),
'race': np.array(['White', 'Black']),
'gender': np.array(['Male', 'Male']),
'capital_gain': np.array([0, 7298]),
'capital_loss': np.array([0, 0]),
'hours_per_week': np.array([40, 36]),
'native_country': np.array(['United-States', 'United-States'])
}
model.predict(sample_x)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Please ignore the incompatible errors.
Step2: Set your PROJECT ID
Step3: Import Python libraries, define constants
Step4: Import census data into BigQuery
Step5: Load Census data in BigQuery.
Step6: Confirm that data was imported
Step7: Load census data in TensorFlow DataSet using BigQuery reader
Step8: Define feature columns
Step9: Build and train model
Step10: Train model
Step11: Evaluate model
Step12: Evaluate a couple of random samples
|
468 | <ASSISTANT_TASK:>
Python Code:
# Importing numpy for math, and matplotlib for plots
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
class Arm:
def __init__(self, mu=None, sigma=None):
if mu is None:
self.mu = np.absolute(np.random.uniform())
else:
self.mu = mu
if sigma is None:
self.sigma=np.absolute(np.random.uniform())
else:
self.sigma = sigma
def pull(self):
reward = np.random.normal(self.mu, self.sigma, 1)
return reward
def get_arms(k):
# returns a list of arms
arms = []
for i in range(k):
arms.append(Arm())
return arms
class agent:
def __init__(self, arms, play_once=1):
self.expectations = np.zeros(len(arms))
self.times_played = np.zeros(len(arms))
self.arms = arms
self.number_of_arms = len(arms)
self.N = 0
self.reward_history = []
self.choice_history = []
if play_once == 1:
for i in range(self.number_of_arms):
self.expectations[i] = self.play(i)
def play(self, index):
reward = self.arms[index].pull()
self.times_played[index] += 1
self.N += 1
self.choice_history.append(index)
self.reward_history.append(reward)
return reward
def policy(self):
pass
def update_expectations(self, reward, index):
self.expectations[index] += (reward - self.expectations[index])/self.times_played[index]  # sample-average update for this arm
def select_arm(self):
options = range(self.number_of_arms)
i = np.random.choice(options, p=self.policy(), replace=False)
return i
def gamble(self, iterations):
for i in range(iterations):
index = self.select_arm()
reward = self.play(index)
self.update_expectations(reward, index)
class epsilon_greedy(agent):
def __init__(self, arms, play_once=1, epsilon=0.1):
super().__init__(arms, play_once)
self.epsilon = epsilon
def __str__(self):
return "Epsilon-Greedy Agent, epsilon= "+str(self.epsilon)
def policy(self):
temp = np.zeros_like(self.expectations)
temp[np.argmax(self.expectations)] = 1-self.epsilon
ans = temp + self.epsilon/self.number_of_arms
return ans
class softmax(agent):
def __init__(self, arms, play_once=1, beta=1):
super().__init__(arms, play_once)
self.beta = beta
def __str__(self):
return "Softmax agent, beta= "+ str(self.beta)
def policy(self):
temp = np.exp(self.expectations/self.beta)
ans = temp / np.sum(temp, axis=0)
return ans
class ucb(agent):
def __init__(self, arms, play_once=1):
super().__init__(arms, play_once)
def __str__(self):
return "UCB1 agent"
def policy(self):
temp = self.expectations + np.sqrt(2*np.log(self.N)/self.times_played)
ans = np.zeros_like(temp)
ans[np.argmax(temp)] = 1
return ans
def maxreward(arms):
#Max rewards
a= [arm.mu for arm in arms]
return max(a)
def avg_reward(rewards):
ans = []
ans.append(rewards[0])
for i in range(1,len(rewards)):
ans.append(ans[i-1]+rewards[i])
for i in range(len(ans)):
ans[i]/=i+1
return ans
def cosine_similarity(a,b):
temp = a*b
temp/=(euclid_distance(a)* euclid_distance(b))
return np.sum(temp, axis=0)
def euclid_distance(a):
return np.sqrt(np.sum(a*a, axis=0))
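# Added sketch (not part of the original agents): cumulative regret is another
# standard bandit metric, the gap between always pulling the best arm and the
# rewards actually collected. Assumes `arms` expose `.mu` and `rewards` is an
# agent's reward_history.
def cumulative_regret(arms, rewards):
    best_mu = max(arm.mu for arm in arms)
    total, regret = 0.0, []
    for r in rewards:
        total += best_mu - float(r)
        regret.append(total)
    return regret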
def test(agents, iterations):
for agent in agents:
agent.gamble(iterations)
temp = [ arm.mu for arm in levers]
optimal_policy = np.zeros_like(agent.expectations)
optimal_policy[temp.index(max(temp))] = 1
avg_rewards_earned = avg_reward(agent.reward_history)
print(agent)
print("maximum possible reward:", maxreward(levers))
print("average reward:", avg_rewards_earned[-1])
print("cosine similarity" ,cosine_similarity(agent.policy(), optimal_policy))
euclid_norm = euclid_distance(agent.policy()-optimal_policy)/len(optimal_policy)
print("euclidian norm ",euclid_norm)
plt.plot(avg_rewards_earned)
plt.ylabel('Average Reward')
plt.xlabel('Iteration')
plt.show()
print("\n")
# print("optimal policy:" , optimal)
# print("learnt policy:" ,agent.policy())
# plt.scatter(range(len(agent.choice_history)),y=agent.choice_history)
# plt.title("Choices")
# plt.xlabel("time")
# plt.ylabel("arm")
# plt.show()
# print("\n")
levers = get_arms(10)
agents = [
epsilon_greedy(levers, epsilon=1),
epsilon_greedy(levers, epsilon=0),
softmax(levers, beta=0.1),
ucb(levers)
]
plt.plot([ arm.mu for arm in levers] )
plt.title("distribution of expected value of arms")
test(agents, 5000)
class softmax_with_exponentiation(agent):
def __init__(self, arms, play_once=1, beta=1, exp=1):
super().__init__(arms, play_once)
self.beta = beta
self.exp = exp
def policy(self):
temp = np.exp(self.expectations/self.beta)
ans = temp / np.sum(temp, axis=0)
ans = ans**self.exp
ans /= np.sum(ans, axis=0)
return ans
class softmax_with_reccurence(agent):
def __init__(self, arms, play_once=1, beta=1):
super().__init__(arms, play_once)
self.old_policy = np.ones_like(self.expectations)/self.number_of_arms  # start from a uniform policy over the arms
self.beta = beta
def policy(self):
temp = np.exp(self.expectations/self.beta)
new_policy = temp / np.sum(temp, axis=0)
result = np.multiply(new_policy, self.old_policy)
result /= np.sum(result, axis=0)
self.old_policy = result
return result
class greedy_with_reccurence(agent):
# alpha = number < 1; will sum over a number of observations and will keep
# osiclating.
# alpha = N will allow the algo to converge to an arm, greedy doesn't
# really need this, kind of always give one answer.
def __init__(self, arms, play_once=1, alpha=1):
super().__init__(arms, play_once)
self.old_policy = np.ones_like(self.expectations)
self.alpha = alpha
def policy(self):
new_policy = np.zeros_like(self.expectations)
new_policy[np.argmax(self.expectations)] = 1
new_policy = (1-self.alpha)*new_policy + self.alpha*self.old_policy
new_policy /= np.sum(new_policy, axis=0)
self.old_policy = new_policy
return new_policy
# class magic(agent):
# def __init__(self, arms, play_once=1, exp=1):
# super().__init__(arms, play_once)
# self.old_policy = np.ones_like(self.expectations)/self.l
# self.exp = exp
#
# def policy(self):
# new_policy = f(old_policy, g(expectations))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Arms
Step2: Agents
Step3: Example agents
Step4: Beta-Softmax
Step5: Upper Confidence Bound (UCB1)
Step6: Metrics
Step7: Test
Step8: Experimental stuff
|
469 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
from scipy.integrate import odeint
from math import sqrt, atan
# Constants
g = 9.8 # Acceleration of gravity
p = 1.2 # Density of air
# Caracteristics of the problem
m = 0.100 # A 100 g ball
r = 0.10 # 10 cm radius
Cd = 0.5 # Drag coeficient for a small spherical object
y0 = 1000.0 # Initial height of the body (1000 m)
v0 = 10.0 # Initial velocity of the body (10 m/s, going up)
A = math.pi*r**2 # Cross-section area of the body
sgn = lambda x: math.copysign(1, x) # Auxiliary function to calculate the sign of a number
def gm(f, t):
(y, v) = f # Extract y and v (i.e., dy/dt) from the f mapping
dy_dt = v # The differential equations
dv_dt = -1.0*g - sgn(v)*(1./2.)*(p/m)*Cd*A*v**2
return [dy_dt, dv_dt] # Return the derivatives
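# Side note (sketch): setting dv_dt = 0 above gives the analytic terminal
# velocity, m*g = (1/2)*p*Cd*A*vt**2, i.e. vt = sqrt(2*m*g/(p*A*Cd)), which is
# exactly the value computed and compared with the numerical result further down.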
# Initial conditions (position and velocity)
start = [y0, v0]
# Time vector (from 0 to 5 secs)
tf = 5.0
t = linspace(0, tf, int(tf*100))
f = odeint(gm, start, t)
y = f[:, 0]
v = f[:, 1]
figure(figsize=(14, 6))
subplot(1, 2, 1, title='Velocity over time')
xlabel('Time (sec)')
ylabel('Velocity (m/sec)')
plot(t, v)
subplot(1, 2, 2, title='Height over time')
xlabel('Time (sec)')
ylabel('Height (m)')
plot(t, y)
vt = sqrt( (2.*m*g) / (p*A*Cd) )
vt
# The terminal velocity
vt_numeric = abs(min(v))
vt_numeric
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We now define the initial conditions and constants of the problem.
Step2: As said, let's define a system of ordinary differential equations in its normal form ${\bf f}' = {\bf g}({\bf f}, t)$. (In the code below we use $gm()$ in place of $g()$ so that it doesn't clash with the acceleration of gravity constant $g$.)
Step3: Let's define the conditions to numerically solve the problem, including a time vector
Step4: Now let's solve the equations numerically and extract the corresponding $y(t)$ and $v(t)$
Step5: Finally, we can plot the solution.
Step6: As you can see, the velocity starts at 10 $m/s$, with the ball going up. Its velocity starts decreasing, goes to zero at max height, and then becomes negative as the ball starts coming down. After a while it reaches its maximum speed, the terminal velocity.
Step7: Now, with our numerical simulation, the terminal velocity is
|
470 | <ASSISTANT_TASK:>
Python Code:
from pprint import pprint
import urllib.request
import os
# print date & versions
import datetime
print("Date & time:",datetime.datetime.now())
import sys
print("Python version:", sys.version)
import pbxplore as pbx
print("PBxplore version:", pbx.__version__)
names = []
pb_sequences = []
pdb_name, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/2LFU.pdb', '2LFU.pdb')
for chain_name, chain in pbx.chains_from_files([pdb_name]):
dihedrals = chain.get_phi_psi_angles()
pb_seq = pbx.assign(dihedrals)
names.append(chain_name)
pb_sequences.append(pb_seq)
pprint(names)
pprint(pb_sequences)
with open('output.fasta', 'w') as outfile:
pbx.io.write_fasta(outfile, pb_sequences, names)
!cat output.fasta
!rm output.fasta
pdb_name, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/2LFU.pdb', '2LFU.pdb')
with open('output.fasta', 'w') as outfile:
for chain_name, chain in pbx.chains_from_files([pdb_name]):
dihedrals = chain.get_phi_psi_angles()
pb_seq = pbx.assign(dihedrals)
pbx.io.write_fasta_entry(outfile, pb_seq, chain_name)
!cat output.fasta
!rm output.fasta
print(pb_sequences[0])
with open('output.fasta', 'w') as outfile:
for width in (60, 70, 80):
pbx.io.write_fasta_entry(outfile, pb_sequences[0],
'width={} blocks'.format(width),
width=width)
!cat output.fasta
!rm output.fasta
pdb_name, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/2LFU.pdb', '2LFU.pdb')
with open('output.phipsi', 'w') as outfile:
for chain_name, chain in pbx.chains_from_files([pdb_name]):
dihedral = chain.get_phi_psi_angles()
for res in sorted(dihedral):
phi = "{:8.2f}".format(dihedral[res]["phi"]) if dihedral[res]["phi"] else " None"
psi = "{:8.2f}".format(dihedral[res]["psi"]) if dihedral[res]["psi"] else " None"
print("{} {:6d} {} {} ".format(chain_name, res, phi, psi), file=outfile)
!head output.phipsi
!tail output.phipsi
!rm output.phipsi
def pdb_to_fasta_pb(pdb_path, fasta_path):
    """Write a fasta file with all the PB sequences from a PDB."""
with open(fasta_path, 'w') as outfile:
for chain_name, chain in pbx.chains_from_files([pdb_path]):
dihedrals = chain.get_phi_psi_angles()
pb_seq = pbx.assign(dihedrals)
pbx.io.write_fasta_entry(outfile, pb_seq, chain_name)
# Write a fasta file
pdb_name, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/2LFU.pdb', '2LFU.pdb')
pdb_to_fasta_pb(pdb_name, 'output.fasta')
# Read a list of headers and a list of sequences from a fasta file
names, sequences = pbx.io.read_fasta('output.fasta')
print('names:')
pprint(names)
print('sequences:')
pprint(sequences)
!rm output.fasta
# Write several fasta files
pdbname, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/1BTA.pdb', '1BTA.pdb')
pdb_to_fasta_pb(pdbname, '1BTA.fasta')
pdbname, _ = urllib.request.urlretrieve('https://files.rcsb.org/view/2LFU.pdb', '2LFU.pdb')
pdb_to_fasta_pb(pdb_name, '2FLU.fasta')
# Read the fasta files
names, sequences = pbx.io.read_several_fasta(['1BTA.fasta', '2FLU.fasta'])
# Print the first entries
print('names:')
pprint(names[:5])
print('sequences:')
pprint(sequences[:5])
!rm 1BTA.fasta 2FLU.fasta
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fasta files
Step2: Sequences can be written one at a time using the pbxplore.io.write_fasta_entry() function.
Step3: By default, the lines in fasta files are wrapped at 60 characters as defined in pbxplore.io.fasta.FASTA_WIDTH. Both pbxplore.io.write_fasta() and pbxplore.io.write_fasta_entry() have an optional width argument that allows you to control the wrapping.
Step4: Dihedral angles
Step5: Note that it's better to write the dihedrals for each PDB/frame as you go, given the high memory cost of storing all of them in a list.
Step7: Read fasta files
Step8: If the sequences we want to read are spread amongst several fasta files, then we can use the pbxplore.io.read_several_fasta() function, which takes a list of fasta file paths as argument instead of a single path.
|
471 | <ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
import os
filename = 'World_population_estimates.html'
if not os.path.exists(filename):
!wget https://raw.githubusercontent.com/AllenDowney/ModSimPy/master/data/World_population_estimates.html
from pandas import read_html
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
from modsim import TimeSeries
def run_simulation(system, growth_func):
results = TimeSeries()
results[system.t_0] = system.p_0
for t in range(system.t_0, system.t_end):
growth = growth_func(results[t], t, system)
results[t+1] = results[t] + growth
return results
def growth_func_quad(pop, t, system):
return system.alpha * pop + system.beta * pop**2
census = table2.census / 1e9
un = table2.un / 1e9
from modsim import System
t_0 = census.index[0]
p_0 = census[t_0]
system = System(t_0=t_0,
p_0=p_0,
t_end=2100)
system.alpha = 25 / 1000
system.beta = -1.8 / 1000
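# Side calculation (sketch): the quadratic growth alpha*p + beta*p**2 is the
# logistic model r*p*(1 - p/K) with r = alpha and carrying capacity K = -alpha/beta,
# so these parameter values imply an asymptote of roughly 13.9 billion.
K = -system.alpha / system.beta
K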
results = run_simulation(system, growth_func_quad)
from modsim import show
show(results.tail())
from modsim import decorate
results.plot(color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title='Quadratic Model Projection')
table3 = tables[3]
table3.head()
table3.columns = ['census', 'prb', 'un']
def plot_projections(table):
    """Plot world population projections.

    table: DataFrame with columns 'un' and 'census'
    """
census_proj = table.census.dropna() / 1e9
un_proj = table.un.dropna() / 1e9
census_proj.plot(style=':', label='US Census')
un_proj.plot(style='--', label='UN DESA')
decorate(xlabel='Year',
ylabel='World population (billion)')
plot_projections(table3)
results.plot(color='gray', label='model')
decorate(title='Quadratic Model Projection')
diff = census.diff()
diff.head()
alpha = census.diff() / census
alpha.head()
def plot_alpha():
alpha_census = census.diff() / census
alpha_census.plot(style='.', label='US Census')
alpha_un = un.diff() / un
alpha_un.plot(style='.', label='UN DESA')
decorate(xlabel='Year', label='Net growth rate')
plot_alpha()
def alpha_func(t):
intercept = 0.02
slope = -0.00021
return intercept + slope * (t - 1970)
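# Sketch: the linear rate above reaches zero at t = 1970 + 0.02/0.00021,
# i.e. around 2065, which is when the simulated population should peak.
print(1970 + 0.02 / 0.00021)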
from numpy import linspace
t_array = linspace(1960, 2020, 5)
alpha_array = alpha_func(t_array)
from matplotlib.pyplot import plot
plot_alpha()
plot(t_array, alpha_array, color='gray')
# Solution
t_0 = 1960
t_end = 2100
p_0 = census[t_0]
# Solution
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha_func=alpha_func)
# Solution
def growth_func_alpha(pop, t, system):
return system.alpha_func(t) * pop
# Solution
growth_func_alpha(p_0, t_0, system)
# Solution
results2 = run_simulation(system, growth_func_alpha);
# Solution
plot_projections(table3)
results2.plot(color='gray', label='model')
decorate(title='Proportional model, linearly decreasing rate')
# Solution
# If the net growth rate continues to decrease linearly,
# world population will peak around 2065 at about 9.8 billion,
# and then start to decline.
# Solution
results2.idxmax(), results2.max()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the previous chapter we developed a quadratic model of world population growth.
Step2: And here's the code that reads table2, which contains world population estimates from the U.S. Census and U.N. DESA, among other organizations.
Step3: Generating Projections
Step4: And here are the results.
Step5: According to the model, population growth will slow gradually after 2020, approaching 12.5 billion by 2100.
Step6: Some values are NaN, which indicates missing data, because some organizations did not publish projections for some years.
Step8: The following function plots projections from the U.N. DESA and U.S. Census. It uses dropna to remove the NaN values from each series before plotting it.
Step9: Here are their projections compared to the results of the quadratic model.
Step10: The U.N. DESA expects the world population to reach 11 billion around 2100, and then level off.
Step11: The first element is NaN because we don't have the data for 1945, so we can't compute the first difference.
Step12: The following function computes and plots the growth rates for the census and un estimates
Step13: And here's what it looks like.
Step14: Other than a bump around 1990, net growth rate has been declining roughly linearly since 1970.
Step15: To see what it looks like, I'll create an array of time stamps from 1960 to 2020 and use alpha_func to compute the corresponding growth rates.
Step16: Here's what it looks like, compared to the data.
Step17: If you don't like the slope and intercept I chose, feel free to adjust them.
|
472 | <ASSISTANT_TASK:>
Python Code:
import theano
import os, sys
sys.path.insert(1, os.path.join('utils'))
from __future__ import print_function, division
path = 'data/statefarm/'
import utils; reload(utils)
from utils import *
batch_size=16
vgg = Vgg16()
model = vgg.model
last_conv_idx = [i for i, l in enumerate(model.layers) if type(l) is Convolution2D][-1]
conv_layers = model.layers[:last_conv_idx + 1]
conv_model = Sequential(conv_layers)
gen = image.ImageDataGenerator()
test_batches = get_batches(path + 'test', batch_size=batch_size, shuffle=False)
# conv_test_feat = conv_model.predict_generator(test_batches, test_batches.nb_sample)
fname = path + 'results/conv_test_feat.dat'
%rm -r $fname
for i in xrange(test_batches.n // batch_size + 1):
conv_test_feat = conv_model.predict_on_batch(test_batches.next()[0])
if not i:
c = bcolz.carray(conv_feat, rootdir= path + '/results/conv_test_feat.dat', mode='a')
else:
c.append(conv_feat)
c.flush()
# apparently you can just open a (massive) bcolz carray this way
# without crashing memory... okay I'm learning things
# carr = bcolz.open(fname)
# forgot to add the '+1' so missed the last 14 images. Doing that here:
# NOTE: below code only adds on the missed batch
# iterate generator until final missed batch, then work:
fname = path + 'results/conv_test_feat.dat'
test_batches.reset()
iters = test_batches.n // batch_size
for i in xrange(iters): test_batches.next()
conv_test_feat = conv_model.predict_on_batch(test_batches.next()[0])
# c = bcolz.carray(conv_test_feat, rootdir=fname, mode='a')
c = bcolz.open(fname)
c.append(conv_test_feat)
c.flush()
fname = path + 'results/conv_test_feat.dat'
x = bcolz.open(fname)
len(x)
# conv_train_feat_batches = get_batches(path + '/results/conv_feat.dat')
# conv_valid_feat_batches = get_batches(path + '/results/conv_val_feat.dat')
conv_trn_feat = load_array(path + '/results/conv_feat.dat')
conv_val_feat = load_array(path + '/results/conv_val_feat.dat')
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_classes(path)
p = 0.8
bn_model = Sequential([
MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]),
Flatten(),
Dropout(p/2),
Dense(128, activation='relu'),
BatchNormalization(),
Dropout(p/2),
Dense(128, activation='relu'),
BatchNormalization(),
Dropout(p),
Dense(10, activation='softmax')
])
bn_model.compile(Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
# Sequential.fit_generator(self, generator, samples_per_epoch, nb_epoch, verbose=1, callbacks=None, validation_data=None, nb_val_samples=None, class_weight=None, max_q_size=10, nb_worker=1, pickle_safe=False, initial_epoch=0, **kwargs)
# bn_model.fit_generator((conv_train_feat_batches, trn_labels), conv_train_feat_batches.nb_sample, nb_epoch=1,
# validation_data=(conv_valid_feat_batches, val_labels), nb_val_samples=conv_valid_feat_batches.nb_sample)
bn_model.fit(conv_trn_feat, trn_labels, batch_size=batch_size, nb_epoch=1,
validation_data = (conv_val_feat, val_labels))
bn_model.optimizer.lr=1e-2
bn_model.fit(conv_trn_feat, trn_labels, batch_size=batch_size, nb_epoch=4,
validation_data = (conv_val_feat, val_labels))
# bn_model.save_weights(path + 'models/da_conv8.h5')
bn_model.load_weights(path + 'models/da_conv8.h5')
# conv_test_feat_batches = bcolz.iterblocks(path + fname)
fname = path + 'results/conv_test_feat.dat'
idx, inc = 0, 4096
preds = []
while idx < test_batches.n - inc:
conv_test_feat = bcolz.open(fname)[idx:idx+inc]
idx += inc
if len(preds):
next_preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
preds = np.concatenate([preds, next_preds])
else:
preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
conv_test_feat = bcolz.open(fname)[idx:]
next_preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
preds = np.concatenate([preds, next_preds])
print(len(preds))
if len(preds) != len(bcolz.open(fname)):
print("Ya done fucked up, son.")
print(81920 - 79726)
print(79726 % 4096)
print(81920 % 4096) # <-- that's yeh problem right there, kid
x = preds[len(preds) - 4096]
print(preds[-1])
print(x)
preds[0]
# ??image.ImageDataGenerator.flow_from_directory
# ??Sequential.predict()
fname = path + 'results/conv_test_feat.dat'
idx, inc = 4096, 4096
preds = []
conv_test_feat = bcolz.open(fname)[:idx]
preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
while idx < test_batches.n - inc:
conv_test_feat = bcolz.open(fname)[idx:idx+inc]
idx += inc
next_preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
preds = np.concatenate([preds, next_preds])
conv_test_feat = bcolz.open(fname)[idx:]
next_preds = bn_model.predict(conv_test_feat, batch_size=batch_size, verbose=0)
preds = np.concatenate([preds, next_preds])
print(len(preds))
if len(preds) != len(bcolz.open(fname)):
print("Ya done fucked up, son.")
def do_clip(arr, mx): return np.clip(arr, (1-mx)/9, mx)
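# Note: clipping the predicted probabilities bounds the log-loss penalty for
# confidently wrong predictions; with 10 classes, using (1-mx)/9 as the floor
# keeps each row summing to roughly 1.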
subm = do_clip(preds, 0.93)
subm_name = path + 'results/subm01.gz'
trn_batches = get_batches(path + 'train', batch_size=batch_size, shuffle=False)
# make sure training batches defined before this:
classes = sorted(trn_batches.class_indices, key=trn_batches.class_indices.get)
import pandas as pd
submission = pd.DataFrame(subm, columns=classes)
submission.insert(0, 'img', [f[8:] for f in test_filenames])
submission.head()
submission.to_csv(subm_name, index=False, compression='gzip')
from IPython.display import FileLink
FileLink(subm_name)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Manual iteration through test images to generate convolutional test features. Saves each batch to disk instead of loading it all in memory.
Step2: I think conv_feat below should be conv_test_feat
Step3: Question
Step4: As expected (& which motivated this) the full set of convolutional test features does not fit at once in memory.
Step5: Loading train/valid features; defining & fitting NN model
Step6: Made a mistake on the last loop above. The penultimate batch -- the last full 4096-image batch -- was added onto the end of the predictions array twice. The final 2194 image predictions were never run.
Step7: Redoing predictions here
Step8: Oh I forgot, predictions through a FC NN are fast. CNNs are where it takes a long time.
|
473 | <ASSISTANT_TASK:>
Python Code:
all_crime_tipos.head(10)
all_crime_tipos_top10 = all_crime_tipos.head(10)
all_crime_tipos_top10.plot(kind='barh', figsize=(12,6), color='#3f3fff')
plt.title('Top 10 crimes por tipo (Mar 2017)')
plt.xlabel('Número de crimes')
plt.ylabel('Crime')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
all_crime_tipos
group_df_marco = df_marco.groupby('CLUSTER')
crimes = group_df_marco['NATUREZA DA OCORRÊNCIA'].count()
crimes.plot(kind='barh', figsize=(10,7), color='#3f3fff')
plt.title('Número de crimes por região (Mar 2017)')
plt.xlabel('Número')
plt.ylabel('Região')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
regioes = df_marco.groupby('CLUSTER').count()
grupo_de_regioes = regioes.sort_values('NATUREZA DA OCORRÊNCIA', ascending=False)
grupo_de_regioes['TOTAL'] = grupo_de_regioes.ID
top_5_regioes_qtd = grupo_de_regioes.TOTAL.head(6)
top_5_regioes_qtd.plot(kind='barh', figsize=(10,4), color='#3f3fff')
plt.title('Top 5 regiões com mais crimes')
plt.xlabel('Número de crimes')
plt.ylabel('Região')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
regiao_4_detalhe = df_marco[df_marco['CLUSTER'] == 4]
regiao_4_detalhe
crime_types = regiao_4_detalhe[['NATUREZA DA OCORRÊNCIA']]
crime_type_total = crime_types.groupby('NATUREZA DA OCORRÊNCIA').size()
crime_type_counts = regiao_4_detalhe[['NATUREZA DA OCORRÊNCIA']].groupby('NATUREZA DA OCORRÊNCIA').sum()
crime_type_counts['TOTAL'] = crime_type_total
all_crime_types = crime_type_counts.sort_values(by='TOTAL', ascending=False)
crimes_top_5 = all_crime_types.head(5)
crimes_top_5.plot(kind='barh', figsize=(11,3), color='#3f3fff')
plt.title('Top 5 crimes na região 4')
plt.xlabel('Número de crimes')
plt.ylabel('Crime')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
horas_mes = df_marco.HORA.value_counts()
horas_mes_top10 = horas_mes.head(10)
horas_mes_top10.plot(kind='barh', figsize=(11,4), color='#3f3fff')
plt.title('Crimes por hora (Mar 2017)')
plt.xlabel('Número de ocorrências')
plt.ylabel('Hora do dia')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
crime_hours = regiao_4_detalhe[['HORA']]
crime_hours_total = crime_hours.groupby('HORA').size()
crime_hours_counts = regiao_4_detalhe[['HORA']].groupby('HORA').sum()
crime_hours_counts['TOTAL'] = crime_hours_total
all_hours_types = crime_hours_counts.sort_values(by='TOTAL', ascending=False)
all_hours_types.head(5)
all_hours_types_top5 = all_hours_types.head(5)
all_hours_types_top5.plot(kind='barh', figsize=(11,3), color='#3f3fff')
plt.title('Top 5 crimes por hora na região 4')
plt.xlabel('Número de ocorrências')
plt.ylabel('Hora do dia')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
crimes_mes = df_marco.BAIRRO.value_counts()
crimes_mes_top10 = crimes_mes.head(10)
crimes_mes_top10.plot(kind='barh', figsize=(11,4), color='#3f3fff')
plt.title('Top 10 Bairros com mais crimes (Mar 2017)')
plt.xlabel('Número de ocorrências')
plt.ylabel('Bairro')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
messejana = df_marco[df_marco['BAIRRO'] == 'JANGURUSSU']
crime_types = messejana[['NATUREZA DA OCORRÊNCIA']]
crime_type_total = crime_types.groupby('NATUREZA DA OCORRÊNCIA').size()
crime_type_counts = messejana[['NATUREZA DA OCORRÊNCIA']].groupby('NATUREZA DA OCORRÊNCIA').sum()
crime_type_counts['TOTAL'] = crime_type_total
all_crime_types = crime_type_counts.sort_values(by='TOTAL', ascending=False)
all_crime_tipos_5 = all_crime_types.head(5)
all_crime_tipos_5.plot(kind='barh', figsize=(15,4), color='#3f3fff')
plt.title('Top 5 crimes no Jangurussú')
plt.xlabel('Número de Crimes')
plt.ylabel('Crime')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
crime_types_bairro = regiao_4_detalhe[['BAIRRO']]
crime_type_total_bairro = crime_types_bairro.groupby('BAIRRO').size()
crime_type_counts_bairro = regiao_4_detalhe[['BAIRRO']].groupby('BAIRRO').sum()
crime_type_counts_bairro['TOTAL'] = crime_type_total_bairro
all_crime_types_bairro = crime_type_counts_bairro.sort_values(by='TOTAL', ascending=False)
crimes_top_5_bairro = all_crime_types_bairro.head(5)
crimes_top_5_bairro.plot(kind='barh', figsize=(11,3), color='#3f3fff')
plt.title('Top 5 bairros na região 4')
plt.xlabel('Quantidade')
plt.ylabel('Bairro')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
bom_jardim = df_marco[df_marco['BAIRRO'] == 'BOM JARDIM']
crime_types = bom_jardim[['NATUREZA DA OCORRÊNCIA']]
crime_type_total = crime_types.groupby('NATUREZA DA OCORRÊNCIA').size()
crime_type_counts = bom_jardim[['NATUREZA DA OCORRÊNCIA']].groupby('NATUREZA DA OCORRÊNCIA').sum()
crime_type_counts['TOTAL'] = crime_type_total
all_crime_types = crime_type_counts.sort_values(by='TOTAL', ascending=False)
all_crime_tipos_5 = all_crime_types.head(5)
all_crime_tipos_5.plot(kind='barh', figsize=(15,4), color='#3f3fff')
plt.title('Top 5 crimes no Bom Jardim')
plt.xlabel('Número de Crimes')
plt.ylabel('Crime')
plt.tight_layout()
ax = plt.gca()
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.show()
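# Refactoring sketch (not used above): the repeated group-count-sort-plot
# pattern could be factored into a single helper, e.g.:
def plot_top_counts(df, column, title, top_n=5, figsize=(11, 3)):
    counts = df[column].value_counts().head(top_n)
    counts.plot(kind='barh', figsize=figsize, color='#3f3fff')
    plt.title(title)
    plt.xlabel('Número de ocorrências')
    plt.ylabel(column)
    plt.tight_layout()
    plt.show()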
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All criminal occurrences in March
Step2: Number of crimes per region
Step3: The 5 regions with the most occurrences
Step4: Above we can see that region 4 had the highest number of criminal occurrences
Step5: An analysis of the 5 most common occurrences
Step6: Filter of the 10 times of day with the most occurrences in March
Step7: Filter of the 5 times of day with the most occurrences in region 4 (the region with the most occurrences in March)
Step8: Filter of the 10 neighborhoods with the most occurrences in March
Step9: The neighborhood with the highest number of occurrences in March was Jangurussu
Step10: The 5 most common neighborhoods in region 4
Step11: Analysis of the Bom Jardim neighborhood
|
474 | <ASSISTANT_TASK:>
Python Code:
import csv
import json
import os
import ujson
import urllib2
from riotwatcher import RiotWatcher
config = {
'key': 'API_key',
}
class RiotCrawler:
def __init__(self, key):
self.key = key
self.w = RiotWatcher(key)
self.tiers = {
'bronze': [],
'silver': [],
'gold': [],
'platinum': [],
'diamond': [],
'challenger': [],
'master': [],
}
# def get_player_list(self):
# recent_games = self.w.get_recent_games(self.player_id)
# player_list = set()
# for game in recent_games['games']:
# # only pick up ranked games
# if 'RANKED' in game['subType']:
# fellow_players = game['fellowPlayers']
# for fellow_player in fellow_players:
# fellow_player_id = fellow_player['summonerId']
# if fellow_player_id not in player_list:
# player_list.add(fellow_player_id)
# return list(player_list)
def get_player_by_tier(self, summoner_id):
request_url = 'https://na.api.pvp.net/api/lol/na/v2.5/league/by-summoner/{}?api_key={}'.format(
summoner_id, self.key
)
response = urllib2.urlopen(request_url)
tier_info = ujson.loads(response.read())
tier = tier_info[str(summoner_id)][0]['tier'].lower()
entries = tier_info[str(summoner_id)][0]['entries']
level = self.tiers[tier]
for entry in entries:
level.append(entry['playerOrTeamId'])
# for l in level:
# print 'summoner id: {}'.format(str(l))
def get_tier():
# challenger: 77759242
# platinum: 53381
# gold: 70359816
# silver: 65213225
# bronze: 22309680
# master: 22551130
# diamond: 34570626
player_ids = [70359816, 77759242, 53381, 65213225, 22309680, 22551130, 34570626]
riot_crawler = RiotCrawler(config['key'])
for player_id in player_ids:
print 'start crawling id: {}'.format(player_id)
riot_crawler.get_player_by_tier(player_id)
return riot_crawler.tiers
tiers = get_tier()
for tier, rank_dict in tiers.iteritems():
print '--- {} ---'.format(tier)
for summoner in rank_dict:
print 'summoner id: {}'.format(summoner)
print '--- end of {} ---'.format(tier)
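# Optional sketch (assumption: the Riot API answers HTTP 429 when the key's
# rate limit is exceeded): a small retry wrapper around urlopen.
def fetch_with_retry(url, retries=3, wait=10):
    import time
    for attempt in range(retries):
        try:
            return urllib2.urlopen(url).read()
        except urllib2.HTTPError as e:
            if e.code == 429 and attempt < retries - 1:
                time.sleep(wait)
            else:
                raise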
import csv
import json
import os
import urllib2
class TopChampion:
FIELD_NAMES = ['totalSessionsPlayed', 'totalSessionsLost', 'totalSessionsWon',
'totalChampionKills', 'totalDamageDealt', 'totalDamageTaken',
'mostChampionKillsPerSession', 'totalMinionKills', 'totalDoubleKills',
'totalTripleKills', 'totalQuadraKills', 'totalPentaKills',
'totalUnrealKills', 'totalDeathsPerSession', 'totalGoldEarned',
'mostSpellsCast', 'totalTurretsKilled', 'totalPhysicalDamageDealt',
'totalMagicDamageDealt', 'totalFirstBlood', 'totalAssists',
'maxChampionsKilled', 'maxNumDeaths', 'label']
def __init__(self, key, player_id, label, n):
self.label = label
self.player_id = player_id
self.key = key
self.n = n
self.top_champions = []
pass
def get_top_champions(self):
self.top_champions[:] = []
data = urllib2.urlopen(
'https://na.api.pvp.net/api/lol/na/v1.3/stats/by-summoner/' +
self.player_id +
'/ranked?season=SEASON2016&api_key=' +
self.key
).read()
json_data = json.loads(data)
champions = json_data['champions']
champion_stats = []
for champion in champions:
champion_stat = champion['stats']
champion_stat['id'] = champion['id']
champion_stat['label'] = self.label
champion_stats.append(champion_stat)
pass
self.top_champions = sorted(champion_stats,
key=lambda x: x['totalSessionsPlayed'],
reverse=True)[1:self.n + 1]
return self.top_champions
pass
def save_top_champions(self):
for champion in self.top_champions:
file_name = '../data/{}.csv'.format(champion['id'])
if os.path.isfile(file_name):
with open(file_name, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.FIELD_NAMES)
writer.writerow(
{
'totalSessionsPlayed': champion['totalSessionsPlayed'],
'totalSessionsLost': champion['totalSessionsLost'],
'totalSessionsWon': champion['totalSessionsWon'],
'totalChampionKills': champion['totalChampionKills'],
'totalDamageDealt': champion['totalDamageDealt'],
'totalDamageTaken': champion['totalDamageTaken'],
'mostChampionKillsPerSession': champion['mostChampionKillsPerSession'],
'totalMinionKills': champion['totalMinionKills'],
'totalDoubleKills': champion['totalDoubleKills'],
'totalTripleKills': champion['totalTripleKills'],
'totalQuadraKills': champion['totalQuadraKills'],
'totalPentaKills': champion['totalPentaKills'],
'totalUnrealKills': champion['totalUnrealKills'],
'totalDeathsPerSession': champion['totalDeathsPerSession'],
'totalGoldEarned': champion['totalGoldEarned'],
'mostSpellsCast': champion['mostSpellsCast'],
'totalTurretsKilled': champion['totalTurretsKilled'],
'totalPhysicalDamageDealt': champion['totalPhysicalDamageDealt'],
'totalMagicDamageDealt': champion['totalMagicDamageDealt'],
'totalFirstBlood': champion['totalFirstBlood'],
'totalAssists': champion['totalAssists'],
'maxChampionsKilled': champion['maxChampionsKilled'],
'maxNumDeaths': champion['maxNumDeaths'],
'label': champion['label']
}
)
pass
pass
else:
with open(file_name, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.FIELD_NAMES)
writer.writeheader()
writer.writerow(
{
'totalSessionsPlayed': champion['totalSessionsPlayed'],
'totalSessionsLost': champion['totalSessionsLost'],
'totalSessionsWon': champion['totalSessionsWon'],
'totalChampionKills': champion['totalChampionKills'],
'totalDamageDealt': champion['totalDamageDealt'],
'totalDamageTaken': champion['totalDamageTaken'],
'mostChampionKillsPerSession': champion['mostChampionKillsPerSession'],
'totalMinionKills': champion['totalMinionKills'],
'totalDoubleKills': champion['totalDoubleKills'],
'totalTripleKills': champion['totalTripleKills'],
'totalQuadraKills': champion['totalQuadraKills'],
'totalPentaKills': champion['totalPentaKills'],
'totalUnrealKills': champion['totalUnrealKills'],
'totalDeathsPerSession': champion['totalDeathsPerSession'],
'totalGoldEarned': champion['totalGoldEarned'],
'mostSpellsCast': champion['mostSpellsCast'],
'totalTurretsKilled': champion['totalTurretsKilled'],
'totalPhysicalDamageDealt': champion['totalPhysicalDamageDealt'],
'totalMagicDamageDealt': champion['totalMagicDamageDealt'],
'totalFirstBlood': champion['totalFirstBlood'],
'totalAssists': champion['totalAssists'],
'maxChampionsKilled': champion['maxChampionsKilled'],
'maxNumDeaths': champion['maxNumDeaths'],
'label': champion['label']
}
)
pass
pass
pass
pass
pass
def main():
import time
tiers = get_tier()
for tier, rank_dict in tiers.iteritems():
print 'starting tier: {}'.format(tier)
for summoner_id in rank_dict:
print 'tier: {}, summoner id: {}'.format(tier, summoner_id)
top_champion = TopChampion(config['key'], summoner_id, tier, 3)
top_champion.get_top_champions()
top_champion.save_top_champions()
time.sleep(1)
print 'end tier: {}'.format(tier)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To use the Riot API, one more important thing to do is to get your own API key. The API key can be obtained from here. Note that a normal developer API key has a narrow request limit, whereas a production API key for commercial use has a looser limit. For now, we are just gonna use the normal API key for demonstration.
Step2: <a name="architecture"></a>Project Architecture
Step3: get_tier will return a division dictionary, whose keys are the tier names and whose values are the summoner id lists in each tier. The results are printed in a human-readable format, categorized by tier.
Step4: <a name="mfpChampions"></a>2. Fetch most frequently played champions
Step5: Then we can move on and fetch the data we need. Riot gives us the API to get all champions that a user has used during the season. And the response will be in JSON format. After parsing the JSON response, what we need to do is to get the most frequently used champions, which can represent a player's level. So we sort the champions list by the number of games in which the player used each champion (totalSessionsPlayed) in descending order. Notice that the first element in the list will always be the champion with id 0, which represents the stats data of all champions that the player used in the season. So we need to skip that.
Step6: With the above class, we can now start crawling the stats data of all champions and saving them to csv files with the following code. Notice that this process is pretty slow since we added sleep calls in our code. Riot APIs have a limit on the API call rate. You cannot send more than 500 requests per 10 minutes. So every time we send a request here, we sleep for 1 second to prevent error responses.
|
475 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import scipy.stats
# Create two lists of random values
x = [1,2,3,4,5,6,7,8,9]
y = [2,1,2,4.5,7,6.5,6,9,9.5]
# Create a function that takes in x's and y's
def spearmans_rank_correlation(xs, ys):
# Calculate the rank of x's
xranks = pd.Series(xs).rank()
# Caclulate the ranking of the y's
yranks = pd.Series(ys).rank()
# Calculate Pearson's correlation coefficient on the ranked versions of the data
return scipy.stats.pearsonr(xranks, yranks)
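# Equivalent closed form when there are no tied ranks:
# rho = 1 - 6 * sum(d_i**2) / (n * (n**2 - 1)), with d_i the rank differences.
# (The sample y above contains a tie, so this version differs slightly from
# the Pearson-of-ranks value.)
def spearman_closed_form(xs, ys):
    d = pd.Series(xs).rank() - pd.Series(ys).rank()
    n = len(xs)
    return 1 - 6 * (d ** 2).sum() / (n * (n ** 2 - 1))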
# Run the function
spearmans_rank_correlation(x, y)[0]
# Just to check our results, here it Spearman's using Scipy
scipy.stats.spearmanr(x, y)[0]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Data
Step2: Calculate Spearman's Rank Correlation
Step3: Calculate Spearman's Correlation Using SciPy
|
476 | <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_digits
digits = load_digits()
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels
for i in range(64):
ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
# label the image with the target value
ax.text(0, 7, str(digits.target[i]))
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
proj = pca.fit_transform(digits.data)
plt.scatter(proj[:, 0], proj[:, 1], c=digits.target)
plt.colorbar()
from sklearn.manifold import Isomap
iso = Isomap(n_neighbors=5, n_components=2)
proj = iso.fit_transform(digits.data)
plt.scatter(proj[:, 0], proj[:, 1], c=digits.target)
plt.colorbar()
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import train_test_split
# split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target)
# train the model
clf = GaussianNB()
clf.fit(X_train, y_train)
# use the model to predict the labels of the test data
predicted = clf.predict(X_test)
expected = y_test
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels
for i in range(64):
ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
ax.imshow(X_test.reshape(-1, 8, 8)[i], cmap=plt.cm.binary,
interpolation='nearest')
# label the image with the target value
if predicted[i] == expected[i]:
ax.text(0, 7, str(predicted[i]), color='green')
else:
ax.text(0, 7, str(predicted[i]), color='red')
matches = (predicted == expected)
print(matches.sum())
print(len(matches))
matches.sum() / float(len(matches))
from sklearn import metrics
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
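# Note: in scikit-learn's confusion matrix, rows correspond to the true
# (expected) labels and columns to the predicted labels, so off-diagonal
# entries show which digits are being mistaken for which.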
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll re-use some of our code from before to visualize the data and remind us what we're looking at.
Step2: Visualizing the Data
Step3: Here we see that the digits do cluster fairly well, so we can expect even a relatively simple classifier to separate the classes reasonably well.
Step4: It can be fun to explore the various manifold learning methods available in scikit-learn.
Step5: Question
Step6: Quantitative Measurement of Performance
Step7: We see that nearly 1500 of the 1800 predictions match the input. But there are other, more sophisticated metrics we can use to judge the performance of a classifier.
Step8: Another enlightening metric for this sort of multi-label classification is the confusion matrix, which shows which labels are being confused with each other.
|
477 | <ASSISTANT_TASK:>
Python Code:
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(8,5))
# how many time steps/data pts are in one batch of data
seq_length = 20
# generate evenly spaced data pts
time_steps = np.linspace(0, np.pi, seq_length + 1)
data = np.sin(time_steps)
data.resize((seq_length + 1, 1)) # size becomes (seq_length+1, 1), adds an input_size dimension
x = data[:-1] # all but the last piece of data
y = data[1:] # all but the first
# display the data
plt.plot(time_steps[1:], x, 'r.', label='input, x') # x
plt.plot(time_steps[1:], y, 'b.', label='target, y') # y
plt.legend(loc='best')
plt.show()
class RNN(nn.Module):
def __init__(self, input_size, output_size, hidden_dim, n_layers):
super(RNN, self).__init__()
self.hidden_dim=hidden_dim
# define an RNN with specified parameters
# batch_first means that the first dim of the input and output will be the batch_size
self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
# last, fully-connected layer
self.fc = nn.Linear(hidden_dim, output_size)
def forward(self, x, hidden):
# x (batch_size, seq_length, input_size)
# hidden (n_layers, batch_size, hidden_dim)
# r_out (batch_size, time_step, hidden_size)
batch_size = x.size(0)
# get RNN outputs
r_out, hidden = self.rnn(x, hidden)
# shape output to be (batch_size*seq_length, hidden_dim)
r_out = r_out.view(-1, self.hidden_dim)
# get final output
output = self.fc(r_out)
return output, hidden
# test that dimensions are as expected
test_rnn = RNN(input_size=1, output_size=1, hidden_dim=10, n_layers=2)
# generate evenly spaced, test data pts
time_steps = np.linspace(0, np.pi, seq_length)
data = np.sin(time_steps)
data.resize((seq_length, 1))
test_input = torch.Tensor(data).unsqueeze(0) # give it a batch_size of 1 as first dimension
print('Input size: ', test_input.size())
# test out rnn sizes
test_out, test_h = test_rnn(test_input, None)
print('Output size: ', test_out.size())
print('Hidden state size: ', test_h.size())
# decide on hyperparameters
input_size=1
output_size=1
hidden_dim=32
n_layers=1
# instantiate an RNN
rnn = RNN(input_size, output_size, hidden_dim, n_layers)
print(rnn)
# MSE loss and Adam optimizer with a learning rate of 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=0.01)
# train the RNN
def train(rnn, n_steps, print_every):
# initialize the hidden state
hidden = None
for batch_i, step in enumerate(range(n_steps)):
# defining the training data
time_steps = np.linspace(step * np.pi, (step+1)*np.pi, seq_length + 1)
data = np.sin(time_steps)
data.resize((seq_length + 1, 1)) # input_size=1
x = data[:-1]
y = data[1:]
# convert data into Tensors
x_tensor = torch.Tensor(x).unsqueeze(0) # unsqueeze gives a 1, batch_size dimension
y_tensor = torch.Tensor(y)
# outputs from the rnn
prediction, hidden = rnn(x_tensor, hidden)
## Representing Memory ##
# make a new variable for hidden and detach the hidden state from its history
# this way, we don't backpropagate through the entire history
hidden = hidden.data
# calculate the loss
loss = criterion(prediction, y_tensor)
# zero gradients
optimizer.zero_grad()
# perform backprop and update weights
loss.backward()
optimizer.step()
# display loss and predictions
if batch_i%print_every == 0:
print('Loss: ', loss.item())
plt.plot(time_steps[1:], x, 'r.') # input
plt.plot(time_steps[1:], prediction.data.numpy().flatten(), 'b.') # predictions
plt.show()
return rnn
# train the rnn and monitor results
n_steps = 75
print_every = 15
trained_rnn = train(rnn, n_steps, print_every)
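# Quick check (sketch, reusing the objects defined above): feed one more
# segment of the sine wave through the trained network and compare.
time_steps = np.linspace(n_steps * np.pi, (n_steps + 1) * np.pi, seq_length + 1)
data = np.sin(time_steps)
data.resize((seq_length + 1, 1))
x_tensor = torch.Tensor(data[:-1]).unsqueeze(0)
prediction, _ = trained_rnn(x_tensor, None)
plt.plot(time_steps[1:], data[1:], 'r.', label='target')
plt.plot(time_steps[1:], prediction.data.numpy().flatten(), 'b.', label='prediction')
plt.legend()
plt.show()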
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the RNN
Step2: Check the input and output dimensions
Step3: Training the RNN
Step4: Loss and Optimization
Step5: Defining the training function
|
478 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
xpoints=512 #nr of grid points in 1 direction
xmax=1 #extension of grid [m]
pref=9e9 # 1/(4pi eps0)
x=np.linspace(-xmax,xmax,xpoints)
y=x
[x2d,y2d]=np.meshgrid(x,y,indexing='ij') #2D matrices holding x or y coordinate for each point on the grid
#define multipole
npoles=6 #number of poles, needs to be even
fradius=0.5*xmax #field radius
sradius=0.1*xmax #radius of spheres making up the multipole
vamp=1 #voltage amplitude on multipole (half of the poles have +vamp, other half has -vamp)
def multipolepotential(x,y,z,npoles,v,fradius,sradius):
#assume a set of n conducting spheres of radius on a circle of radius fradius (field radius)
#npoles is number of poles and needs to be even >0
#the spheres are positioned in the xy plane and have a potential of V for the even spheres and -V for the odd spheres
out=np.zeros(x.shape)
potentialin=np.zeros(x.shape)
potential=np.zeros(x.shape)
theta=np.linspace(0,2*np.pi,npoles+1)
if(npoles % 2) == 0:
for nid in range(npoles):
#make a superposition of the potential for each of the spheres
vin=v*(-1.0)**nid
xn=fradius*np.cos(theta[nid])
yn=fradius*np.sin(theta[nid])
r=np.sqrt(np.square(x-xn)+np.square(y-yn)+np.square(z)) #distance to sphere n
in1=r<sradius #logical function 1 if inside sphere, 0 if outside
out1=r>=sradius #logical function 0 if inside sphere, 1 if outside
potential=potential+vin*sradius*np.multiply(np.power(r,-1),out1)
out=out+out1
potentialin=potentialin+vin*in1
#do a rescaling to match the potential, as the superposition changes the actual potential on the spheres slightly
idin=np.where(potentialin)
idout=np.where(out)
potential[idin]=potentialin[idin]
potential[idout]=v*(potential[idout]/np.max(potential[idout]))
else:
potential=None
#undefined
return potential
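# Note: for a conducting sphere of radius R held at potential V, the outside
# potential is V*R/r, so the Coulomb prefactor (pref above) cancels out and is
# not needed in this superposition.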
v=multipolepotential(x2d,y2d,np.zeros(x2d.shape),npoles,vamp,fradius,sradius)
ex,ey=np.gradient(-v,x,y) #strange ordering due to meshgrid
e=np.sqrt(ex**2+ey**2)
#show vector plot, but limit number of points to keep the number of vector reasonable
skippts=20
skip=(slice(None,None,skippts),slice(None,None,skippts)) #dont plot all points in a quiver as this becomes unreadable
plt.quiver(x2d[skip],y2d[skip],ex[skip],ey[skip])
plt.title('electric field')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('square')
plt.imshow(e,extent=[-xmax, xmax, -xmax, xmax])
plt.title('electric field and fieldlines')
plt.xlabel('x');
plt.ylabel('y');
plt.streamplot(x2d,x2d,ey,ex)
plt.axis('square')
plt.colorbar
plt.show()
plt.imshow(v,extent=[-xmax, xmax, -xmax, xmax])
plt.title('electrostatic potential V')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('square')
plt.colorbar()
plt.show()
nlines=50;
plt.contour(x2d,y2d,v,nlines)
plt.title('equipotential surfaces')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('square')
plt.colorbar
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: create grid to plot (choose 2D plane for visualisation cutting through charge centers , but calculation is correct for 3D)
Step2: calculate the potential of the set of spheres (use a function that we can reuse later)
Step3: And now it's showtime!
Step4: Note how the field emanates from the positive charges and sinks into the negative charges
Step5: Note the interesting npoles/2 fold symmetry of the field
|
479 | <ASSISTANT_TASK:>
Python Code:
!wget https://d17h27t6h515a5.cloudfront.net/topher/2016/December/584f6edd_data/data.zip -O data.zip
!unzip -q data.zip
!mv data udacity_data
!rm -rf __MACOSX/
!unzip -q data_val.zip
!unzip -q data_test.zip
DATA_TRAIN_FOLDER = 'udacity_data/'
DATA_VAL_FOLDER = 'data_val/'
DATA_TEST_FOLDER = 'data_test/'
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline
train_source = pd.read_csv(DATA_TRAIN_FOLDER + 'driving_log.csv')
train_nonzero = train_source[train_source.steering != 0]
train_zero = (train_source[train_source.steering == 0]).sample(frac=.1)
data_train = pd.concat([train_nonzero, train_zero], ignore_index=True)
data_train.steering.hist(bins=100)
names = ['center', 'left', 'right', 'steering', 'throttle', 'brake', 'speed']
data_val = pd.read_csv(DATA_VAL_FOLDER + 'driving_log.csv', names=names)
data_test = pd.read_csv(DATA_TEST_FOLDER + 'driving_log.csv', names=names)
data_val.steering.hist(bins=30)
data_test.steering.hist(bins=30)
len(data_train), len(data_val), len(data_test)
input_shape = (64, 64, 3)
def _get_img_and_steering_from_row(row, folder):
imgpath = row.center.values[0]
imgpath = imgpath[imgpath.find('IMG'):]
img = cv2.imread(folder + imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
steering = row.steering.values[0]
return img, steering
def _get_img_and_steering_from_iloc_row(row, folder):
imgpath = row.center
imgpath = imgpath[imgpath.find('IMG'):]
img = cv2.imread(folder + imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
steering = row.steering
return img, steering
def _get_flipped_image(img, y):
img = cv2.flip(img, 1)
return img, -y
def _get_brightnessed_image(img):
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
random_bright = .25 + np.random.uniform()
img[:,:,2] = img[:,:,2] * random_bright
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
return img
def _get_translated_image(img, y, trans_range):
rows, cols, _ = img.shape
tr_x = trans_range * np.random.uniform() - trans_range/2
y = y + tr_x/trans_range * 2 *.4
tr_y = 10 * np.random.uniform() - 10/2
Trans_M = np.float32([[1,0, tr_x], [0,1, tr_y]])
img = cv2.warpAffine(img, Trans_M, (cols, rows))
return img, y
def image_augmentation(img, y):
if np.random.uniform() < 0.5:
img, y = _get_flipped_image(img, y)
img = _get_brightnessed_image(img)
img, y = _get_translated_image(img, y, 100)
return img, y
def image_transformation(img):
img = img[60:-20,:,:]
img = cv2.resize(img, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA)
return img
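# Note: the crop above drops the top 60 rows (sky/horizon) and the bottom 20
# rows (car hood) before resizing to the 64x64 network input.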
row = data_train.sample()
img, steering = _get_img_and_steering_from_row(row, DATA_TRAIN_FOLDER)
img, steering = image_augmentation(img, steering)
img = image_transformation(img)
plt.imshow(img)
plt.axis('off')
def train_data_generator(batch_size):
while True:
X = np.zeros((batch_size, *input_shape), dtype=np.float32)
y = np.zeros((batch_size,), dtype=np.float32)
for idx in range(batch_size):
row = data_train.sample()
img, steering = _get_img_and_steering_from_row(row, DATA_TRAIN_FOLDER)
img, steering = image_augmentation(img, steering)
img = image_transformation(img)
X[idx], y[idx] = img, steering
yield X, y
def val_data_generator(batch_size):
seq_idx = 0
while True:
X = np.zeros((batch_size, *input_shape), dtype=np.float32)
y = np.zeros((batch_size,), dtype=np.float32)
for idx in range(batch_size):
row = data_val.iloc[seq_idx]
img, steering = _get_img_and_steering_from_iloc_row(row, DATA_VAL_FOLDER)
img = image_transformation(img)
X[idx], y[idx] = img, steering
seq_idx += 1
if seq_idx == len(data_val):
seq_idx = 0
yield X, y
def test_data_generator(batch_size):
seq_idx = 0
while True:
X = np.zeros((batch_size, *input_shape), dtype=np.float32)
y = np.zeros((batch_size,), dtype=np.float32)
for idx in range(batch_size):
row = data_test.iloc[seq_idx]
img, steering = _get_img_and_steering_from_iloc_row(row, DATA_TEST_FOLDER)
img = image_transformation(img)
X[idx], y[idx] = img, steering
seq_idx += 1
if seq_idx == len(data_test):
seq_idx = 0
yield X, y
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, ELU, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
def get_vivek_model():
model = Sequential()
model.add(Lambda(lambda x: x/127.5 - 1., input_shape=input_shape))
model.add(Convolution2D(3,1,1, border_mode='valid', name='conv0', init='he_normal'))
model.add(Convolution2D(32,3,3, border_mode='valid', name='conv1', init='he_normal'))
model.add(ELU())
model.add(Convolution2D(32,3,3, border_mode='valid', name='conv2', init='he_normal'))
model.add(ELU())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Convolution2D(64,3,3, border_mode='valid', name='conv3', init='he_normal'))
model.add(ELU())
model.add(Convolution2D(64,3,3, border_mode='valid', name='conv4', init='he_normal'))
model.add(ELU())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Convolution2D(128,3,3, border_mode='valid', name='conv5', init='he_normal'))
model.add(ELU())
model.add(Convolution2D(128,3,3, border_mode='valid', name='conv6', init='he_normal'))
model.add(ELU())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,name='hidden1', init='he_normal'))
model.add(ELU())
model.add(Dropout(0.5))
model.add(Dense(64,name='hidden2', init='he_normal'))
model.add(ELU())
model.add(Dropout(0.5))
model.add(Dense(16,name='hidden3',init='he_normal'))
model.add(ELU())
model.add(Dropout(0.5))
model.add(Dense(1, name='output', init='he_normal'))
model.compile(optimizer="adam", loss="mse")
return model
model = get_vivek_model()
model.fit_generator(
train_data_generator(50),
samples_per_epoch=20000,
nb_epoch=10,
validation_data=val_data_generator(250),
nb_val_samples=750,
callbacks=[ModelCheckpoint(filepath="best_validation.h5", verbose=1, save_best_only=True)]
)
print('Latest', model.evaluate_generator(val_data_generator(250), 750))
model.load_weights('best_validation.h5')
print('Best', model.evaluate_generator(val_data_generator(250), 750))
model.evaluate_generator(test_data_generator(50), 950)
def get_samples(log, folder):
for _ in range(10):
row = log.sample()
imgpath = row.center.values[0]
imgpath = imgpath[imgpath.find('IMG'):]
img = cv2.imread(folder + imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = image_transformation(img)
print(row.steering.values[0], model.predict(np.expand_dims(img, axis=0))[0])
get_samples(data_val, DATA_VAL_FOLDER)
get_samples(data_test, DATA_TEST_FOLDER)
import json
with open('model.json', 'w') as f:
f.write(model.to_json())
model.save_weights('model.h5')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unzip prepared validation and training sets
Step2: Step 1 -- load data and visualize
Step3: Step 2 data generators
Step4: Step 2 -- define the model
Step5: I also tried NVIDIA, commaai and VGG-like architecture, as well as several custom ones, but this one, provided by Vivek Yadav is actually the best one for this task. Big thanks!
Step6: Step 3 -- train!
Step7: Step 4 -- evaluation
Step8: Step 5 -- save and use
|
480 | <ASSISTANT_TASK:>
Python Code:
np.random.seed(0)
X0 = sp.stats.norm(-2, 1).rvs(40)
X1 = sp.stats.norm(+2, 1).rvs(60)
X = np.hstack([X0, X1])[:, np.newaxis]
y0 = np.zeros(40)
y1 = np.ones(60)
y = np.hstack([y0, y1])
sns.distplot(X0, rug=True, kde=False, norm_hist=True, label="class 0")
sns.distplot(X1, rug=True, kde=False, norm_hist=True, label="class 1")
plt.legend()
plt.xlim(-6,6)
plt.show()
from sklearn.naive_bayes import GaussianNB
clf_norm = GaussianNB().fit(X, y)
clf_norm.classes_
clf_norm.class_count_
clf_norm.class_prior_
clf_norm.theta_, clf_norm.sigma_
xx = np.linspace(-6, 6, 100)
p0 = sp.stats.norm(clf_norm.theta_[0], clf_norm.sigma_[0]).pdf(xx)
p1 = sp.stats.norm(clf_norm.theta_[1], clf_norm.sigma_[1]).pdf(xx)
sns.distplot(X0, rug=True, kde=False, norm_hist=True, color="r", label="class 0 histogram")
sns.distplot(X1, rug=True, kde=False, norm_hist=True, color="b", label="class 1 histogram")
plt.plot(xx, p0, c="r", label="class 0 est. pdf")
plt.plot(xx, p1, c="b", label="class 1 est. pdf")
plt.legend()
plt.show()
x_new = -1
clf_norm.predict_proba([[x_new]])
px = sp.stats.norm(clf_norm.theta_, np.sqrt(clf_norm.sigma_)).pdf(x_new)
px
p = px.flatten() * clf_norm.class_prior_
p
clf_norm.class_prior_
p / p.sum()
np.random.seed(0)
X = np.random.randint(2, size=(10, 4))
y = np.array([0,0,0,0,1,1,1,1,1,1])
print(X)
print(y)
from sklearn.naive_bayes import BernoulliNB
clf_bern = BernoulliNB().fit(X, y)
clf_bern.classes_
clf_bern.class_count_
np.exp(clf_bern.class_log_prior_)
fc = clf_bern.feature_count_
fc
fc / np.repeat(clf_bern.class_count_[:, np.newaxis], 4, axis=1)
theta = np.exp(clf_bern.feature_log_prob_)
theta
x_new = np.array([1, 1, 0, 0])
clf_bern.predict_proba([x_new])
p = ((theta**x_new)*(1-theta)**(1-x_new)).prod(axis=1)*np.exp(clf_bern.class_log_prior_)
p / p.sum()
x_new = np.array([0, 0, 1, 1])
clf_bern.predict_proba([x_new])
p = ((theta**x_new)*(1-theta)**(1-x_new)).prod(axis=1)*np.exp(clf_bern.class_log_prior_)
p / p.sum()
from sklearn.naive_bayes import MultinomialNB
clf_mult = MultinomialNB().fit(X, y)
clf_mult.classes_
clf_mult.class_count_
fc = clf_mult.feature_count_
fc
fc / np.repeat(fc.sum(axis=1)[:, np.newaxis], 4, axis=1)
clf_mult.alpha
(fc + clf_mult.alpha) / (np.repeat(fc.sum(axis=1)[:, np.newaxis], 4, axis=1) + clf_mult.alpha * X.shape[1])
theta = np.exp(clf_mult.feature_log_prob_)
theta
x_new = np.array([21, 35, 29, 14])
clf_mult.predict_proba([x_new])
p = (theta**x_new).prod(axis=1)*np.exp(clf_mult.class_log_prior_)
p / p.sum()
x_new = np.array([18, 24, 35, 24])
clf_mult.predict_proba([x_new])
from sklearn.datasets import fetch_20newsgroups
from sklearn.cross_validation import train_test_split
news = fetch_20newsgroups(subset="all")
X_train, X_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.1, random_state=1)
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
clf_1 = Pipeline([
('vect', CountVectorizer()),
('clf', MultinomialNB()),
])
clf_2 = Pipeline([
('vect', TfidfVectorizer()),
('clf', MultinomialNB()),
])
clf_3 = Pipeline([
('vect', TfidfVectorizer(token_pattern=r"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b")),
('clf', MultinomialNB()),
])
clf_4 = Pipeline([
('vect', TfidfVectorizer(stop_words="english",
token_pattern=r"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b")),
('clf', MultinomialNB()),
])
clf_5 = Pipeline([
('vect', TfidfVectorizer(stop_words="english",
token_pattern=ur"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b")),
('clf', MultinomialNB(alpha=0.01)),
])
from sklearn.cross_validation import cross_val_score, KFold
from scipy.stats import sem
for i, clf in enumerate([clf_1, clf_2, clf_3, clf_4, clf_5]):
scores = cross_val_score(clf, X_test, y_test, cv=5)
print(("Model {0:d}: Mean score: {1:.3f} (+/-{2:.3f})").format(i, np.mean(scores), sem(scores)))
import codecs
def read_data(filename):
with codecs.open(filename, encoding='utf-8', mode='r') as f:
data = [line.split('\t') for line in f.read().splitlines()]
data = data[1:] # exclude the header row
return data
train_data = read_data('/home/dockeruser/data/nsmc/ratings_train.txt')
test_data = read_data('/home/dockeruser/data/nsmc/ratings_test.txt')
X = zip(*train_data)[1]
y = zip(*train_data)[2]
y = np.array(y, dtype=int)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=10000, test_size=10000)
len(X_train), len(X_test)
from konlpy.utils import pprint
pprint((X[0], y[0]))
from konlpy.tag import Twitter
pos_tagger = Twitter()
%%time
def tokenize(doc):
return ['/'.join(t) for t in pos_tagger.pos(doc, norm=True, stem=True)]
train_docs = [(tokenize(row[1]), row[2]) for row in train_data[:10000]]
tokens = [t for d in train_docs for t in d[0]]
import nltk
text = nltk.Text(tokens, name='NMSC')
mpl.rcParams["font.family"] = "NanumGothic"
plt.figure(figsize=(12,10))
text.plot(50)
plt.show()
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
clf_1 = Pipeline([
('vect', CountVectorizer()),
('clf', MultinomialNB()),
])
%%time
clf_1.fit(X_train, y_train)
pprint(list(clf_1.named_steps["vect"].vocabulary_)[:10])
%%time
print(classification_report(y_test, clf_1.predict(X_test)))
from sklearn.feature_extraction.text import TfidfVectorizer
clf_2 = Pipeline([
('vect', TfidfVectorizer()),
('clf', MultinomialNB()),
])
%%time
clf_2.fit(X_train, y_train)
%%time
print(classification_report(y_test, clf_2.predict(X_test)))
from konlpy.tag import Twitter
pos_tagger = Twitter()
def tokenize_pos(doc):
return ['/'.join(t) for t in pos_tagger.pos(doc, norm=True, stem=True)]
clf_3 = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize_pos)),
('clf', MultinomialNB()),
])
%%time
clf_3.fit(X_train, y_train)
pprint(list(clf_3.named_steps["vect"].vocabulary_)[:10])
%%time
print(classification_report(y_test, clf_3.predict(X_test), digits=4))
vect3 = clf_3.named_steps["vect"]
idx3 = np.array(np.argsort(vect3.transform(X_train).sum(axis=0)))[0]
voca3 = np.array(vect3.get_feature_names()).flatten()
pprint(voca3[idx3[-20:]].tolist())
clf_4 = Pipeline([
('vect', TfidfVectorizer(tokenizer=tokenize_pos, ngram_range=(1,2))),
('clf', MultinomialNB()),
])
%%time
clf_4.fit(X_train, y_train)
%%time
print(classification_report(y_test, clf_4.predict(X_test), digits=4))
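# A possible optimization step (an illustrative sketch, not from the original notebook):
# grid-search the vectorizer and smoothing hyperparameters of one of the pipelines above.
from sklearn.grid_search import GridSearchCV
param_grid = {'vect__ngram_range': [(1, 1), (1, 2)], 'clf__alpha': [0.01, 0.1, 1.0]}
grid = GridSearchCV(clf_3, param_grid, cv=3)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)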
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bernoulli Naive Bayes model
Step2: Multinomial Naive Bayes model
Step3: Example 1
Step4: Sentiment Analysis
Step5: Using CountVectorizer
Step6: Using TfidfVectorizer
Step7: Using a morphological (POS) analyzer
Step8: Optimization
|
481 | <ASSISTANT_TASK:>
Python Code:
class Module(object):
def __init__ (self):
self.output = None
self.gradInput = None
self.training = True
Basically, you can think of a module as something like a black box
which can process `input` data and produce `output` data.
This is like applying a function which is called `forward`:
output = module.forward(input)
The module should be able to perform a backward pass: to differentiate the `forward` function.
More, it should be able to differentiate it if is a part of chain (chain rule).
The latter implies there is a gradient from previous step of a chain rule.
gradInput = module.backward(input, gradOutput)
def forward(self, input):
Takes an input object, and computes the corresponding output of the module.
return self.updateOutput(input)
def backward(self,input, gradOutput):
Performs a backpropagation step through the module, with respect to the given input.
This includes
- computing a gradient w.r.t. `input` (is needed for further backprop),
- computing a gradient w.r.t. parameters (to update parameters while optimizing).
self.updateGradInput(input, gradOutput)
self.accGradParameters(input, gradOutput)
return self.gradInput
def updateOutput(self, input):
Computes the output using the current parameter set of the class and input.
This function returns the result which is stored in the `output` field.
Make sure to both store the data in `output` field and return it.
# The easiest case:
# self.output = input
# return self.output
pass
def updateGradInput(self, input, gradOutput):
Computing the gradient of the module with respect to its own input.
This is returned in `gradInput`. Also, the `gradInput` state variable is updated accordingly.
The shape of `gradInput` is always the same as the shape of `input`.
Make sure to both store the gradients in `gradInput` field and return it.
# The easiest case:
# self.gradInput = gradOutput
# return self.gradInput
pass
def accGradParameters(self, input, gradOutput):
Computing the gradient of the module with respect to its own parameters.
No need to override if module has no parameters (e.g. ReLU).
pass
def zeroGradParameters(self):
Zeroes `gradParams` variable if the module has params.
pass
def getParameters(self):
Returns a list with its parameters.
If the module does not have parameters return empty list.
return []
def getGradParameters(self):
Returns a list with gradients with respect to its parameters.
If the module does not have parameters return empty list.
return []
def training(self):
Sets training mode for the module.
Training and testing behaviour differs for Dropout, BatchNorm.
self.training = True
def evaluate(self):
Sets evaluation mode for the module.
Training and testing behaviour differs for Dropout, BatchNorm.
self.training = False
def __repr__(self):
Pretty printing. Should be overridden in every module if you want
to have a readable description.
return "Module"
class Sequential(Module):
This class implements a container, which processes `input` data sequentially.
`input` is processed by each module (layer) in self.modules consecutively.
The resulting array is called `output`.
def __init__ (self):
super(Sequential, self).__init__()
self.modules = []
def add(self, module):
Adds a module to the container.
self.modules.append(module)
def updateOutput(self, input):
Basic workflow of FORWARD PASS:
y_0 = module[0].forward(input)
y_1 = module[1].forward(y_0)
...
output = module[n-1].forward(y_{n-2})
Just write a little loop.
# Your code goes here. ################################################
return self.output
def backward(self, input, gradOutput):
Workflow of BACKWARD PASS:
g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)
g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})
...
g_1 = module[1].backward(y_0, g_2)
gradInput = module[0].backward(input, g_1)
!!!
To each module you need to provide the same input it saw during the forward pass;
it is used while computing gradients.
Make sure that the input for the `i`-th layer is the output of `module[i-1]` (the same input as in the forward pass)
and NOT the `input` to this Sequential module.
!!!
# Your code goes here. ################################################
return self.gradInput
def zeroGradParameters(self):
for module in self.modules:
module.zeroGradParameters()
def getParameters(self):
Should gather all parameters in a list.
return [x.getParameters() for x in self.modules]
def getGradParameters(self):
Should gather all gradients w.r.t parameters in a list.
return [x.getGradParameters() for x in self.modules]
def __repr__(self):
string = "".join([str(x) + '\n' for x in self.modules])
return string
def __getitem__(self,x):
return self.modules.__getitem__(x)
class Linear(Module):
A module which applies a linear transformation
A common name is fully-connected layer, InnerProductLayer in caffe.
The module should work with 2D input of shape (n_samples, n_feature).
def __init__(self, n_in, n_out):
super(Linear, self).__init__()
# This is a nice initialization
stdv = 1./np.sqrt(n_in)
self.W = np.random.uniform(-stdv, stdv, size = (n_out, n_in))
self.b = np.random.uniform(-stdv, stdv, size = n_out)
self.gradW = np.zeros_like(self.W)
self.gradb = np.zeros_like(self.b)
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def accGradParameters(self, input, gradOutput):
# Your code goes here. ################################################
pass
def zeroGradParameters(self):
self.gradW.fill(0)
self.gradb.fill(0)
def getParameters(self):
return [self.W, self.b]
def getGradParameters(self):
return [self.gradW, self.gradb]
def __repr__(self):
s = self.W.shape
q = 'Linear %d -> %d' %(s[1],s[0])
return q
class SoftMax(Module):
def __init__(self):
super(SoftMax, self).__init__()
def updateOutput(self, input):
# start with normalization for numerical stability
self.output = np.subtract(input, input.max(axis=1, keepdims=True))
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "SoftMax"
class BatchMeanSubtraction(Module):
def __init__(self, alpha = 0.):
super(BatchMeanSubtraction, self).__init__()
self.alpha = alpha
self.old_mean = None
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "BatchMeanNormalization"
class Dropout(Module):
def __init__(self, p=0.5):
super(Dropout, self).__init__()
self.p = p
self.mask = None
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "Dropout"
class ReLU(Module):
def __init__(self):
super(ReLU, self).__init__()
def updateOutput(self, input):
self.output = np.maximum(input, 0)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = np.multiply(gradOutput , input > 0)
return self.gradInput
def __repr__(self):
return "ReLU"
class LeakyReLU(Module):
def __init__(self, slope = 0.03):
super(LeakyReLU, self).__init__()
self.slope = slope
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "LeakyReLU"
class ELU(Module):
def __init__(self, alpha = 1.0):
super(ELU, self).__init__()
self.alpha = alpha
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "ELU"
class SoftPlus(Module):
def __init__(self):
super(SoftPlus, self).__init__()
def updateOutput(self, input):
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, gradOutput):
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "SoftPlus"
class Criterion(object):
def __init__ (self):
self.output = None
self.gradInput = None
def forward(self, input, target):
Given an input and a target, compute the loss function
associated to the criterion and return the result.
For consistency this function should not be overridden,
all the code goes in `updateOutput`.
return self.updateOutput(input, target)
def backward(self, input, target):
Given an input and a target, compute the gradients of the loss function
associated to the criterion and return the result.
For consistency this function should not be overridden,
all the code goes in `updateGradInput`.
return self.updateGradInput(input, target)
def updateOutput(self, input, target):
Function to override.
return self.output
def updateGradInput(self, input, target):
Function to override.
return self.gradInput
def __repr__(self):
Pretty printing. Should be overridden in every module if you want
to have a readable description.
return "Criterion"
class MSECriterion(Criterion):
def __init__(self):
super(MSECriterion, self).__init__()
def updateOutput(self, input, target):
self.output = np.sum(np.power(input - target,2)) / input.shape[0]
return self.output
def updateGradInput(self, input, target):
self.gradInput = (input - target) * 2 / input.shape[0]
return self.gradInput
def __repr__(self):
return "MSECriterion"
class ClassNLLCriterion(Criterion):
def __init__(self):
super(ClassNLLCriterion, self).__init__()
def updateOutput(self, input, target):
# Use this trick to avoid numerical errors
input_clamp = np.maximum(1e-15, np.minimum(input, 1 - 1e-15) )
# Your code goes here. ################################################
return self.output
def updateGradInput(self, input, target):
# Use this trick to avoid numerical errors
input_clamp = np.maximum(1e-15, np.minimum(input, 1 - 1e-15) )
# Your code goes here. ################################################
return self.gradInput
def __repr__(self):
return "ClassNLLCriterion"
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step12: Module is an abstract class which defines fundamental methods necessary for training a neural network. You do not need to change anything here, just read the comments.
Step19: Sequential container
Step21: Layers
Step22: This one is probably the hardest, but like the others it only takes 5 lines of code in total.
Step23: One of the most significant recent ideas that impacted NNs a lot is Batch normalization. The idea is simple, yet effective
Step24: Implement dropout. The idea and implementation is really simple
Step25: Activation functions
Step26: Implement Leaky Rectified Linear Unit. Experiment with the slope.
Step27: Implement Exponential Linear Units activations.
Step28: Implement SoftPlus activations. Note how they look a lot like ReLU.
Step34: Criterions
Step35: The MSECriterion, which is basic L2 norm usually used for regression, is implemented here for you.
Step36: Your task is to implement the ClassNLLCriterion. It should implement multiclass log loss. Nevertheless there is a sum over y (target) in that formula,
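For reference, a standard way to write the multiclass log loss mentioned above (assuming one-hot targets $y$ and predicted class probabilities $p$ over $N$ samples and $C$ classes) is $L = -\frac{1}{N}\sum_{i=1}^{N}\sum_{c=1}^{C} y_{i,c}\log p_{i,c}$, which reduces to $-\frac{1}{N}\sum_{i}\log p_{i,t_i}$ where $t_i$ is the target class index of sample $i$.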
|
482 | <ASSISTANT_TASK:>
Python Code:
import theano
import theano.tensor as T
import numpy as np
vector1 = T.vector('vector1')
vector2 = T.vector('vector2')
output, updates = theano.scan(fn=lambda a, b : a * b,
sequences=[vector1, vector2])
f = theano.function(inputs=[vector1, vector2],
outputs=output,
updates=updates)
vector1_value = np.arange(0, 5).astype(theano.config.floatX) # [0,1,2,3,4]
vector2_value = np.arange(1, 6).astype(theano.config.floatX) # [1,2,3,4,5]
print(f(vector1_value, vector2_value))
print(f(vector1_value, vector2_value[:4]))
X = T.matrix('X') # Minibatch of data
W = T.matrix('W') # Weights of the layer
b = T.vector('b') # Biases of the layer
def step(v, W, b):
return T.dot(v, W) + b
output, updates = theano.scan(fn=step,
sequences=[X],
non_sequences=[W, b])
f = theano.function(inputs=[X, W, b],
outputs=output,
updates=updates)
X_value = np.arange(-3, 3).reshape(3, 2).astype(theano.config.floatX)
W_value = np.eye(2).astype(theano.config.floatX)
b_value = np.arange(2).astype(theano.config.floatX)
print(f(X_value, W_value, b_value))
def step(m_row, cumulative_sum):
return m_row + cumulative_sum
M = T.matrix('X')
s = T.vector('s') # Initial value for the cumulative sum
output, updates = theano.scan(fn=step,
sequences=[M],
outputs_info=[s])
f = theano.function(inputs=[M, s],
outputs=output,
updates=updates)
M_value = np.arange(9).reshape(3, 3).astype(theano.config.floatX)
s_value = np.zeros((3, ), dtype=theano.config.floatX)
print(f(M_value, s_value))
def step(f_minus2, f_minus1):
new_f = f_minus2 + f_minus1
ratio = new_f / f_minus1
return new_f, ratio
f_init = T.fvector()
outputs_info = [dict(initial=f_init, taps=[-2, -1]),
None]
output, updates = theano.scan(fn=step,
outputs_info=outputs_info,
n_steps=10)
next_fibonacci_terms = output[0]
ratios_between_terms = output[1]
f = theano.function(inputs=[f_init],
outputs=[next_fibonacci_terms, ratios_between_terms],
updates=updates)
out = f([1, 1])
print(out[0])
print(out[1])
coefficients = theano.tensor.vector("coefficients")
x = T.scalar("x")
max_coefficients_supported = 10000
def step(coeff, power, free_var):
return coeff * free_var ** power
# Generate the components of the polynomial
full_range=theano.tensor.arange(max_coefficients_supported)
components, updates = theano.scan(fn=step,
outputs_info=None,
sequences=[coefficients, full_range],
non_sequences=x)
polynomial = components.sum()
calculate_polynomial = theano.function(inputs=[coefficients, x],
outputs=polynomial,
updates=updates)
test_coeff = np.asarray([1, 0, 2], dtype=theano.config.floatX)
print(calculate_polynomial(test_coeff, 3))
# 19.0
%load scan_ex1_solution.py
probabilities = T.vector()
nb_samples = T.iscalar()
rng = T.shared_randomstreams.RandomStreams(1234)
def sample_from_pvect(pvect):
Provided utility function: given a symbolic vector of
probabilities (which MUST sum to 1), sample one element
and return its index.
onehot_sample = rng.multinomial(n=1, pvals=pvect)
sample = onehot_sample.argmax()
return sample
def set_p_to_zero(pvect, i):
Provided utility function: given a symbolic vector of
probabilities and an index 'i', set the probability of the
i-th element to 0 and renormalize the probabilities so they
sum to 1.
new_pvect = T.set_subtensor(pvect[i], 0.)
new_pvect = new_pvect / new_pvect.sum()
return new_pvect
# TODO use Scan to sample from the vector of probabilities and
# symbolically obtain 'samples' the vector of sampled indices.
samples = None
# Compiling the function
f = theano.function(inputs=[probabilities, nb_samples],
outputs=[samples])
# Testing the function
test_probs = np.asarray([0.6, 0.3, 0.1], dtype=theano.config.floatX)
for i in range(10):
print(f(test_probs, 2))
%load scan_ex2_solution.py
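# One possible shape of the solution (an illustrative sketch only; the official answer
# is whatever gets loaded from scan_ex2_solution.py above). It relies on the provided
# helpers sample_from_pvect and set_p_to_zero.
def sample_step(pvect):
    sampled_idx = sample_from_pvect(pvect)
    new_pvect = set_p_to_zero(pvect, sampled_idx)
    return new_pvect, sampled_idx
scan_outputs, scan_updates = theano.scan(fn=sample_step,
                                         outputs_info=[probabilities, None],
                                         n_steps=nb_samples)
samples_sketch = scan_outputs[1]
# Pass the scan updates to theano.function so the random generator state advances.
f_sketch = theano.function(inputs=[probabilities, nb_samples],
                           outputs=[samples_sketch],
                           updates=scan_updates)
print(f_sketch(test_probs, 2))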
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we call the scan() function. It has many parameters but, because our use case is simple, we only need two of them. We'll introduce other parameters in the next examples.
Step2: Calling scan(), we see that it returns two outputs.
Step3: If updates is omitted, the state of any shared variables modified by Scan will not be updated properly. Random number sampling, for instance, relies on shared variables. If updates is not provided, the state of the random number generator won't be updated properly and the same numbers might be sampled repeatedly. Always provide updates when compiling your Theano function.
Step4: An interesting thing is that we never explicitly told Scan how many iteration it needed to run. It was automatically inferred; when given sequences, Scan will run as many iterations as the length of the shortest sequence
Step5: Example 2
Step6: For the sake of variety, in this example we define the computation to be done at every iteration of the loop using a Python function, step(), instead of a lambda expression.
Step7: We can now compile our Theano function and see that it gives the expected results.
Step8: Example 3
Step9: The trick part is informing Scan that our step function expects as input the output of a previous iteration. To achieve this, we need to use a new parameter of the scan() function
Step10: We can now compile and test the Theano function
Step11: An important thing to notice here, is that the output computed by the Scan does not include the initial state that we provided. It only outputs the states that it has computed itself.
Step12: The next step is defining the value of outputs_info.
Step13: Now that we've defined the step function and the properties of our outputs, we can call the scan() function. Because the step() function has multiple outputs, the first output of scan() function will be a list of tensors
Step14: Let's compile our Theano function which will take a vector of consecutive values from the Fibonacci sequence and compute the next 10 values
Step15: Precisions about the order of the arguments to the step function
Step16: Solution
Step19: Exercise 2 - Sampling without replacement
Step20: Solution
|
483 | <ASSISTANT_TASK:>
Python Code:
from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
%matplotlib inline
# here the usual imports. If any of the imports fails,
# make sure that pynoddy is installed
# properly, ideally with 'python setup.py develop'
# or 'python setup.py install'
import sys, os
import matplotlib.pyplot as plt
import numpy as np
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
import pynoddy.history
import pynoddy.experiment
reload(pynoddy.experiment)
rcParams.update({'font.size': 15})
reload(pynoddy.history)
reload(pynoddy.experiment)
from pynoddy.experiment import monte_carlo
model_url = 'http://tectonique.net/asg/ch3/ch3_7/his/typeb.his'
ue = pynoddy.experiment.Experiment(url = model_url)
ue.write_history("typeb_tmp3.his")
ue.write_history("typeb_tmp2.his")
ue.change_cube_size(100)
ue.plot_section('y')
ue.freeze()
ue.set_random_seed(12345)
ue.info(events_only = True)
ev2 = ue.events[2]
ev2.properties
param_stats = [{'event' : 2,
'parameter': 'Amplitude',
'stdev': 100.0,
'type': 'normal'},
{'event' : 2,
'parameter': 'Wavelength',
'stdev': 500.0,
'type': 'normal'},
{'event' : 2,
'parameter': 'X',
'stdev': 500.0,
'type': 'normal'}]
ue.set_parameter_statistics(param_stats)
resolution = 100
ue.change_cube_size(resolution)
tmp = ue.get_section('y')
prob_4 = np.zeros_like(tmp.block[:,:,:])
n_draws = 100
for i in range(n_draws):
ue.random_draw()
tmp = ue.get_section('y', resolution = resolution)
prob_4 += (tmp.block[:,:,:] == 4)
# Normalise
prob_4 = prob_4 / float(n_draws)
fig = plt.figure(figsize = (12,8))
ax = fig.add_subplot(111)
ax.imshow(prob_4.transpose()[:,0,:],
origin = 'lower left',
interpolation = 'none')
plt.title("Estimated probability of unit 4")
plt.xlabel("x (E-W)")
plt.ylabel("z")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining an experiment
Step2: For simpler visualisation in this notebook, we will analyse the following steps in a section view of the model.
Step3: Before we start to draw random realisations of the model, we should first store the base state of the model for later reference. This is simply possible with the freeze() method, which stores the current state of the model as the "base-state".
Step4: We now initialise the random generator. We can directly assign a random seed to simplify reproducibility (note that this is not essential, as it would be for the definition in a script function).
Step5: The next step is to define probability distributions to the relevant event parameters. Let's first look at the different events
Step6: Next, we define the probability distributions for the uncertain input parameters
|
484 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import absolute_import
from __future__ import print_function
# import local library
import tools
import nnlstm
# import library to build the neural network
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
%load_ext watermark
# for reproducibility
%watermark -a 'Paul Willot' -mvp numpy,scipy,keras
X_train, y_train, X_test, y_test, feature_names,max_features, classes_names, vectorizer = tools.load_pickle("data/unpadded_4_BacObjMetCon.pickle")
X_train, X_test, y_train, y_test = nnlstm.pad_sequence(X_train, X_test, y_train, y_test, maxlen=100)
X_train, y_train, X_test, y_test, feature_names, max_features, classes_names, vectorizer = tools.load_pickle("/Users/meat/Documents/NII/data/training_4_BacObjMetCon.pickle")
X_train[0][:100]
# one-hot vector for the 4 different labels
y_train[0]
%%time
# take approximately 50s to build
dim_out = len(classes_names)
net = Sequential()
net.add(Embedding(max_features, 16))
net.add(LSTM(16, 16))
net.add(Dense(16, dim_out))
net.add(Dropout(0.5))
net.add(Activation('softmax'))
net.compile(loss='categorical_crossentropy', optimizer='adam', class_mode="categorical")
batch_size = 100
length_train = 15000 # length of the reduced training set (can put to -1 for all)
length_test = 5000 # length of the reduced testing set (can put to -1 for all)
nb_epoch = 10
patience = 2 # when to apply early stopping, if necessary
history = nnlstm.train_network(net,
X_train[:length_train],
y_train[:length_train],
X_test[:length_test],
y_test[:length_test],
nb_epoch,
batch_size=batch_size,
path_save="weights",
patience=patience)
net.load_weights("weights/best.hdf5")
nnlstm.show_history(history)
nnlstm.evaluate_network(net, X_test[:length_test], y_test[:length_test], classes_names, length=-1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's gather the data from the previous notebook
Step2: and pad each vector to a regular size (necessary for the sequence processing)
Step3: Or directly get a bigger training and testing set
Step4: Our data look like this
Step5: Choosing the architecture
Step6: Training on a small subset
Step7: The weights are saved at each epoch, and you can load 'best' for the epoch with the highest (accuracy * (loss/10))
Step8: Evaluate the network
|
485 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from datetime import timedelta
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
from obspy.core.inventory import read_inventory
import numpy as np
from obspy.clients.fdsn.client import Client
import pdart.auth as auth
from pdart.util import linear_interpolation, timing_correction
from pdart.extra_plots.plot_timing_divergence import plot_timing
import matplotlib
from matplotlib import pyplot as plt
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 10, 4
plt.rcParams['lines.linewidth'] = 0.5
plt.rcParams['font.size'] = 12
SECONDS_PER_DAY=3600.*24
def raw_seismogram():
View a raw seismogram.
user=auth.user
auth_password=auth.auth_password
if user == '' or auth_password == '':
print('Set user and auth_password in auth.py')
return
starttime= UTCDateTime('1973-03-13T07:30:00.0')
endtime = UTCDateTime('1973-03-13T09:30:00.0')
network='XA'
station='S14'
channel='MH1'
location='*'
client = Client("IRIS",user=user,password=auth_password)
print(client)
stream = client.get_waveforms(network=network, station=station, channel=channel, location=location, starttime=starttime, endtime=endtime)
stream.plot(equal_scale=False,size=(1000,600),method='full')
raw_seismogram()
# import pandas as pd
# series = pd.Series()
# print(series)
def view_Apollo(stream=None,starttime= UTCDateTime('1973-03-13T07:30:00.0'),endtime = UTCDateTime('1973-03-13T09:30:00.0'),
network='XA',station='S14',channel='MH1',location='*',plot_seismogram=True,plot_response=False):
Snippet to read in raw seismogram and remove the instrument response for Apollo.
user=auth.user
auth_password=auth.auth_password
if user == '' or auth_password == '':
print('Set user and auth_password in auth.py')
return
client = Client("IRIS",user=user,password=auth_password)
# get the response file (wildcards allowed)
inv = client.get_stations(starttime=starttime, endtime=endtime,
network=network, sta=station, loc=location, channel=channel,
level="response")
if stream is None:
stream = client.get_waveforms(network=network, station=station, channel=channel, location=location, starttime=starttime, endtime=endtime)
else:
stream.trim(starttime=starttime,endtime=endtime)
for tr in stream:
# interpolate across the gaps of one sample
linear_interpolation(tr,interpolation_limit=1)
stream.merge()
for tr in stream:
# optionally interpolate across any gap
# for removing the instrument response from a seimogram,
# it is useful to get a mask, then interpolate across the gaps,
# then mask the trace again.
if tr.stats.channel in ['MH1', 'MH2', 'MHZ']:
# add linear interpolation but keep the original mask
original_mask = linear_interpolation(tr,interpolation_limit=None)
# remove the instrument response
pre_filt = [0.1,0.3,0.9,1.1]
tr.remove_response(inventory=inv, pre_filt=pre_filt, output="DISP",
water_level=None, plot=plot_response)
if plot_response:
plt.show()
# apply the mask back to the trace
tr.data = np.ma.masked_array(tr, mask=original_mask)
elif tr.stats.channel in ['SHZ']:
# add linear interpolation but keep the original mask
original_mask = linear_interpolation(tr,interpolation_limit=None)
# remove the instrument response
pre_filt = [1,2,11,13]
tr.remove_response(inventory=inv, pre_filt=pre_filt, output="DISP",
water_level=None, plot=plot_response)
if plot_response:
plt.show()
# apply the mask back to the trace
tr.data = np.ma.masked_array(tr, mask=original_mask)
if plot_seismogram:
stream.plot(equal_scale=False,size=(1000,600),method='full')
view_Apollo()
view_Apollo(plot_seismogram=False,plot_response=True)
def view_timing_divergence(starttime= UTCDateTime('1973-06-30T00:00:00.00000Z'),
endtime = UTCDateTime('1973-07-01T00:00:00.00000Z'),network='XA',
station='*',channel='ATT',location='*'):
user=auth.user
auth_password=auth.auth_password
if user == '' or auth_password == '':
print('Set user and auth_password in auth.py')
return
client = Client("IRIS",user=user,password=auth_password)
stream = client.get_waveforms(network=network, station=station, channel=channel, location=location, starttime=starttime, endtime=endtime)
plot_timing(stream=stream, start_time=starttime,end_time=endtime,save_fig=False)
view_timing_divergence()
starttime= UTCDateTime('1973-03-13T07:30:00.0')
def get_traces():
Get the traces
user=auth.user
auth_password=auth.auth_password
if user == '' or auth_password == '':
print('Set user and auth_password in auth.py')
return
starttime= UTCDateTime('1973-03-13T00:00:00.0')
endtime = UTCDateTime('1973-03-14T00:00:00.0')
network='XA'
station='*'
channel='*'
location='*'
client = Client("IRIS",user=user,password=auth_password)
print(client)
stream = client.get_waveforms(network=network, station=station, channel=channel, location=location, starttime=starttime, endtime=endtime)
return stream
stream_before = get_traces()
print(stream_before)
# plot the timing divergence before correction
plot_timing(stream=stream_before, start_time=UTCDateTime('1973-03-13T00:00:00.0'),end_time=UTCDateTime('1973-03-13T12:00:00.0'),save_fig=False)
stream_after = stream_before.copy()
correction_time=UTCDateTime('1973-03-13T08:02:00.0')
timing_correction(stream_after,correction_time=correction_time)
# timing divergence after correction
plot_timing(stream=stream_after, start_time=UTCDateTime('1973-03-13T00:00:00.0'),end_time=UTCDateTime('1973-03-13T12:00:00.0'),save_fig=False)
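# If correcting start times is not enough, the traces can also be resampled onto a
# regular time base. A minimal sketch (assumptions: masked gaps are split off first,
# and ~6.625 samples/s is taken as the nominal mid-period sampling rate):
stream_interp = stream_after.copy().split()
stream_interp.interpolate(sampling_rate=6.625)
print(stream_interp)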
print('End of Notebook')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Started with the Apollo Passive Seismic Data Archive
Step3: Notice that the raw seismogram is
Step5: In the next section, we will make a correction for the timing divergence. Taking the approximate onset time of the event, (1973-03-13T07
Step6: The extent of the timing divergence can be seen from the previous image. In many situations it may be necessary to do more than just correct the start times, and instead to reinterpolate the data.
|
486 | <ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import urllib
import zipfile
from matplotlib import pylab
from sklearn.manifold import TSNE
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
Download a file if not present, and make sure it's the right size.
if not os.path.exists(filename):
filename, _ = urllib.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print 'Found and verified', filename
else:
print statinfo.st_size
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
def read_data(filename):
f = zipfile.ZipFile(filename)
for name in f.namelist():
return f.read(name).split()
f.close()
words = read_data(filename)
print 'Data size', len(words)
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print 'Most common words (+UNK)', count[:5]
print 'Sample data', data[:10]
del words # Hint to reduce memory.
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size / num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
print data[:20]
batch, labels = generate_batch(batch_size=16, num_skips=1, skip_window=2)
for i in range(16):
print batch[i], '->', labels[i, 0]
# print reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]]
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(xrange(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print "Initialized"
average_loss = 0
for step in xrange(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print "Average loss at step", step, ":", average_loss
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log = "%s %s," % (log, close_word)
print log
final_embeddings = normalized_embeddings.eval()
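# A small follow-up sketch: query nearest neighbours of one word straight from the
# learned embeddings with plain numpy (the query word is an arbitrary choice here).
query_idx = dictionary.get('three', 0)
sims = np.dot(final_embeddings, final_embeddings[query_idx])
print [reverse_dictionary[i] for i in (-sims).argsort()[1:9]]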
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
pylab.figure(figsize=(15,15)) # in inches
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
words = [reverse_dictionary[i] for i in xrange(1, num_points+1)]
plot(two_d_embeddings, words)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Download the data from the source website if necessary.
Step3: Read the data into a string.
Step4: Build the dictionary and replace rare words with UNK token.
Step5: Function to generate a training batch for the skip-gram model.
Step6: Train a skip-gram model.
|
487 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame.from_dict({'id': ['A', 'B', 'A', 'C', 'D', 'B', 'C'],
'val': [1,2,-3,1,5,6,-2],
'stuff':['12','23232','13','1234','3235','3236','732323']})
def g(df):
df['cummax'] = df.groupby('id')['val'].transform(pd.Series.cummax)
return df
df = g(df.copy())
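# For reference, the expected result on the frame above: 'cummax' holds the running
# maximum of 'val' within each id group, i.e. [1, 2, 1, 1, 5, 6, 1].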
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
488 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(data_path, preload=True, verbose=False)
raw = raw.crop(0, 2)
print(raw)
filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)]
f, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10))
_ = ax.plot(raw._data[0])
for fband in filt_bands:
raw_filt = raw.copy()
raw_filt.filter(*fband)
_ = ax2.plot(raw_filt._data[0])
ax2.legend(filt_bands)
ax.set_title('Raw data')
ax2.set_title('Band-pass filtered data')
# Filter signal, then take hilbert transform
raw_band = raw.copy()
raw_band.filter(12, 18)
raw_hilb = raw_band.copy()
hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True)
raw_hilb.apply_hilbert(hilb_picks)
print(raw_hilb._data.dtype)
# Take the amplitude and phase
raw_amp = raw_hilb.copy()
raw_amp.apply_function(np.abs, hilb_picks, float, 1)
raw_phase = raw_hilb.copy()
raw_phase.apply_function(np.angle, hilb_picks, float, 1)
f, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10))
a1.plot(raw_band._data[hilb_picks[0]])
a1.plot(raw_amp._data[hilb_picks[0]])
a2.plot(raw_phase._data[hilb_picks[0]])
a1.set_title('Amplitude of frequency band')
a2.set_title('Phase of frequency band')
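# A short sketch of applying an arbitrary custom function channel-wise, mirroring the
# positional apply_function signature used above (z-scoring is just an illustrative choice).
def zscore(x):
    return (x - np.mean(x)) / np.std(x)
raw_z = raw_band.copy()
raw_z.apply_function(zscore, hilb_picks, float, 1)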
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is often necessary to modify data once you have loaded it into memory.
Step2: Signal processing
Step3: In addition, there are functions for applying the Hilbert transform, which is
Step4: Finally, it is possible to apply arbitrary functions to your data to do what you want.
|
489 | <ASSISTANT_TASK:>
Python Code:
# import feedforward neural net
from mlnn import neural_net
# Visualize tanh and its derivative
x = np.linspace(-np.pi, np.pi, 120)
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.plot(x, np.tanh(x))
plt.title("tanh(x)")
plt.xlim(-3, 3)
plt.subplot(1, 2, 2)
plt.plot(x, 1 - np.square(np.tanh(x)))
plt.xlim(-3, 3)
plt.title("tanh\'(x)")
plt.show()
# Training the neural network
my_nn = neural_net([2, 4, 2]) # [2,4,2] = [input nodes, hidden nodes, output nodes]
my_nn.train(X, y, 0.001, 0.0001) # weights regularization lambda= 0.001 , epsilon= 0.0001
### visualize predictions
my_nn.visualize_preds(X ,y)
X_, y_ = sklearn.datasets.make_circles(n_samples=400, noise=0.18, factor=0.005, random_state=1)
plt.figure(figsize=(7, 5))
plt.scatter(X_[:, 0], X_[:, 1], s=15, c=y_, cmap=plt.cm.Spectral)
plt.show()
'''
Uncomment the code below to see classification process for above data.
To stop training early reduce no. of iterations.
'''
#new_nn = neural_net([2, 6, 2])
#new_nn.animate_preds(X_, y_, 0.001, 0.0001) # max iterations = 35000
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: It can be seen from the above figure that as we increase our input, our activation starts to saturate, which can in turn kill gradients. This can be mitigated using rectified activation functions. Another problem that we encounter in training deep neural networks during backpropagation is vanishing gradients and gradient explosion. It can be observed that the derivative of our nth activation, $\large\frac{\partial act_n}{\partial pre_act_n}$, is fairly large near zero. Let's assume the weights satisfy $|w_{i}*tanh'(x)| < 1$, which is usually the case for weights smaller than 1. The successive product of such values across layers shrinks the computed gradient exponentially, leading to the vanishing gradient problem. This is not a rigorous explanation of the vanishing gradient problem; for more information refer to this article.
Step3: Animate Training
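As a rough worked example of the scale involved (an illustration added here, not part of the original text): if each layer contributes a factor of about $|w_i \cdot tanh'(x)| \approx 0.25$ to the backpropagated gradient, then after 10 layers the gradient is scaled by roughly $0.25^{10} \approx 9.5\times10^{-7}$, which is why the earliest layers receive almost no learning signal.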
|
490 | <ASSISTANT_TASK:>
Python Code:
class Test:
pass
a = Test()
a
type(a)
type(Test)
type(type)
type?
TestWithType = type('TestWithType', (object,), {})
type(TestWithType)
ins1 = TestWithType()
type(ins1)
type('TestWithType', (object,), {})()
class TestClass:
def __new__(cls, *args, **kwargs):
print('new method called')
instance = super(TestClass, cls).__new__(cls, *args, **kwargs)
return instance
def __call__(self, a, b, c):
self.call_count += 1
print('call method called')
return a * b * c
def __init__(self):
self.call_count = 0
super(TestClass, self).__init__()
print('init method called')
def get_call_count(self):
return self.call_count
a = TestClass()
a(1,2,3)
a.get_call_count()
class MySingletonMeta(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(MySingletonMeta, cls).__call__(*args)
return cls._instances[cls]
class MySingletonClass(metaclass=MySingletonMeta):
def __init__(self):
self.i = 1
a = MySingletonClass()
b = MySingletonClass()
type(a), id(a) , type(b), id(b)
class MyMetaClass(type):
_test_attribute = 1
def __new__(cls, *args, **kwargs):
print("metaclass new method called")
return super(MyMetaClass, cls).__new__(cls, *args, **kwargs)
def __call__(cls, *args, **kwargs):
print("metaclass call method called")
return super(MyMetaClass, cls).__call__(*args, **kwargs)
def __init__(self, *args, **kwargs):
print("metaclass init method called")
return super(MyMetaClass, self).__init__(*args, **kwargs)
def test_method_1(self):
print("MyMetaClass - Test method 1 called")
class MyClass(metaclass=MyMetaClass):
def __new__(cls, *args, **kwargs):
print("instance new method called")
return super(MyClass, cls).__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
print("instance init method called")
return super(MyClass, self).__init__(*args, **kwargs)
ins2 = MyClass()
MyClass._test_attribute
MyClass.__mro__
MyMetaClass.__mro__
from abc import ABCMeta, ABC, abstractmethod
ABCMeta?
class MyAbstractClass(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def my_abstract_method(self):
pass
MyAbstractClass()
class MyChildClass(MyAbstractClass):
def __init__(self):
pass
def my_abstract_method(self):
pass
mcc = MyChildClass()
mcc
class MySingletonABCMeta(ABCMeta):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(MySingletonABCMeta, cls).__call__(*args)
return cls._instances[cls]
class MyAbstractSingletonClass(metaclass=MySingletonABCMeta):
def __init__(self):
pass
@abstractmethod
def my_abstract_method(self):
pass
MyAbstractSingletonClass()
class MyAbstractSingletonChild(MyAbstractSingletonClass):
def __init__(self):
pass
def my_abstract_method(self):
pass
a1 = MyAbstractSingletonChild()
b1 = MyAbstractSingletonChild()
type(a1), id(a1), type(b1), id(b1)
class MyBeanMeta(type):
_instances = {}
def __call__(cls, *args):
print(args)
key = tuple((cls, args))
if key not in cls._instances:
cls._instances[key] = super(MyBeanMeta, cls).__call__(*args)
return cls._instances[key]
class MyBeanClass(metaclass=MyBeanMeta):
def __init__(self, a ):
self.a = a
bn1 = MyBeanClass(1)
bn2 = MyBeanClass(2)
bn3 = MyBeanClass(3)
bn4 = MyBeanClass(1)
id(bn1), id(bn2), id(bn3), id(bn4)
import logging
logging.basicConfig(filename='example.log', level=logging.INFO)
logging.debug('This message should go to the log file')
logging.info('So should this')
logging.warning('And this, too')
class MyLogSingletonMeta(type):
logger = logging.getLogger('abc')
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
instance = super(MyLogSingletonMeta, cls).__call__(*args)
cls._instances[cls] = instance
instance.__dict__['logger'] = logging.getLogger('abc')
return cls._instances[cls]
class MyLogEnabledClass(metaclass=MyLogSingletonMeta):
def test_function(self):
self.logger.info('Inside test_function method of Log Enabled class')
pass
lec_instance1 = MyLogEnabledClass()
lec_instance2 = MyLogEnabledClass()
lec_instance1.test_function()
print(id(lec_instance1), id(lec_instance2))
!cat example.log
class MyLogger:
def __init__(self, logger=None):
self.logger = logger
def __call__(self, func):
def wrapper(*args, **kwargs):
if self.logger is None:
print(str(func) + " is called")
else:
self.logger.info(str(func) + " is called")
return func(*args, **kwargs)
return wrapper
class MyLoggingMeta(type):
def __new__(cls, name, bases, attrs):
for item, value in attrs.items():
if callable(value):
print("Function item :" + str(item), str(value), type(value))
attrs[item] = MyLogger()(value)
else:
print(str(item), str(value), type(value))
return super(MyLoggingMeta, cls).__new__(cls, name, bases, attrs)
class MyClass1(metaclass=MyLoggingMeta):
def test_m1(self):
pass
def test_m2(self):
pass
a= MyClass1()
a.test_m2()
a.test_m1()
class MySealedMeta(type):
def __new__(cls, name, bases, attrs):
all_metaclasses = [type(x) for x in bases]
if MySealedMeta in all_metaclasses:
raise TypeError("Sealed class cannot be subclassed")
return super(MySealedMeta, cls).__new__(cls, name, bases, attrs)
class MySealedClass(metaclass=MySealedMeta):
pass
class MyChildOfSealed(MySealedClass):
pass
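# Note: defining MyChildOfSealed raises TypeError("Sealed class cannot be subclassed")
# at class-creation time, because MySealedMeta.__new__ finds MySealedMeta among the
# metaclasses of the base classes.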
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classes - Nothing but instances of types. A class is technically sugar over the native 'type'.
Step2: 'type' is an important native structure used for creating classes.
Step3: What is type? 'type' defines how a class behaves in Python.
Step4: LifeCycle with Metaclasses
Step5: Pattern 1
Step6: Pattern 2
Step7: Pattern 3
Step8: Pattern 4
Step9: Pattern 5
|
491 | <ASSISTANT_TASK:>
Python Code:
# Loading modules
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1,2,3,5,6,7,8,10],dtype=float)
x
y = np.arange(10)
y
z = np.linspace(0,100,50)
z
h = np.random.randn(100)
h
print('Min X: {0:.3f} \t Max X: {1:.3f}'.format(np.min(x), np.max(x)) )
zz = x**2 + 3*x**3
zz
zz_idx = np.where((zz>= 100)&(zz <= 500))[0]
print('zz_idx: {0}'.format(zz_idx))
zz[zz_idx]
h1 = np.random.randint(10, 50, 50)
h1
np.size(h1)
h1.shape
A = np.array([[1,2,3,4,5],
[6,7,8,9,10],
[12,13,14,16,17],
[13,45,67,89,90] ])
A
np.shape(A)
A_t = np.transpose(A)
A_t
np.arange(0,10,1)
np.arange(0,20,5)
np.arange(-40,21,10)
B = np.linspace(0,50)
B
B = np.linspace(0,100, 20)
B
B = np.logspace(0,3,25)
B
B = np.logspace(0,10,11, base=np.e)
B
from numpy import random
# Uniform random numbers in [0,1]
random.rand(5,5)
# 20 Random integers from 10 to 30
random.randint(10,30,20)
np.zeros(20)
nelem = 10
C = np.ones(10)
C
for ii in range(C.size):
C[ii] = random.rand()
C
np.diag(random.randint(10,20,5))
M = random.rand(10,5)
M
M[1,:]
M[:,1]
M[1:3, 2:4]
A = random.rand(3,3)
np.fill_diagonal(A, np.nan)
A
B = np.arange(0,9).reshape((3,3))
B
A_mask = np.isfinite(A)
A_mask
B[A_mask]
# Creating my bin edges
bins = np.arange(0,13)
bins
# Generating Data
data = 10*random.rand(100)
data
# Defining statistical function to use
stat_func = np.nanmean
# Binning the data
data_bins = np.digitize(data, bins)
data_bins
failval = -10
bins_stat = np.array([stat_func(data[data_bins == ii]) \
if len(data[data_bins == ii]) > 0 \
else failval \
for ii in range(1,len(bins))])
bins_stat = np.asarray(bins_stat)
bins_stat
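# For reference, scipy offers an equivalent one-call helper (a sketch, assuming scipy
# is available; unlike the loop above it is not NaN-aware by default).
from scipy import stats
bins_stat_scipy, _, _ = stats.binned_statistic(data, data, statistic='mean', bins=bins)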
import math
def myceil(x, base=10):
Returns the upper-bound integer of 'x' in base 'base'.
Parameters
----------
x: float
number to be approximated to closest number to 'base'
base: float
base used to calculate the closest 'largest' number
Returns
-------
n_high: float
Closest float number to 'x', i.e. upper-bound float.
Example
-------
>>>> myceil(12,10)
20
>>>>
>>>> myceil(12.05, 0.1)
12.10000
n_high = float(base*math.ceil(float(x)/base))
return n_high
def myfloor(x, base=10):
Returns the lower-bound integer of 'x' in base 'base'
Parameters
----------
x: float
number to be approximated to closest number of 'base'
base: float
base used to calculate the closest 'smallest' number
Returns
-------
n_low: float
Closest float number to 'x', i.e. lower-bound float.
Example
-------
>>>> myfloor(12, 5)
>>>> 10
n_low = float(base*math.floor(float(x)/base))
return n_low
def Bins_array_create(arr, base=10):
Generates array between [arr.min(), arr.max()] in steps of `base`.
Parameters
----------
arr: array_like, Shape (N,...), One-dimensional
Array of numerical elements
base: float, optional (default=10)
Interval between bins
Returns
-------
bins_arr: array_like
Array of bin edges for given arr
base = float(base)
arr = np.array(arr)
assert(arr.ndim==1)
arr_min = myfloor(arr.min(), base=base)
arr_max = myceil( arr.max(), base=base)
bins_arr = np.arange(arr_min, arr_max+0.5*base, base)
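# (the +0.5*base offset makes the upper edge inclusive, since np.arange excludes its stop value)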
return bins_arr
def Mean_std_calc_one_array(x1, y1, arr_len=0, statfunc=np.nanmean,
failval=np.nan, error='std',
base=10.):
Calculates statistics of two arrays, e.g. scatter,
error in `statfunc`, etc.
Parameters
----------
x1: array-like, shape (N,)
array of x-values
y1: array-like, shape (N,)
array of y-values
arr_len: int, optional (default = 0)
minimum number of elements in the bin
statfunc: numpy function, optional (default = numpy.nanmean)
statistical function used to evaluate the bins
failval: int or float, optional (default = numpy.nan)
Number to use to replace when the number of elements in the
bin is smaller than `arr_len`
error: string, optional (default = 'std')
type of error to evaluate
Options:
- 'std': Evaluates the standard deviation of the bin
- 'stat': Evaluates the error in the mean/median of each bin
- 'none': Does not calculate the error in `y1`
base: float
Value of bin width in units of that of `x1`
Returns
--------
x1_stat: array-like, shape (N,)
`statfunc` of each bin in `base` spacings for x1
y1_stat: array-like, shape (N,)
`statfunc` of each bin in `base` spacings for y1
y1_err: array-like, shape (N,)
error estimate for each bin of y1 (zeros when error=='none')
x1 = np.asarray(x1)
y1 = np.asarray(y1)
assert((x1.ndim==1) & (y1.ndim==1))
assert((x1.size >0) & (y1.size>0))
n_elem = len(x1)
## Computing Bins
x1_bins = Bins_array_create(x1, base=base)
x1_digit = np.digitize(x1, x1_bins)
## Computing Statistics in bins
x1_stat = np.array([statfunc(x1[x1_digit==ii])
if len(x1[x1_digit==ii])>arr_len
else failval
for ii in range(1,x1_bins.size)])
y1_stat = np.array([statfunc(y1[x1_digit==ii])
if len(y1[x1_digit==ii])>arr_len
else failval
for ii in range(1,x1_bins.size)])
## Computing error in the data
if error=='std':
stat_err = np.nanstd
y1_err = np.array([stat_err(y1[x1_digit==ii])
if len(y1[x1_digit==ii])>arr_len
else failval
for ii in range(1,x1_bins.size)])
elif error=='stat':
# error in the mean/median: std/sqrt(N), with the usual 1.253 correction for medians
stat_err = np.nanstd
y1_err = np.array([stat_err(y1[x1_digit==ii])/np.sqrt(len(y1[x1_digit==ii]))
if len(y1[x1_digit==ii])>arr_len
else failval
for ii in range(1,x1_bins.size)])
if (statfunc==np.median) or (statfunc==np.nanmedian):
y1_err *= 1.253
else:
y1_err = np.zeros(y1_stat.size)
return x1_stat, y1_stat, y1_err
import numpy as np
# Defining arrays
x_arr = np.arange(100)
y_arr = 50*np.random.randn(x_arr.size)
# Computing mean and error in the mean for `x_arr` and `y_arr`
x_stat, y_stat, y_err = Mean_std_calc_one_array(x_arr, y_arr,
statfunc=np.nanmean,
failval=np.nan,
base=10)
x_stat2, y_stat2, y_err2 = Mean_std_calc_one_array(x_arr, y_arr,
statfunc=np.nanmedian,
failval=np.nan,
base=10)
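# (both calls leave error='std' at its default, so y_err and y_err2 hold the per-bin standard deviations)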
plt.style.use('seaborn-notebook')
plt.clf()
plt.close()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111,facecolor='white')
ax.plot(x_arr, y_arr, 'ro', label='Data')
ax.errorbar(x_stat, y_stat, yerr=y_err, color='blue', marker='o',
linestyle='--',label='Mean')
ax.errorbar(x_stat2, y_stat2, yerr=y_err2, color='green', marker='o',
linestyle='--',label='Median')
ax.set_xlabel('X axis', fontsize=20)
ax.set_ylabel('Y axis', fontsize=20)
ax.set_title('Data and the Binned Data', fontsize=24)
plt.legend(fontsize=20)
plt.show()
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
A
n, m = A.shape
B = A.reshape((1,n*m))
B
A_f = A.flatten()
A_f
C = random.rand(A.size)
C
C.shape
# Stacking the two arrays
D = np.column_stack((A_f,C))
D
# Selecting from 3rd to 11th row
D[2:10]
a = np.array([[1, 2], [3, 4]])
b = np.array([[5,6]])
np.concatenate((a,b))
np.concatenate((a,b.T), axis=1)
A = np.array([[1, 2], [3, 4]])
A
# `B` is now referring to the same array data as `A`
B = A
B[0,0] = 10
B
A
B = np.copy(A)
# Modifying `B`
B[0,0] = -5
B
A
import scipy as sc
from scipy.interpolate import interp1d
def f(x):
return np.sin(x)
n = np.arange(0, 10)
x = np.linspace(0, 9, 100)
y_meas = f(n) + 0.1 * np.random.randn(len(n)) # simulate measurement with noise
y_real = f(x)
linear_interpolation = interp1d(n, y_meas)
y_interp1 = linear_interpolation(x)
cubic_interpolation = interp1d(n, y_meas, kind='cubic')
y_interp2 = cubic_interpolation(x)
fig, ax = plt.subplots(figsize=(15,6))
ax.set_facecolor('white')
ax.plot(n, y_meas, 'bs', label='noisy data')
ax.plot(x, y_real, 'k', lw=2, label='true function')
ax.plot(x, y_interp1, 'r', label='linear interp')
ax.plot(x, y_interp2, 'g', label='cubic interp')
ax.legend(loc=3, prop={'size':20});
ax.tick_params(axis='both', which='major', labelsize=20)
ax.tick_params(axis='both', which='minor', labelsize=15)
Lbox = 250.
Npts = 1000
# Creating cartesian coordinates
x = np.random.uniform(0, Lbox, Npts)
y = np.random.uniform(0, Lbox, Npts)
z = np.random.uniform(0, Lbox, Npts)
sample1 = np.vstack([x, y, z]).T
sample1
sample1.shape
from scipy.spatial import cKDTree
# Initializing KDTree
KD_obj = cKDTree(sample1)
N_neighbours = cKDTree.count_neighbors(KD_obj, KD_obj, 50) - \
cKDTree.count_neighbors(KD_obj, KD_obj, 30)
print("Number of Neighbours: {0}".format(N_neighbours))
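# Note: count_neighbors(KD_obj, KD_obj, r) counts pairs drawn one from each tree (so self-pairs
# are included); the subtraction above therefore gives the number of pairs with 30 < d <= 50.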
k_nearest = 4
dist_k, dist_k_idx = cKDTree.query(KD_obj, sample1, k_nearest)
dist_k
dist_k_idx
pairs = KD_obj.query_ball_tree(KD_obj, 30)
pairs[0:10]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Review
Step2: Handling arrays
Step3: Apply mathematical functions
Step4: Conditionals
Step5: Manipulating Arrays
Step6: We can get the overall size and shape of the array.
Step7: You can also transpose array A.
Step8: Why are Numpy arrays better than lists
Step9: linspace and logspace
Step10: Array of 25 elements from $10^{0}$ to $10^{3}$, with base of 10.
Step11: Creating an array of 11 elements from $e^{0}$ to $e^{10}$, with the base == numpy.e
Step12: Random Data
Step13: Arrays of zeros and ones.
Step14: You can use these to populate other arrays
Step15: Diagonals
Step16: Indexing
Step17: Selecting the 1st row
Step18: The 2nd column
Step19: Select a range of columns and rows
Step20: You can easily use this to create a mask, for when you are cleaning your data.
Step21: Appying the mask from $A \to B$
Step22: Binning you data
Step23: Now I want to bin my data and calculate the mean for each bin
Step24: Calculating the mean for each of the bins
Step29: You can put all of this into a function that estimates errors and more...
Step30: Example of using these functions
Step31: With this function, it is really easy to apply statistics on binned data, as well as to estimate errors on the data.
Step32: np.concatenate
Step33: Copy and "Deep Copy"
Step34: If we make any changes to B, A will also be affected by this change.
Step35: To get a completely independent, new object, you would use
Step36: The array A was not affected by this change. This is important when you're constantly re-defining new arrays.
Step37: Interpolation
Step38: KD-Trees
Step39: Let's say we want to know how many points are within distances of 30 and 50 from other points. To know this, you construct a KD-Tree
Step40: Let's say you want to get the distances to the Nth-nearest neighbor.
Step41: You can also get the indices
Step42: The first column corresponds to the point itself (distance zero).
|
492 | <ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
Download file from <url>
:param url: URL to file
:param file: Local file path
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for greyscale image data
def normalize_greyscale(image_data):
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
# ToDo: Implement Min-Max scaling for greyscale image data
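# Min-Max scaling used below, with target range [a, b] = [0.1, 0.9]:
#     X' = a + (X - X_min) * (b - a) / (X_max - X_min)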
minV = np.amin(image_data)
maxV = np.amax(image_data)
return (0.1 + ((image_data - minV)*(0.9 - 0.1))/(maxV - minV))
print(np.shape(train_features))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_greyscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_greyscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
train_features = normalize_greyscale(train_features)
test_features = normalize_greyscale(test_features)
is_features_normal = True
print('Tests Passed!')
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
features_count = 784
labels_count = 10
# ToDo: Set the features and labels tensors
features = tf.placeholder(shape=[None, features_count], dtype=tf.float32)
labels = tf.placeholder(shape=[None, labels_count], dtype=tf.float32)
#labels = tf.placeholder(shape=[10,1], dtype=tf.float32)
# ToDo: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
# (tf.nn.softmax already handles numerical stability internally, so no manual max-subtraction is needed)
prediction = tf.nn.softmax(logits)
# Cross entropy
#prediction -= np.max(prediction)
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
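# (reduction_indices=1 sums over the 10 classes for each example; the reduce_mean below
#  then averages over the batch to give a single scalar loss)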
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.initialize_all_variables()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# ToDo: Find the best parameters for each configuration
epochs = 1
batch_size = 100
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'b', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
# ToDo: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 5
batch_size = 64
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST data is a large dataset to handle for most computers. It contains 500 thousands images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/mean_variance.png" style="height
Step6: Checkpoint
Step7: <img src="image/weight_biases.png" style="height
Step8: <img src="image/learn_rate_tune.png" style="height
Step9: Test
|
493 | <ASSISTANT_TASK:>
Python Code:
import random
elements = list(range(1, 11)) * 2 + [25, 50, 75, 100]
game = random.sample(elements, 6)
goal = random.randint(100, 999)
print goal, ':', game
# the DNA is just the calcul in a string
def random_dna(game):
# we want random links to node, so we need to shuffle the game
game_shuffled = list(game)
random.shuffle(game_shuffled)
# let start with an empty dna
dna = ''
i = 0
while i < len(game_shuffled):
try_dna = dna
if i > 0:
try_dna += random.choice([' * ', ' / ', ' + ', ' - '])
try_dna += str(float(game_shuffled[i]))
# we check that the result is still an int before recording the random gene
check_result = eval(try_dna)
if check_result == int(check_result):
dna = try_dna
i += 1
return dna
test_dna = random_dna(game)
test_res = eval(test_dna)
assert test_res == int(test_res)
population_size = 1000
def first_generation(population_size=1000):
return [random_dna(game) for _ in range(population_size)]
population = first_generation(population_size)
print 'Test:', test_dna, '=', test_res
print 'Population Size:', population_size
print 'Population:'
for dna in population[:5]:
print '->', dna, ' = ', eval(dna)
print '-> ...'
for dna in population[-5:]:
print '->', dna, ' = ', eval(dna)
def score(dna):
return abs(goal - eval(dna))
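# Fitness comment (added for clarity): the score is the distance to the goal, so 0 means
# the expression evaluates exactly to the target number.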
scored_population = sorted([(dna, score(dna)) for dna in population], key=lambda item: item[1])
def show_scored_population(scored_population):
for dna, score in scored_population[:5]:
print '->', dna, ' = ', eval(dna), '\t|', score
print '-> ...'
for dna, score in scored_population[-5:]:
print '->', dna, ' = ', eval(dna), '\t|', score
show_scored_population(scored_population)
from collections import OrderedDict
import math
import json
import matplotlib, matplotlib.pyplot as plt
def generation_stats(generation):
scores = [c[1] for c in generation]
stats = OrderedDict((
('avg', float(sum(scores)) / len(scores)),
('min', min(scores)),
('max', max(scores)),
('stdev', None),
('q1', None),
('med', None),
('q3', None)
))
variance = float(sum([(s - stats['avg'])**2 for s in scores])) / len(scores)
stats['stdev'] = math.sqrt(variance)
q1idx = len(scores) / 4
stats['q1'] = scores[q1idx]
q3idx = 3 * len(scores) / 4
stats['q3'] = scores[q3idx]
if len(scores) % 2 == 0:
i1idx = len(scores) / 2 - 1
i2idx = i1idx + 1
i1, i2 = scores[i1idx], scores[i2idx]
stats['med'] = (i1 + i2) / 2.0
else:
medidx = len(scores) / 2
stats['med'] = scores[medidx]
return stats, scores
def plot_stats(stats, scores, gen=0):
rows = zip(*stats.items())
dim = [0.05, 0.80, 0.9, 0.15]
# Figure 1: min avg/q3 max color graph
fig1 = plt.figure(figsize=(18, 3))
a1x = fig1.add_axes(dim)
cmap1 = matplotlib.colors.ListedColormap(['g', 'b', 'r'])
bounds1 = [
stats['min'],
min(stats['q3'], stats['avg']),
max(stats['q3'], stats['avg']),
stats['max']
]
norm1 = matplotlib.colors.BoundaryNorm(bounds1, cmap1.N)
cbl1 = matplotlib.colorbar.ColorbarBase(
a1x,
cmap=cmap1, norm=norm1,
spacing='proportional',
orientation='horizontal'
)
# Figure 2: min q1 med q3 color graph
fig2 = plt.figure(figsize=(18, 3))
a2x = fig2.add_axes(dim)
cmap2 = matplotlib.colors.ListedColormap(['g', 'b', 'y'])
bounds2 = [stats['min'], stats['q1'], stats['med'], stats['q3']]
norm2 = matplotlib.colors.BoundaryNorm(bounds2, cmap2.N)
cbl2 = matplotlib.colorbar.ColorbarBase(
a2x,
cmap=cmap2, norm=norm2,
spacing='proportional',
orientation='horizontal'
)
a1x.set_xticklabels([
'min',
'avg' if stats['avg'] <= stats['q3'] else 'q3',
'avg' if stats['avg'] > stats['q3'] else 'q3',
'max'
])
a2x.set_xticklabels(['min', 'q1', 'med', 'q3', 'max'])
# Figure 3: scores line chart
fig3, a3x = plt.subplots()
a3x.plot(scores)
a3x.grid(True)
a3x.set_ylabel('Score')
a3x.set_xlabel('Candidate')
a1x.set_title('Generation: {0}'.format(gen))
plt.show()
stats1, scores1 = generation_stats(scored_population)
print json.dumps(stats1, indent=2)
plot_stats(stats1, scores1)
def selection(generation, stats):
parents = []
q1 = filter(lambda c: c[1] < stats['q1'], generation)
q2 = filter(lambda c: stats['q1'] <= c[1] < stats['med'], generation)
q3 = filter(lambda c: stats['med'] <= c[1] < stats['q3'], generation)
q4 = filter(lambda c: stats['q3'] <= c[1], generation)
for q in [q1, q2, q3, q4]:
parents += q[:len(q) / 2]
return parents
s1 = selection(scored_population, stats1)
show_scored_population(s1)
stats_s1, scores_s1 = generation_stats(s1)
print json.dumps(stats_s1, indent=2)
plot_stats(stats_s1, scores_s1)
from redbaron import RedBaron, BinaryOperatorNode, FloatNode
red = RedBaron(s1[0][0])
print 'Example:', s1[0][0]
red.help()
def gen_child(parents_dna):
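# Overview of the crossover + mutation below (comments added for clarity):
#  1. pick a random operator subtree from parent A,
#  2. blank out the numbers used by that subtree in parent B's tree and prune the emptied nodes,
#  3. join A's subtree with the pruned B via a random operator (the mutation),
#     retrying until the combined expression evaluates to an integer.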
# reimport things because it will be used in a IPython parallel engine
from redbaron import RedBaron
import random
tmpred = RedBaron(parents_dna[0])
child = RedBaron(parents_dna[1])
# Choose random operator from parent a
operators = tmpred.find_all('binary_operator')
op = random.choice(operators[1:]) # we don't want the root operator
# Find and remove all leafs from child
nbs = [float(nb.value) for nb in op.find_all('float')]
# mark the nodes as empty
for node in child.find_all('float'):
if float(node.value) in nbs:
if node.parent.first is node:
node.parent.first.replace('None')
elif node.parent.second is node:
node.parent.second.replace('None')
# keep going until nothing is done (which means there is no more empty nodes)
reparented = True
while reparented:
reparented = False
for node in child.find_all('binary_operator'):
if node.first.value == 'None' and node.second.value == 'None':
reparent = 'None'
elif node.first.value == 'None':
reparent = node.second.dumps()
elif node.second.value == 'None':
reparent = node.first.dumps()
else:
continue
if node.parent.parent is None:
node.replace(reparent)
reparented = True
elif node.parent.first is node:
node.parent.first.replace(reparent)
reparented = True
elif node.parent.second is node:
node.parent.second.replace(reparent)
reparented = True
# Combine parents DNA with a mutation: a random operator
notint = True
while notint:
combine = '{0} {2} {1}'.format(
op.dumps(),
child[0].dumps(),
random.choice(['+', '-', '*', '/'])
)
res = eval(combine)
if res == int(res):
notint = False
child[0].replace(combine)
print '.'
return child.dumps()
child = gen_child((s1[0][0], s1[-1][0]))
test_child = eval(child)
assert test_child == int(test_child)
print child, '=', test_child
def make_couples(selected):
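# Comment added for clarity: four passes of random pairing over the survivors, so each
# selected candidate ends up in several (up to four) breeding couples.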
parents = []
for _ in range(4):
_set = list(selected)
while len(_set) > 1:
i = random.randrange(0, len(_set))
a = _set.pop(i)
i = random.randrange(0, len(_set))
b = _set.pop(i)
parents.append((a[0], b[0]))
return parents
def new_population(selected, population_size):
population = []
couples = make_couples(selected)
while len(population) < population_size:
parents = random.choice(couples)
population.append(gen_child(parents))
return population
def next_generation(population, gen):
scored_population = sorted([(dna, score(dna)) for dna in population], key=lambda item: item[1])
show_scored_population(scored_population)
for i in range(len(scored_population)):
if scored_population[i][1] == 0:
print 'WIN:', i, ':', scored_population[i][0]
else:
break
stats, scores = generation_stats(scored_population)
print json.dumps(stats, indent=2)
plot_stats(stats, scores, gen)
selected = selection(scored_population, stats)
return new_population(selected, len(population)), gen + 1
def main(popsize=1000, nsteps=100):
pop, gen = first_generation(popsize), 0
while nsteps > 0:
pop, gen = next_generation(pop, gen)
nsteps -= 1
main(popsize=10, nsteps=30)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, we need to generate our population, this means for each candidate we will generate random DNA
Step2: Now, we would like to score our population
Step3: If we are lucky, we have found the solution on the first generation. But it is unlikely on more complex problems.
Step4: Now that we have our first generation and we know what it looks like, we can proceed to selection.
Step5: Combination and mutation, creating the next generation
Step6: Now we need to parse a pair of candidates, in order to return a new one. How do we proceed ?
Step7: Now that we are able to generate a child from two parents, let's make our couples
Step8: And generate our new population
Step9: The whole process
|
494 | <ASSISTANT_TASK:>
Python Code:
%%capture
!python -m pip install iree-compiler iree-runtime iree-tools-tflite -f https://github.com/google/iree/releases/latest
!pip3 install --extra-index-url https://google-coral.github.io/py-repo/ tflite_runtime
import numpy as np
import urllib.request
import pathlib
import tempfile
import re
import tflite_runtime.interpreter as tflite
from iree import runtime as iree_rt
from iree.compiler import compile_str
from iree.tools import tflite as iree_tflite
ARTIFACTS_DIR = pathlib.Path(tempfile.gettempdir(), "iree", "colab_artifacts")
ARTIFACTS_DIR.mkdir(parents=True, exist_ok=True)
#@title Download pretrained text classification model
MODEL_URL = "https://storage.googleapis.com/download.tensorflow.org/models/tflite/text_classification/text_classification_v2.tflite"
urllib.request.urlretrieve(MODEL_URL, ARTIFACTS_DIR.joinpath("text_classification.tflite"))
#@title Extract model vocab and label metadata
!unzip -o -d {ARTIFACTS_DIR} {ARTIFACTS_DIR}/text_classification.tflite
# Load the vocab file into a dictionary. It contains the most common 1,000
# words in the English language, mapped to an integer.
vocab = {}
with open(ARTIFACTS_DIR.joinpath("vocab.txt")) as vocab_file:
for line in vocab_file:
(key, val) = line.split()
vocab[key] = int(val)
# Text will be labeled as either 'Positive' or 'Negative'.
with open(ARTIFACTS_DIR.joinpath("labels.txt")) as label_file:
labels = label_file.read().splitlines()
#@title Input and output processing
# Input text will be encoded as an integer array of fixed length 256. The
# input sentence will be mapped to integers from the vocab dictionary, and the
# empty array spaces are filled with padding.
SENTENCE_LEN = 256
START = "<START>"
PAD = "<PAD>"
UNKNOWN = "<UNKNOWN>"
def tokenize_input(text):
output = np.empty([1, SENTENCE_LEN], dtype=np.int32)
output.fill(vocab[PAD])
# Remove capitalization and punctuation from the input text.
text_split = text.split()
text_split = [text.lower() for text in text_split]
text_split = [re.sub(r"[^\w\s']", '', text) for text in text_split]
# Prepend <START>.
index = 0
output[0][index] = vocab[START]
index += 1
for word in text_split:
output[0][index] = vocab[word] if word in vocab else vocab[UNKNOWN]
index += 1
return output
def interpret_output(output):
if output[0] >= output[1]:
label = labels[0]
confidence = output[0]
else:
label = labels[1]
confidence = output[1]
print("Label: " + label + "\nConfidence: " + str(confidence))
#@title Text samples
positive_text = "This is the best movie I've seen in recent years. Strongly recommend it!"
negative_text = "What a waste of my time."
print(positive_text)
print(tokenize_input(positive_text))
interpreter = tflite.Interpreter(
model_path=str(ARTIFACTS_DIR.joinpath("text_classification.tflite")))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
def classify_text_tflite(text):
interpreter.set_tensor(input_details[0]['index'], tokenize_input(text))
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
interpret_output(output_data[0])
print("Invoking text classification with TFLite\n")
positive_text = "This is the best movie I've seen in recent years. Strongly recommend it!"
print(positive_text)
classify_text_tflite(positive_text)
print()
negative_text = "What a waste of my time."
print(negative_text)
classify_text_tflite(negative_text)
# Convert TFLite model to TOSA MLIR with IREE's import tool.
IREE_TFLITE_TOOL = iree_tflite.get_tool('iree-import-tflite')
!{IREE_TFLITE_TOOL} {ARTIFACTS_DIR}/text_classification.tflite --o={ARTIFACTS_DIR}/text_classification.mlir
with open(ARTIFACTS_DIR.joinpath("text_classification.mlir")) as mlir_file:
tosa_mlir = mlir_file.read()
# The generated .mlir file could now be saved and used outside of Python, with
# IREE native tools or in apps, etc.
# The model contains very large constants, so recompile a truncated version to print.
!{IREE_TFLITE_TOOL} {ARTIFACTS_DIR}/text_classification.tflite --o={ARTIFACTS_DIR}/text_classification_truncated.mlir --mlir-elide-elementsattrs-if-larger=50
with open(ARTIFACTS_DIR.joinpath("text_classification_truncated.mlir")) as truncated_mlir_file:
truncated_tosa_mlir = truncated_mlir_file.read()
print(truncated_tosa_mlir, end='')
# Compile the TOSA MLIR into a VM module.
compiled_flatbuffer = compile_str(tosa_mlir, input_type="tosa", target_backends=["vmvx"])
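# (note: "vmvx" is IREE's portable, VM-based reference backend; other, hardware-specific
#  target backends could presumably be used here instead)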
vm_module = iree_rt.VmModule.from_flatbuffer(compiled_flatbuffer)
# Register the module with a runtime context.
config = iree_rt.Config("vmvx")
ctx = iree_rt.SystemContext(config=config)
ctx.add_vm_module(vm_module)
invoke_text_classification = ctx.modules.module["main"]
def classify_text_iree(text):
result = invoke_text_classification(tokenize_input(text)).to_host()[0]
interpret_output(result)
print("Invoking text classification with IREE\n")
print(positive_text)
classify_text_iree(positive_text)
print()
print(negative_text)
classify_text_iree(negative_text)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the TFLite model
Step2: Run using TFLite
Step3: Run using IREE
|
495 | <ASSISTANT_TASK:>
Python Code:
!pip install praatio --upgrade
from praatio import textgrid
# Textgrids take no arguments--it gets all of its necessary attributes from the tiers that it contains.
tg = textgrid.Textgrid()
# IntervalTiers and PointTiers take four arguments: the tier name, a list of intervals or points,
# a starting time, and an ending time.
wordTier = textgrid.IntervalTier('words', [], 0, 1.0)
maxF0Tier = textgrid.PointTier('maxF0', [], 0, 1.0)
tg.addTier(wordTier)
tg.addTier(maxF0Tier)
tg.save("empty_textgrid.TextGrid", format="short_textgrid", includeBlankSpaces=False)
import os
from os.path import join
from praatio import textgrid
from praatio import audio
inputPath = join('..', 'examples', 'files')
outputPath = join(inputPath, "generated_textgrids")
if not os.path.exists(outputPath):
os.mkdir(outputPath)
for fn in os.listdir(inputPath):
name, ext = os.path.splitext(fn)
if ext != ".wav":
continue
duration = audio.getDuration(join(inputPath, fn))
wordTier = textgrid.IntervalTier('words', [], 0, duration)
tg = textgrid.Textgrid()
tg.addTier(wordTier)
tg.save(join(outputPath, name + ".TextGrid"), format="short_textgrid", includeBlankSpaces=False)
# Did it work?
for fn in os.listdir(outputPath):
ext = os.path.splitext(fn)[1]
if ext != ".TextGrid":
continue
print(fn)
from os.path import join
from praatio import textgrid
inputFN = join('..', 'examples', 'files', 'mary.TextGrid')
tg = textgrid.openTextgrid(inputFN, includeEmptyIntervals=False) # Give it a file name, get back a Textgrid object
# What tiers are stored in this textgrid?
print(tg.tierNameList)
# It's possible to access the tiers by their position in the TextGrid
# (i.e. the order they were added in)
firstTier = tg.tierDict[tg.tierNameList[0]]
# Or by their names
wordTier = tg.tierDict['word']
print(firstTier)
# I just want the labels from the entryList
labelList = [entry[2] for entry in wordTier.entryList]
print(labelList)
# Get the duration of each interval
# (in this example, an interval is a word, so this outputs word duration)
durationList = []
for start, stop, _ in wordTier.entryList:
durationList.append(stop - start)
print(durationList)
# Print out each interval on a separate line
from os.path import join
from praatio import textgrid
inputFN = join('..', 'examples', 'files', 'mary.TextGrid')
tg = textgrid.openTextgrid(inputFN, includeEmptyIntervals=False)
tier = tg.tierDict['word']
for start, stop, label in tier.entryList:
print("From:%f, To:%f, %s" % (start, stop, label))
# Sometimes you just want to have two copies of something
newTG = tg.new()
newTier = tier.new()
# emptiedTier and renamedTier are the same as tier, except for the parameter specified in .new()
emptiedTier = tier.new(entryList=[]) # Remove all entries in the entry list
renamedTier = tier.new(name="lexical items") # Rename the tier to 'lexical items'
# Let's reload everything
from os.path import join
from praatio import textgrid
inputFN = join('..', 'examples', 'files', 'mary.TextGrid')
tg = textgrid.openTextgrid(inputFN, includeEmptyIntervals=False)
# Ok, what were our tiers?
print(tg.tierNameList)
# We've already seen how to add a new tier to a TextGrid
# Here we add a new tier, 'utterance', which has one entry that spans the length of the textgrid
utteranceTier = textgrid.IntervalTier(name='utterance', entryList=[(0, tg.maxTimestamp, 'mary rolled the barrel'), ],
minT=0, maxT=tg.maxTimestamp)
tg.addTier(utteranceTier)
print(tg.tierNameList)
# Maybe we decided that we don't need the phone tier. We can remove it using the tier's name.
# The remove function returns the removed tier, in case you want to do something with it later.
wordTier = tg.removeTier('word')
print(tg.tierNameList)
print(wordTier)
# We can also replace one tier with another like so (preserving the order of the tiers)
tg.replaceTier('phone', wordTier)
print(tg.tierNameList)
# Or rename a tier
tg.renameTier('word', 'lexical items')
print(tg.tierNameList)
# Let's start by observing the pre-cropped entry lists
wordTier = tg.tierDict['lexical items']
print(wordTier.entryList)
utteranceTier = tg.tierDict['utterance']
print(utteranceTier.entryList)
print("Start time: %f" % wordTier.minTimestamp)
print("End time: %f" % utteranceTier.maxTimestamp)
# Now let's crop and see what changes!
# Crop takes four arguments
# If mode is 'truncated', all intervals contained within the crop region will appear in the
# returned TG--however, intervals that span the crop region will be truncated to fit within
# the crop region
# If rebaseToZero is True, the times in the textgrid are recalibrated with the start of
# the crop region being 0.0s
croppedTG = tg.crop(0.5, 1.0, mode='truncated', rebaseToZero=True)
wordTier = croppedTG.tierDict['lexical items']
print(wordTier.entryList)
utteranceTier = croppedTG.tierDict['utterance']
print(utteranceTier.entryList)
print("Start time: %f" % croppedTG.minTimestamp)
print("End time: %f" % croppedTG.maxTimestamp)
# If rebaseToZero is False, the values in the cropped textgrid will be what they were in the
# original textgrid (but without values outside the crop region)
# Compare the output here with the output above
croppedTG = tg.crop(0.5, 1.0, mode='truncated', rebaseToZero=False)
wordTier = croppedTG.tierDict['lexical items']
print(wordTier.entryList)
utteranceTier = croppedTG.tierDict['utterance']
print(utteranceTier.entryList)
print("Start time: %f" % croppedTG.minTimestamp)
print("End time: %f" % croppedTG.maxTimestamp)
# If mode is 'strict', only wholly contained intervals will be included in the output.
# Compare this with the previous result
croppedTG = tg.crop(0.5, 1.0, mode='strict', rebaseToZero=False)
# Let's start by observing the pre-cropped entry lists
wordTier = croppedTG.tierDict['lexical items']
print(wordTier.entryList)
utteranceTier = croppedTG.tierDict['utterance']
print(utteranceTier.entryList)
print("Start time: %f" % croppedTG.minTimestamp)
print("End time: %f" % croppedTG.maxTimestamp)
# If mode is 'lax', partially contained intervals will be wholly contained in the outpu.
# Compare this with the previous result
croppedTG = tg.crop(0.5, 1.0, mode='lax', rebaseToZero=False)
# Let's start by observing the pre-cropped entry lists
wordTier = croppedTG.tierDict['lexical items']
print(wordTier.entryList)
utteranceTier = croppedTG.tierDict['utterance']
print(utteranceTier.entryList)
print("Start time: %f" % croppedTG.minTimestamp)
print("End time: %f" % croppedTG.maxTimestamp)
# Let's reload everything, just as before
from os.path import join
from praatio import textgrid
inputFN = join('..', 'examples', 'files', 'mary.TextGrid')
tg = textgrid.openTextgrid(inputFN, includeEmptyIntervals=False)
# Ok, what are our tiers?
print(tg.tierNameList)
# The entryList, which holds the tier point or interval data, is the heart of the tier.
# Recall the 'new()' function, if you want to modify all of the entries in a tier at once
wordTier = tg.tierDict['word']
newEntryList = [(start, stop, 'bloop') for start, stop, label in wordTier.entryList]
newWordTier = wordTier.new(entryList=newEntryList)
print(wordTier.entryList)
print(newWordTier.entryList)
# If, however, we only want to modify a few entries, there are some functions for doing so
# deleteEntry() takes an entry and deletes it
maryEntry = wordTier.entryList[0]
wordTier.deleteEntry(maryEntry)
print(wordTier.entryList)
# insertEntry() does the opposite of deleteEntry.
wordTier.insertEntry(maryEntry)
print(wordTier.entryList)
print()
# you can also set the collision code to 'merge' or 'replace' to set the behavior in the event an entry already exists
# And the collisionReportingMode can be used to have warnings printed out when a collision occurs
wordTier.insertEntry((maryEntry[0], maryEntry[1], 'bob'), collisionMode='replace', collisionReportingMode='silence')
print(wordTier.entryList)
# Let's say we have some time series data
# Where the data is organized as [(timeV1, dataV1a, dataV1b, ...), (timeV2, dataV2a, dataV2b, ...), ...]
dataValues = [(0.1, 15), (0.2, 98), (0.3, 105), (0.4, 210), (0.5, ),
(0.6, 154), (0.7, 181), (0.8, 110), (0.9, 203), (1.0, 240)]
# Often times when working with such data, we want to know which data
# corresponds to certain speech events
# e.g. what was the max pitch during the stressed vowel of a particular word etc...
intervalDataList = wordTier.getValuesInIntervals(dataValues)
# The returned list is of the form [(interval1, subDataList1), (interval2, subDataList2), ...]
for interval, subDataList in intervalDataList:
print(interval)
print(subDataList)
print()
bobWordIList = wordTier.find('bob')
bobWord = wordTier.entryList[bobWordIList[0]]
print(bobWord)
import os
from os.path import join
from praatio import textgrid
from praatio import pitch_and_intensity
# For pitch extraction, we need the location of praat on your computer
#praatEXE = r"C:\Praat.exe"
praatEXE = "/Applications/Praat.app/Contents/MacOS/Praat"
# The 'os.getcwd()' is kindof a hack. With jypter __file__ is undefined and
# os.getcwd() seems to default to the praatio installation files.
rootPath = join(os.getcwd(), '..', 'examples', 'files')
pitchPath = join(rootPath, "pitch_extraction", "pitch")
fnList = [('mary.wav', 'mary.TextGrid'),
('bobby.wav', 'bobby_words.TextGrid')]
# The names of interest -- in an example working with more data, this would be more comprehensive
nameList = ['mary', 'BOBBY', 'lisa', 'john', 'sarah', 'tim', ]
outputList = []
for wavName, tgName in fnList:
pitchName = os.path.splitext(wavName)[0] + '.txt'
tg = textgrid.openTextgrid(join(rootPath, tgName), includeEmptyIntervals=False)
# 1 - get pitch values
pitchList = pitch_and_intensity.extractPitch(join(rootPath, wavName),
join(pitchPath, pitchName),
praatEXE, 50, 350,
forceRegenerate=True)
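# (assumption: the 50 and 350 passed above are the min/max pitch in Hz given to Praat's pitch tracker)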
# 2 - find the intervals where a name was spoken
nameIntervals = []
targetTier = tg.tierDict['word']
for name in nameList:
findMatches = targetTier.find(name)
for i in findMatches:
nameIntervals.append(targetTier.entryList[i])
# 3 - isolate the relevant pitch values
matchedIntervals = []
intervalDataList = []
for entry in nameIntervals:
start, stop, label = entry
croppedTier = targetTier.crop(start, stop, "truncated", False)
intervalDataList = croppedTier.getValuesInIntervals(pitchList)
matchedIntervals.extend(intervalDataList)
# 4 - find the maximum value
for interval, subDataList in intervalDataList:
pitchValueList = [pitchV for timeV, pitchV in subDataList]
maxPitch = max(pitchValueList)
outputList.append((wavName, interval, maxPitch))
# Output results
for name, interval, value in outputList:
print((name, interval, value))
# Let's reload everything
from os.path import join
from praatio import textgrid
# We'll use a special textgrid for this purpose
inputFN = join('..', 'examples', 'files', 'damon_set_test.TextGrid')
tg = textgrid.openTextgrid(inputFN, includeEmptyIntervals=False)
# Ok, what are our tiers?
print(tg.tierNameList)
# Let's take set operations between these two tiers
syllableTier = tg.tierDict['tonicSyllable']
errorTier = tg.tierDict['manually_labeled_pitch_errors']
print(syllableTier.entryList)
print(errorTier.entryList)
# Set difference -- the entries that are not in errorTier are kept
diffTier = syllableTier.difference(errorTier)
diffTier = diffTier.new(name="different")
print(diffTier.entryList)
# Set intersection -- the overlapping regions between the two tiers are kept
interTier = syllableTier.intersection(errorTier)
interTier = interTier.new(name="intersection")
print(interTier.entryList)
# Set union -- the two tiers are merged
unionTier = syllableTier.union(errorTier)
unionTier = unionTier.new(name="union")
print(unionTier.entryList)
outputFN = join('..', 'examples', 'files', 'damon_set_test_output.TextGrid')
setTG = textgrid.Textgrid()
for tier in [syllableTier, errorTier, diffTier, interTier, unionTier]:
setTG.addTier(tier)
setTG.save(outputFN, format="short_textgrid", includeBlankSpaces=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr>
Step2: <a id="example_create_blank_textgrids">
Step3: Bravo! You've saved your colleagues the tedium of creating empty textgrids for each wav file from scratch and removed one vector of human error from your workflow.
Step4: <hr>
Step5: Ok, so with the TextGrid, we got a Tier. What happens next? Most of the time, you'll be accessing the intervals or points stored in the tier. These are stored in the entryList.
Step6: I use this idiom--open the textgrid, get the target tier, and loop through the entryList--on a regular basis. For clarity, the whole idiom is presented here in a concise example.
Step7: <hr>
Step8: <hr>
Step9: The above featured functions are perhaps the most useful functions in praatio. But there are some other functions which I'll mention briefly here.
Step10: <hr>
Step11: <hr>
Step12: <hr>
Step13: <hr>
Step14: <hr>
Step15: That output might be a little hard to visualize. Here is what the output looks like in a textgrid
|
496 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-hh', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
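# Illustrative example only (hypothetical values -- the correct entries depend on
# the aerosol model being documented), showing how one of the properties above
# would be completed:
# DOC.set_id('cmip6.aerosol.model.scheme_type')
# DOC.set_value("Modal")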
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
497 | <ASSISTANT_TASK:>
Python Code:
# define base values and measurements
v1_s = 0.500
v1_sb1 = 1.800
v1_sb2 = 1.640
v1_m = np.mean([0.47, 0.46, 0.46, 0.46, 0.46, 0.47, 0.46, 0.46, 0.46, 0.46, 4.65 / 10]) * 1e-3
v1_T = np.mean([28.68 / 10, 28.91 / 10])
v1_cw = 0.75
v1_cw_u = 0.08
v1_A = 4*1e-6
v1_pl = 1.2041
def air_resistance(s, v):
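    # Linearised air-drag correction: for a projectile of mass v1_m, frontal area
    # v1_A and drag coefficient v1_cw moving through air of density v1_pl, the
    # speed v measured after a flight distance s is extrapolated back to the
    # initial speed v0 via v0 = v / (1 - k*s) with k = cw*A*rho/(2*m).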
k = v1_cw * v1_A * v1_pl / 2 / v1_m
v0 = v / (1 - k * s)
return v0
# Evaluate Data
# Read Data
v1_df = pd.read_csv('data/ballistisch.csv')
v1_M = 0.09836
v1_l = 1.973
v1_g = 9.80705
# Plot only one graph of the linear fit
df = pd.read_csv('data/ballistsisch_0.csv')
slope, intercept, r, p, sem = stats.linregress(df['measurement'] + 0.25, df['x'])
n = np.linspace(0, 21, 100)
ax = df.plot(kind='scatter', x='measurement', y='x', label='gemessener Ausschlag')
plt.plot(n, [i * slope + intercept for i in n], label='linearer Fit', axes=ax)
plt.xlabel('Messung')
plt.ylabel('x [m]')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:luftwiderstand_fit', caption='Lineare Regression zur bestimmung des maximalen Ausschlages des Pendels, falls keinerlei Unidealitäten dieses beeinflussen.')
figure.show()
# Calculate mean velocity in air
v1_df['v_flug'] = pd.Series(v1_s / v1_df['t'])
# Calculate all x from the measurements
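# The pendulum loses a little amplitude with every swing, so the deflection x at
# the moment of impact is estimated by fitting a straight line to the recorded
# deflections versus swing number and taking the intercept (the extrapolated,
# friction-free deflection).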
v1_x = []
for n in range(10):
df = pd.read_csv('data/ballistsisch_{}.csv'.format(n))
slope, intercept, r, p, sem = stats.linregress(df['measurement'] + 0.25, df['x'])
v1_x.append(intercept)
v1_df['x'] = pd.Series(v1_x)
# Calculate all v_B at impact
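# Ballistic-pendulum relation used below:
#   v_B = g/(2*pi) * (M+m)/m * T/l * x * (1 - 3/8*(x/l)^2),
# where M is the pendulum mass, m the projectile mass, T the period, l the
# pendulum length and x the extrapolated deflection; the last factor is the
# second-order correction for the finite deflection.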
v1_k = v1_g / (2 * math.pi) * (v1_M + v1_m) / v1_m / v1_l * v1_T
v1_b = 1 - 3 / 8 * (v1_df['x'] / v1_l)**2
v1_df['v_ballistic'] = pd.Series(v1_k * v1_df['x'] * v1_b)
v1_mean = v1_df.mean()
# Plot calculated velocities
ax = v1_df.plot(kind='scatter', x='measurement', y='v_flug', label='$v_F$: errechnete Geschwindigkeit mit Flugzeitmethode')
plt.scatter(v1_df['measurement'], v1_df['v_ballistic'], label='$v_B$: errechnete Geschwindigkeit mit ballistischer Methode', color='red', axes=ax)
plt.axhline(y=v1_mean['v_flug'], axes=ax, color='blue', label='Mittelwert von $v_F$')
plt.axhline(y=v1_mean['v_ballistic'], axes=ax, color='red', label='Mittelwert von $v_B$')
plt.xlabel('Messung')
plt.ylabel('$v[\\frac{m}{s}]$')
#plt.ylim([150, 160])
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:v_ballistisch', caption='Aufprallgeschwindigkeiten der Kugel durch Ballistische Methode errechnet, im Vergleich zu den jeweiligen Geschwindigkeiten, welche durch die Flugzeitmethode berechnet wurden.')
figure.show()
# Luftwiderstandskorrektur single and mean calculation
v1_flug_muendung = pd.Series(list(map(partial(air_resistance, v1_s / 2), v1_df['v_flug'])))
v1_ballistic_muendung = pd.Series(list(map(partial(air_resistance, v1_sb1), v1_df['v_ballistic'])))
v1_a = v1_flug_muendung / v1_ballistic_muendung
v1_a_mean = np.mean(v1_a)
v1_a_sem = stats.sem(v1_a)
v1_df['a'] = v1_a
v1_df['v_flug_muendung'] = v1_flug_muendung
v1_df['v_ballistic_muendung'] = v1_ballistic_muendung
v1_mean = v1_df.mean()
ax = v1_df.plot(kind='scatter', x='measurement', y='v_flug_muendung', label='$v_{0,F}$')
plt.scatter(v1_df['measurement'], v1_df['v_ballistic_muendung'], label='$v_{0,B}$', color='red', axes=ax)
plt.axhline(y=v1_mean['v_flug_muendung'], axes=ax, color='blue', label='Mittelwert von $v_F$')
plt.axhline(y=v1_mean['v_ballistic_muendung'], axes=ax, color='red', label='Mittelwert von $v_B$')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.xlabel('Messung')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:laufzeiten_luft_v_balllistisch_mean_muendung', caption='Die Geschwindigkeiten errechnet mit der Flugzeit- und der ballistischen Methode an Mündung im Vergleich.')
figure.show()
# Evaluate Data
# Calculate inertia
v2_d = np.array([92e-3, 91e-3])
v2_m = np.array([np.mean([15.86, 15.88])*1e-3, np.mean([42.03, 42.13])*1e-3])
# Inertia
v2_J = v2_m * v2_d**2
v2_J3 = 7.4e-5
v2_J3_u = 0.1e-5
# Read Data
v2_df = pd.read_csv('data/drehstoss.csv')
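# Rotational-impact evaluation: the angular velocity after the inelastic impact
# follows from the measured half-period T_2 as omega = pi / T_2, and conservation
# of angular momentum m*v*d = (2*J_weight + J_rod + m*d^2)*omega gives the
# projectile speed v = omega * ((2*J_weight + J_rod)/(m*d) + d).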
v2_df['omega'] = math.pi / v2_df['T_2']
v2_df['v_drehstoss'] = v2_df['omega'] * ((v2_J[v2_df['m']] * 2 + v2_J3) / (v2_d[v2_df['m']] * v1_m) + v2_d[v2_df['m']])
v2_df['v_flug'] = v1_s / v2_df['t']
v2_mean = v2_df.mean()
ax = v2_df.plot(kind='scatter', x='measurement', y='v_drehstoss', label='$v_{D,k}$')
plt.scatter(v2_df['measurement'], v2_df['v_flug'], label='$v_{F}$', color='red', axes=ax)
plt.axhline(y=v2_mean['v_drehstoss'], axes=ax, color='blue', label='Mittelwert von $v_F$')
plt.axhline(y=v2_mean['v_flug'], axes=ax, color='red', label='Mittelwert von $v_D$')
plt.ylabel('$v [\\frac{m}{s}]$')
plt.xlabel('Messung')
plt.ylim([150, 160])
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:v_luft_v_drehstoss', caption='Die Geschwindigkeiten errechnet mit der Flugzeit- und der Drehstossmethode mit je fünf Messungen beider Gewichten im Vergleich.')
figure.show()
# Luftwiderstandskorrektur single and mean calculation
v2_mean = v2_df.mean()
v2_sem = v2_df.sem()
v2_df['v_flug_muendung'] = pd.Series(list(map(partial(air_resistance, v1_s / 2), v2_df['v_flug'])))
v2_df['v_drehstoss_muendung'] = pd.Series(list(map(partial(air_resistance, v1_sb1), v2_df['v_drehstoss'])))
v2_df['a'] = v2_df['v_flug_muendung'] / v2_df['v_drehstoss_muendung']
v2_mean = v2_df.mean()
ax = v2_df.plot(kind='scatter', x='measurement', y='v_flug_muendung', label='$v_{0,F}$')
plt.scatter(v2_df['measurement'], v2_df['v_drehstoss_muendung'], label='$v_{0,D}$', color='red', axes=ax)
plt.axhline(y=v2_mean['v_flug_muendung'], axes=ax, color='blue', label='Mittelwert von $v_F$')
plt.axhline(y=v2_mean['v_drehstoss_muendung'], axes=ax, color='red', label='Mittelwert von $v_D$')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.xlabel('Messung')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:laufzeiten_luft_v_drehstoss_mean', caption='Die Geschwindigkeiten errechnet mit der Flugzeit- und der Drehstossmethode an Mündung im Vergleich.')
figure.show()
# Plot different methods with sem
v1_mean = v1_df.mean()
v1_sem = v1_df.sem()
v1_flug_muendung_mean = air_resistance(v1_s / 2, v1_mean['v_flug'])
v1_flug_muendung_sem = air_resistance(v1_s / 2, v1_sem['v_flug'])
v1_ballistic_muendung_mean = air_resistance(v1_sb1, v1_mean['v_ballistic'])
v1_ballistic_muendung_sem = air_resistance(v1_sb1, v1_sem['v_ballistic'])
x = [1, 4, 7, 10]
y = [
v1_mean['v_flug'],
v1_mean['v_ballistic'],
v1_flug_muendung_mean,
v1_ballistic_muendung_mean
]
e = [
v1_sem['v_flug'],
v1_sem['v_ballistic'],
v1_flug_muendung_sem,
v1_ballistic_muendung_sem
]
labels = ['$v_F$', '$v_B$', '$v_{0,F}$', '$v_{0,B}$']
fig = plt.figure()
plt.errorbar(x, y, yerr=e, fmt='o')
plt.xlim([0, 11])
#plt.ylim([150, 160])
plt.xticks(x, labels, rotation='35')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.text(0.01, 0.7,
'''$v_F $: Flugzeitmethode
$v_B $: Ballistische Methode
$v_{0,F}$: Mündungsgeschwindigkeit Flugzeitmethode
$v_{0,B}$: Mündungsgeschwindigkeit ballistische Methode'''
, transform = ax.transAxes, backgroundcolor='white', va='bottom')
plt.close(fig)
figure = PrettyFigure(fig, label='fig:laufzeiten_luft_v_balllistisch', caption='Die mittleren Geschwindigkeiten der ballistischen und Flugzeitmethode im Vergleich mit ihren respektiven mittleren Mündungsgeschwindigkeiten.')
figure.show()
# Calculate systematic error
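# Gaussian propagation of the reading uncertainties of T, x and l: the partial
# derivatives of v_B with respect to each quantity (k1*b1, k2*b2, k3*b3 below)
# are multiplied by the corresponding uncertainty and combined in quadrature.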
v1_T_u = 3e-2
v1_x_u = 5e-3
v1_l_u = 5e-3
v1_mean = v1_df.mean()
v1_sem = v1_df.sem()
v1_k1 = v1_g / (2 * math.pi) * (v1_M + v1_m) / v1_m * v1_mean['x'] / v1_l
v1_b1 = 1 - 3 / 8 * (v1_mean['x'] / v1_l)**2
v1_k2 = v1_g / (2 * math.pi) * (v1_M + v1_m) / v1_m * v1_T / v1_l
v1_b2 = 1 - 9 / 8 * (v1_mean['x'] / v1_l)**2
v1_k3 = v1_g / (2 * math.pi) * (v1_M + v1_m) / v1_m * v1_T * v1_mean['x'] / v1_l**2
v1_b3 = -1 + 9 / 8 * (v1_mean['x'] / v1_l)**2
v1_systematic_error = math.sqrt((v1_k1 * v1_b1 * v1_T_u)**2
+ (v1_k2 * v1_b2 * v1_x_u)**2
+ (v1_k3 * v1_b3 * v1_l_u)**2)
# Plot different methods with sem
x = [1, 4]
y = [
v1_mean['v_flug'],
v1_mean['v_ballistic']
]
e = [
v1_sem['v_flug'],
v1_systematic_error
]
labels = ['$v_F$', '$v_B$']
fig = plt.figure()
plt.errorbar(x, y, yerr=e, fmt='o')
plt.xlim([0, 5])
#plt.ylim([150, 160])
plt.xticks(x, labels, rotation='35')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.text(-0.026, 0.76,
'''$v_F $: Flugzeitmethode
$v_B $: Ballistische Methode'''
, transform = ax.transAxes, backgroundcolor='white', va='bottom')
plt.close(fig)
figure = PrettyFigure(fig, label='fig:laufzeiten_luft_v_balllistisch_systematischer_fehler', caption='Die systematischen Fehler der mittleren Geschwindigkeiten der ballistischen und Flugzeitmethode im Vergleich.')
figure.show()
# Plot different methods with sem
v2_mean = v2_df.mean()
v2_sem = v2_df.sem()
v2_flug_muendung_mean = air_resistance(v1_s / 2, v2_mean['v_flug'])
v2_flug_muendung_sem = air_resistance(v1_s / 2, v2_sem['v_flug'])
v2_drehstoss_muendung_mean = air_resistance(v1_sb2, v2_mean['v_drehstoss'])
v2_drehstoss_muendung_sem = air_resistance(v1_sb2, v2_sem['v_drehstoss'])
x = [1, 4, 7, 10]
y = [
v2_mean['v_flug'],
v2_mean['v_drehstoss'],
v2_flug_muendung_mean,
v2_drehstoss_muendung_mean
]
e = [
v2_sem['v_flug'],
v2_sem['v_drehstoss'],
v2_flug_muendung_sem,
v2_drehstoss_muendung_sem
]
labels = ['$v_F$', '$v_D$', '$v_{0,F}$', '$v_{0,D}$']
fig = plt.figure()
plt.errorbar(x, y, yerr=e, fmt='o')
plt.xlim([0, 11])
plt.ylim([150, 160])
plt.xticks(x, labels, rotation='35')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.text(-0.026, 0.7,
'''$v_F $: Flugzeitmethode
$v_D $: Drehstossmethode
$v_{0,F}$: Mündungsgeschwindigkeit Flugzeitmethode
$v_{0,D}$: Mündungsgeschwindigkeit Drehstossmethode'''
, transform = ax.transAxes, backgroundcolor='white', va='bottom')
plt.close(fig)
figure = PrettyFigure(fig, label='fig:laufzeiten_luft_v_drehstoss', caption='Die mittleren Geschwindigkeiten der Drehstoss- und Flugzeitmethode im Vergleich mit ihren respektiven mittleren Mündungsgeschwindigkeiten.')
figure.show()
# Calculate systematic error
v2_d_u = 0.5e-3
v2_T_u = 5e-3
v2_mean = v2_df.mean()
v2_sem = v2_df.sem()
v2_k1 = -math.pi / v2_mean['T_2']**2 * ((v2_J[0] * 2 + v2_J3) / v1_m / v2_d[0] + v2_d[0])
v2_k2 = math.pi / v2_mean['T_2'] * (1 - (v2_J[0] * 2 + v2_J3) / v1_m / v2_d[0]**2)
v2_systematic_error = math.sqrt((v2_k1 * v2_d_u)**2 + (v2_k1 * v2_T_u)**2)
# Plot different methods with sem
x = [1, 4]
y = [
v2_mean['v_flug'],
v2_mean['v_drehstoss']
]
e = [
v2_sem['v_flug'],
v2_systematic_error
]
labels = ['$v_F$', '$v_D$']
fig = plt.figure()
plt.errorbar(x, y, yerr=e, fmt='o')
plt.xlim([0, 5])
#plt.ylim([150, 160])
plt.xticks(x, labels, rotation='35')
plt.ylabel('$v_0 [\\frac{m}{s}]$')
plt.text(0.01, 0.76,
'''$v_F $: Flugzeitmethode
$v_D $: Drehstossmethode'''
, transform = ax.transAxes, backgroundcolor='white', va='bottom')
plt.close(fig)
figure = PrettyFigure(fig, label='fig:laufzeiten_luft_v_drehstoss_systematischer_fehler', caption='Die systematischen Fehler der mittleren Geschwindigkeiten der Drehstoss- und Flugzeitmethode im Vergleich.')
figure.show()
# Plot different methods with sem
v1_mean = v1_df.mean()['v_flug_muendung']
v1_sem = v1_df.sem()['v_flug_muendung']
v1_std = v1_df.std()['v_flug_muendung']
ax = v1_df.plot(kind='scatter', x='measurement', y='v_flug_muendung', label='errechnete Geschwindigkeit an der Mündung')
plt.axhline(y=v1_mean, axes=ax, color='red', label='Mittelwert')
plt.axhline(y=v1_mean+v1_sem, axes=ax, color='green', label='Mittelwert ± Fehler')
plt.axhline(y=v1_mean-v1_sem, axes=ax, color='green')
plt.axhline(y=v1_mean+v1_std, axes=ax, color='purple', label='Mittelwert ± Standardabweichung')
plt.axhline(y=v1_mean-v1_std, axes=ax, color='purple')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.xlabel('Messung')
plt.ylabel('$v_{0,F} [\\frac{m}{s}]$')
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:laufzeiten_luft_muendung', caption='Die Mündungsgeschwindigkeiten errechnet mit der Flugzeitmethode. Dazu ihr Mittelwert und der Bereich der Standardabweichung.')
figure.show()
# Show results
import math
from IPython.display import (
display, display_html, display_png, display_svg
)
class PrettyTable(list):
    """Overridden list class which takes a 2-dimensional list of
    the form [[1,2,3],[4,5,6]], and renders HTML and LaTeX Table in
    IPython Notebook. For LaTeX export two styles can be chosen.
    """
def __init__(self, initlist=[], label=None, caption='Description missing', extra_header=None, entries_per_column=100, significant_digits=4, print_latex_longtable=True):
self.print_latex_longtable = print_latex_longtable
self.entries_per_column = entries_per_column
self.significant_digits = significant_digits
self.caption = caption
self.label = label
if extra_header is not None:
extra_header = [e.replace('%', '\\%') for e in extra_header]
if len(initlist[0]) != len(extra_header):
raise ValueError("Header list must have same length as data has columns.")
initlist = [extra_header]+list(initlist)
super(PrettyTable, self).__init__(initlist)
def latex_table_tabular(self):
latex = ["\\begin{tabular}"]
latex.append("{"+"|".join((["l"]*len(self[0])))+"}\n")
for row in self:
latex.append(" & ".join(map(format, row)))
latex.append("\\\\ \n")
latex.append("\\end{tabular}")
return ''.join(latex)
def latex_longtable(self):
latex = ["\\begin{longtable}[H]{@{}"]
l = len(self) - 1
li = len(self[0])
latex.append("l" * (li * math.ceil(l / self.entries_per_column)))
latex.append("@{}}\n")
latex.append("\\toprule\\addlinespace\n")
line = (" & ".join(map(format, self[0])))
latex.append((line + " & ") * (math.ceil(l / self.entries_per_column) - 1))
latex.append(line)
latex.append("\\\\\\addlinespace \n")
latex.append("\\midrule\\endhead\n")
rows = []
rows_done = 0
for row in self[1:]:
if rows_done < self.entries_per_column:
if isinstance(row, str):
rows.append(" & ".join(row))
elif isinstance(row, float):
rows.append(" & ".join(map(('{0:.' + str(self.significant_digits) + 'f}').format, row)))
else:
rows.append(" & ".join(map(str, row)))
rows.append("\\\\\\addlinespace \n")
else:
rows[(rows_done % self.entries_per_column) * 2] += " & " + " & ".join(map(('{0:.' + str(self.significant_digits) + 'f}').format, row))
rows_done += 1
latex.extend(rows)
latex.append('\\\\\\bottomrule\\caption{%s}\\\\\\label{%s}' % (self.caption, self.label))
latex.append("\n \\end{longtable}")
return ''.join(latex).replace('%','\\%')
def _repr_html_(self):
html = ["<table style=\"margin:auto;\">"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
html.append('<p style="text-align:center">{0}</p>'.format(self.caption))
return ''.join(html)
def _repr_latex_(self):
if self.print_latex_longtable:
return self.latex_longtable()
else:
return self.latex_table_tabular()
def show(self):
display(self)
v1_mean = v1_df.mean()
v1_sem = v1_df.sem()
v1_std = v1_df.std()
values = [
'mit Flugzeitmethode $v_{F}$',
'an der Mündung mit Flugzeitmethode $v_{0,F}$',
'mit ballistischer Methode $v_{B}$',
'an der Mündung mit ballistischer Methode $v_{0,B}$',
]
means = [
'{0:.2f}'.format(v1_mean['v_flug']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v1_mean['v_flug_muendung']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v1_mean['v_ballistic']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v1_mean['v_ballistic_muendung']) + r'$\frac{m}{s}$'
]
sem = [
    '{0:.2f}'.format(v1_sem['v_flug']) + r'$\frac{m}{s}$',
    '{0:.2f}'.format(v1_sem['v_flug_muendung']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v1_sem['v_ballistic']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v1_sem['v_ballistic_muendung']) + r'$\frac{m}{s}$'
]
syst = [
r'$\footnotetext[1]{Fehler nicht relevanter Grösse und deshalb weggelassen.}$',
r'$\footnotemark[1]$',
'{0:.2f}'.format(v1_systematic_error) + r'$\frac{m}{s}$',
r'$\footnotetext[2]{Fehler wurde der Einfachheit halber auf Absprache mit dem Dozenten weggelassen.}$'
]
rel = [
'{0:.2f}'.format(v1_sem['v_flug'] / v1_mean['v_flug'] * 100) + '%',
'{0:.2f}'.format(v1_sem['v_flug_muendung'] / v1_mean['v_flug_muendung'] * 100) + '%',
'{0:.2f}'.format(v1_systematic_error / v1_mean['v_ballistic'] * 100) + '%',
r'$\footnotemark[2]$'
]
v1_results_tbl = PrettyTable(list(zip(values, means, sem, syst, rel)), label='tab:resultat_ballistisch', caption='Resultate aus dem Versuch der Ballistischen Methode.', extra_header=[
'Projektilgeschwindigkeit',
'Wert',
'stat. Fehler',
'syst. Fehler',
'Relativer Fehler'
], entries_per_column=4)
v1_results_tbl.show()
# Plot of a
ax = v1_df.plot(kind='scatter', x='measurement', y='a', label='errechnetes Verhältnis $a_i = V_{F,i}/V_{B,i}$')
plt.axhline(y=v1_a_mean, axes=ax, label='Mittelwert des Verhältnisses a', color='green')
plt.axhline(y=1, axes=ax, label='Erwarteter Wert 1', color='purple')
plt.xlabel('Messung')
plt.ylabel('a')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:laufzeiten_luft_v_balllistisch_mean_ratio', caption='Die mittleren Geschwindigkeiten errechnet mit der Flugzeit- und der ballistischen Methode an Mündung im Verhältnis dargestellt und ihr erwarteter Wert von 1 zum Vergleich.')
figure.show()
# Show results
v2_mean = v2_df.mean()
v2_sem = v2_df.sem()
v2_std = v2_df.std()
values = [
'mit Flugzeitmethode $v_{F}$',
'an der Mündung mit Flugzeitmethode $v_{0,F}$',
'mit Drehstossmethode $v_{B}$',
'an der Mündung mit Drehstossmethode $v_{0,B}$',
]
means = [
'{0:.2f}'.format(v2_mean['v_flug']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v2_mean['v_flug_muendung']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v2_mean['v_drehstoss']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v2_mean['v_drehstoss_muendung']) + r'$\frac{m}{s}$'
]
sem = [
    '{0:.2f}'.format(v2_sem['v_flug']) + r'$\frac{m}{s}$',
    '{0:.2f}'.format(v2_sem['v_flug_muendung']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v2_sem['v_drehstoss']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(v2_sem['v_drehstoss_muendung']) + r'$\frac{m}{s}$'
]
syst = [
r'$\footnotemark[1]$',
r'$\footnotemark[1]$',
'{0:.2f}'.format(v2_systematic_error) + r'$\frac{m}{s}$',
r'$\footnotemark[2]$'
]
rel = [
'{0:.2f}'.format(v2_sem['v_flug'] / v2_mean['v_flug'] * 100) + '%',
'{0:.2f}'.format(v2_sem['v_flug_muendung'] / v2_mean['v_flug_muendung'] * 100) + '%',
'{0:.2f}'.format(v2_systematic_error / v2_mean['v_drehstoss'] * 100) + '%',
r'$\footnotemark[2]$'
]
v2_results_tbl = PrettyTable(list(zip(values, means, sem, syst, rel)), label='tab:resultat_drehstoss', caption='Resultate aus dem Versuch der Drehstossmethode.', extra_header=[
'Projektilgeschwindigkeit',
'Wert',
'stat. Fehler',
'syst. Fehler',
'Relativer Fehler'
], entries_per_column=4)
v2_results_tbl.show()
# Plot ratio a
ax = v2_df.plot(kind='scatter', x='measurement', y='a', label='errechnetes Verhältnis $a_i = V_{F,i}/V_{D,i}$')
plt.axhline(y=v2_mean['a'], axes=ax, label='Mittelwert des Verhältnisses a', color='green')
plt.axhline(y=1, axes=ax, label='Erwarteter Wert 1', color='purple')
plt.xlabel('Messung')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
plt.close(ax.figure)
figure = PrettyFigure(ax.figure, label='fig:laufzeiten_luft_v_drehstoss_mean_ratio', caption='Die mittleren Geschwindigkeiten errechnet mit der Flugzeit- und der Drehstossmethode an Mündung im Verhältnis dargestellt und ihr erwarteter Wert von 1 zum Vergleich.')
figure.show()
for n in range(10):
df = pd.read_csv('data/ballistsisch_{}.csv'.format(n))
data = PrettyTable(list(zip(df['measurement'], df['x'])), caption='Schuss {0}'.format(n + 1), entries_per_column=5, extra_header=['Messung [1]', 'Ausschlag [m]'])
data.show()
data = PrettyTable(list(zip(v1_df['measurement'], v1_df['t'])), caption='Laufzeiten der einzelnen Schüsse.', entries_per_column=10, extra_header=['Messung [1]', 'Laufzeit [s]'])
data.show()
v2_df['real_m'] = v2_m[v2_df['m']]
data = PrettyTable(list(zip(v2_df['measurement'], v2_df['t'], v2_df['T_2'], v2_df['real_m'])), caption='Messwerte der Drehstossmethode.', entries_per_column=10, extra_header=['Messung [1]', 'Laufzeit [s]', 'Halbe Periode [s]', 'Masse eines Hantelstücks [kg]'])
data.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <center>
Step2: This results in figure \ref{fig
Step3: Comparison of the measurement methods
Step4: Spread of the muzzle velocity
Step5: Comparison of the measurement methods
Step6: Error analysis
Step7: As can be seen from the error bars in figure \ref{fig
Step8: It is immediately apparent how large the systematic error of the ballistic method is, which is why such different results were obtained for the projectile velocity. The error ranges now overlap, so it is reasonable to assume that both calculations are correct, with the time-of-flight method being many times more precise.
Step9: As can be seen from the error bars in figure \ref{fig
Step10: Here, too, it is evident how large the systematic error of the rotational-impact method is, which again explains the different results obtained for the projectile velocity. The error ranges overlap, so it is reasonable to assume that both calculations are correct, with the time-of-flight method once more being many times more precise.
Step12: Ballistic method
Step13: The results of the ballistic method differ only slightly from those of the time-of-flight method. By exactly {{'{0
Step14: The results of the rotational-impact method likewise differ only slightly from those of the time-of-flight method. By {{'{0
Step15: Time-of-flight method
Step16: Rotational-impact method
|
498 | <ASSISTANT_TASK:>
Python Code:
import datetime
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
%%bash
ls *.csv
%%bash
head -5 *.csv
# Determine CSV, label, and key columns
# TODO: Create list of string column headers, make sure order matches.
CSV_COLUMNS = [""]
# TODO: Add string name for label column
LABEL_COLUMN = ""
# Set default values for each CSV column as a list of lists.
# Treat is_male and plurality as strings.
DEFAULTS = []
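# One possible completion (illustrative sketch -- the names and their order are an
# assumption about the header of the CSV files produced earlier in the lab):
# CSV_COLUMNS = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"]
# LABEL_COLUMN = "weight_pounds"
# DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]]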
def features_and_labels(row_data):
    """Splits features and labels from feature dictionary.
    Args:
        row_data: Dictionary of CSV column names and tensor values.
    Returns:
        Dictionary of feature tensors and label tensor.
    """
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
    """Loads dataset using the tf.data API from CSV files.
    Args:
        pattern: str, file pattern to glob into list of files.
        batch_size: int, the number of examples per batch.
        mode: tf.estimator.ModeKeys to determine if training or evaluating.
    Returns:
        `Dataset` object.
    """
# TODO: Make a CSV dataset
dataset = tf.data.experimental.make_csv_dataset()
# TODO: Map dataset to features and label
dataset = dataset.map() # features, label
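    # Possible completions of the two TODOs above (illustrative sketch):
    # dataset = tf.data.experimental.make_csv_dataset(
    #     file_pattern=pattern, batch_size=batch_size,
    #     column_names=CSV_COLUMNS, column_defaults=DEFAULTS)
    # dataset = dataset.map(map_func=features_and_labels)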
# Shuffle and repeat for training
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=1000).repeat()
# Take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(buffer_size=1)
return dataset
def create_input_layers():
    """Creates dictionary of input layers for each feature.
    Returns:
        Dictionary of `tf.keras.layers.Input` layers for each feature.
    """
# TODO: Create dictionary of tf.keras.layers.Input for each raw feature
inputs = {}
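    # Illustrative sketch of one way to fill the dictionary (the feature names are
    # assumptions carried over from CSV_COLUMNS above):
    # inputs.update({col: tf.keras.layers.Input(name=col, shape=(), dtype="float32")
    #                for col in ["mother_age", "gestation_weeks"]})
    # inputs.update({col: tf.keras.layers.Input(name=col, shape=(), dtype="string")
    #                for col in ["is_male", "plurality"]})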
return inputs
def create_feature_columns():
    """Creates dictionary of feature columns from inputs.
    Returns:
        Dictionary of feature columns.
    """
# TODO: Create feature columns for numeric features
feature_columns = {}
# TODO: Add feature columns for categorical features
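    # Illustrative sketch (feature names and vocabularies are assumptions):
    # feature_columns = {col: tf.feature_column.numeric_column(key=col)
    #                    for col in ["mother_age", "gestation_weeks"]}
    # feature_columns["is_male"] = tf.feature_column.indicator_column(
    #     tf.feature_column.categorical_column_with_vocabulary_list(
    #         key="is_male", vocabulary_list=["True", "False", "Unknown"]))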
return feature_columns
def get_model_outputs(inputs):
    """Creates model architecture and returns outputs.
    Args:
        inputs: Dense tensor used as inputs to model.
    Returns:
        Dense tensor output from the model.
    """
    # TODO: Create two hidden layers of [64, 32] just like in the BQML DNN
# TODO: Create final output layer
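    # Illustrative sketch of the layers named in the TODOs above:
    # h1 = tf.keras.layers.Dense(units=64, activation="relu", name="h1")(inputs)
    # h2 = tf.keras.layers.Dense(units=32, activation="relu", name="h2")(h1)
    # output = tf.keras.layers.Dense(units=1, activation="linear", name="weight")(h2)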
return output
def rmse(y_true, y_pred):
    """Calculates RMSE evaluation metric.
    Args:
        y_true: tensor, true labels.
        y_pred: tensor, predicted labels.
    Returns:
        Tensor with value of RMSE between true and predicted labels.
    """
# TODO: Calculate RMSE from true and predicted labels
pass
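    # A standard definition that would satisfy the TODO above (sketch):
    # return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))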
# Build a simple Keras DNN using its Functional API
def build_dnn_model():
    """Builds simple DNN using Keras Functional API.
    Returns:
        `tf.keras.models.Model` object.
    """
# Create input layer
inputs = create_input_layers()
# Create feature columns
feature_columns = create_feature_columns()
# The constructor for DenseFeatures takes a list of numeric columns
# The Functional API in Keras requires: LayerConstructor()(inputs)
dnn_inputs = tf.keras.layers.DenseFeatures(
feature_columns=feature_columns.values()
)(inputs)
# Get output of model given inputs
output = get_model_outputs(dnn_inputs)
# Build model and compile it all together
model = tf.keras.models.Model(inputs=inputs, outputs=output)
# TODO: Add custom eval metrics to list
model.compile(optimizer="adam", loss="mse", metrics=["mse"])
return model
print("Here is our DNN architecture so far:\n")
model = build_dnn_model()
print(model.summary())
tf.keras.utils.plot_model(
model=model, to_file="dnn_model.png", show_shapes=False, rankdir="LR"
)
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around
NUM_EVALS = 5 # how many times to evaluate
# Enough to get a reasonable sample, but not so much that it slows down
NUM_EVAL_EXAMPLES = 10000
# TODO: Load training dataset
trainds = load_dataset()
# TODO: Load evaluation dataset
evalds = load_dataset().take(count=NUM_EVAL_EXAMPLES // 1000)
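# Possible arguments (illustrative; the file patterns are assumptions about how the
# train/eval CSVs were named in the earlier lab step):
# trainds = load_dataset(pattern="train*.csv", batch_size=TRAIN_BATCH_SIZE,
#                        mode=tf.estimator.ModeKeys.TRAIN)
# evalds = load_dataset(pattern="eval*.csv",
#                       batch_size=1000).take(count=NUM_EVAL_EXAMPLES // 1000)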
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=logdir, histogram_freq=1
)
# TODO: Fit model on training dataset and evaluate every so often
history = model.fit()
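# A sketch of the call (keyword names are standard Keras; the values reuse the
# constants defined above):
# history = model.fit(trainds, validation_data=evalds, epochs=NUM_EVALS,
#                     steps_per_epoch=steps_per_epoch,
#                     callbacks=[tensorboard_callback])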
# Plot
import matplotlib.pyplot as plt
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(["loss", "rmse"]):
ax = fig.add_subplot(nrows, ncols, idx + 1)
plt.plot(history.history[key])
plt.plot(history.history[f"val_{key}"])
plt.title(f"model {key}")
plt.ylabel(key)
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left");
OUTPUT_DIR = "babyweight_trained"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(
OUTPUT_DIR, datetime.datetime.now().strftime("%Y%m%d%H%M%S")
)
tf.saved_model.save(
obj=model, export_dir=EXPORT_PATH
) # with default serving function
print(f"Exported trained model to {EXPORT_PATH}")
!ls $EXPORT_PATH
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Verify CSV files exist
Step2: Create Keras model
Step5: Lab Task #2
Step7: Lab Task #3
Step9: Lab Task #4
Step11: Lab Task #5
Step13: Lab Task #6
Step15: Lab Task #7
Step16: We can visualize the DNN using the Keras plot_model utility.
Step17: Run and evaluate model
Step18: Visualize loss curve
Step19: Save the model
|
499 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
# Create a random array with a lot of zeros
X = np.random.random((10, 5))
print(X)
X[X < 0.7] = 0 # note: fancy indexing
print(X)
from scipy import sparse
# turn X into a csr (Compressed-Sparse-Row) matrix
X_csr = sparse.csr_matrix(X)
print(X_csr)
# convert the sparse matrix to a dense array
print(X_csr.toarray())
# Sparse matrices support linear algebra:
y = np.random.random(X_csr.shape[1])
z1 = X_csr.dot(y)
z2 = X.dot(y)
np.allclose(z1, z2)
# Create an empty LIL matrix and add some items
X_lil = sparse.lil_matrix((5, 5))
for i, j in np.random.randint(0, 5, (15, 2)):
X_lil[i, j] = i + j
print(X_lil)
print(X_lil.toarray())
X_csr = X_lil.tocsr()
print(X_csr)
from scipy.sparse import bsr_matrix
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
from scipy.sparse import dok_matrix
S = dok_matrix((5, 5), dtype=np.float32)
for i in range(5):
for j in range(i, 5):
S[i,j] = i+j
S.toarray()
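# The accompanying description also mentions the COO (coordinate) format; a minimal
# illustrative example of constructing one from (row, col, value) triplets:
from scipy.sparse import coo_matrix
row = np.array([0, 3, 1, 0])
col = np.array([0, 3, 1, 2])
vals = np.array([4, 5, 7, 9])
coo_matrix((vals, (row, col)), shape=(4, 4)).toarray()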
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The CSR representation can be very efficient for computations, but it is not as good for adding elements.
Step2: Often, once an LIL matrix is created, it is useful to convert it to a CSR format
Step3: There are several other sparse formats that can be useful for various problems
Step4: COO - Coordinate Sparse Matrix
|