Unnamed: 0 (int64, 0-15.9k) | cleaned_code (stringlengths 67-124k, ⌀) | cleaned_prompt (stringlengths 168-30.3k, ⌀) |
---|---|---|
900 | <ASSISTANT_TASK:>
Python Code:
m1 = "hello world!!".encode('hex')
m2 = "other message".encode('hex')
key = "secretkey123!".encode('hex')
print 'm1: {}\nm2: {}\nkey: {}'.format(m1, m2, key)
print len(m1), len(m2), len(key)
ct1 = hex(int(m1, 16) ^ int(key, 16))[2:-1]
ct2 = hex(int(m2, 16) ^ int(key, 16))[2:-1]
print 'ct1: {}\nct2: {}'.format(ct1, ct2)
ctx = hex(int(ct1, 16) ^ int(ct2, 16))[2:-1]
print 'ctx: {}'.format(ctx)
print hex(int(ct1, 16) ^ int(key, 16))[2:-1].decode('hex')
print hex(int(ct2, 16) ^ int(key, 16))[2:-1].decode('hex')
import string
space = ' '.encode('hex')
def attack(crib, ctx):
width = len(crib)
print 'crib in hex: {}\ncrib width: {}\n------------'.format(crib, width)
for i in range(0, len(ctx)):
decoded = hex(int(crib, 16) ^ int(ctx[i:i+width], 16))[2:].decode('hex')
if decoded.isalpha():
print "{}:{}\t".format(i, i+width), '{} XOR {}'.format(crib, ctx[i:i+width]), decoded
attack(space, ctx)
for i in string.uppercase[:10]:
print hex(int(' '.encode('hex'), 16) ^ int(i.encode('hex'), 16))[2:].decode('hex')
crib = ' '.encode('hex')
attack(crib, ctx)
crib = 'World!!'.encode('hex')
attack(crib, ctx)
ct1 = 0x315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e
ct2 = 0x234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f
ct3 = 0x32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb
ct4 = 0x32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa
ct5 = 0x3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070
ct6 = 0x32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4
ct7 = 0x32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce
ct8 = 0x315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3
ct9 = 0x271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027
ct10 = 0x466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83
target = 0x32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To demonstrate the OTP, we can decrypt the CTs by XOR'ing with the key (k)
Step2: The assignment asks us to examine what happens when we XOR a space character ' ' with uppercase or lowercase letters
Step3: So, if we XOR a space with a letter we'll get its opposite case. Thus far we have several candidates; if we didn't know anything about the PTs, we'd have to expand our search with each candidate. For brevity's sake I'll expand the search with knowledge of the PT.
Step4: In this demonstration I'll proceed with knowledge of the PT to expand the crib. Note that by XOR'ing with the crib we get the plaintext of the second message
|
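The case-flip trick exploited in Steps 2-4 follows from ASCII: a space (0x20) XORed with a letter toggles bit 5, turning 'a' (0x61) into 'A' (0x41) and back. A minimal Python 3 sketch of that property, and of the key cancelling out when two ciphertexts share a pad (the names below are illustrative, not part of the notebook above):

def xor_bytes(a: bytes, b: bytes) -> bytes:
    # XOR two equal-length byte strings
    return bytes(x ^ y for x, y in zip(a, b))

print(chr(ord(' ') ^ ord('a')))   # 'A'  (0x20 ^ 0x61 == 0x41)
print(chr(ord(' ') ^ ord('A')))   # 'a'

m1, m2 = b"hello world!!", b"other message"
key = b"secretkey123!"
ct1, ct2 = xor_bytes(m1, key), xor_bytes(m2, key)
# the key cancels: ct1 XOR ct2 == m1 XOR m2, which is what the crib attack exploits
assert xor_bytes(ct1, ct2) == xor_bytes(m1, m2)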
901 | <ASSISTANT_TASK:>
Python Code:
%load_ext pandas_td.ipython
c = get_config()
c.InteractiveShellApp.extensions = [
'pandas_td.ipython',
]
%td_databases
%td_tables sample
%td_jobs
%td_use sample_datasets
nasdaq
%%td_presto
select count(1) cnt
from nasdaq
%%td_presto -o df
select count(1) cnt
from nasdaq
df
%%td_presto -O './output.csv'
select count(1) cnt
from nasdaq
start = '2010-01-01'
end = '2011-01-01'
%%td_presto
select count(1) cnt
from nasdaq
where td_time_range(time, '{start}', '{end}')
%%td_presto -n
select count(1) cnt
from nasdaq
where td_time_range(time, '{start}', '{end}')
%%td_presto
select
-- Time-series index (yearly)
td_date_trunc('year', time) time,
-- Same as above
-- td_time_format(time, 'yyyy-01-01') time,
count(1) cnt
from
nasdaq
group by
1
limit
3
%matplotlib inline
%%td_presto --plot
select
-- x-axis
td_date_trunc('year', time) time,
-- y-axis
min(low) low,
max(high) high
from
nasdaq
where
symbol = 'AAPL'
group by
1
%%td_presto -o df
select
-- daily summary
td_date_trunc('day', time) time,
min(low) low,
max(high) high,
sum(volume) volume
from
nasdaq
where
symbol = 'AAPL'
group by
1
# Use resample for local calculation
df['high'].resample('1m', how='max').plot()
%%td_presto --plot
select
-- x-axis
td_date_trunc('month', time) time,
-- columns
symbol,
-- y-axis
avg(close) close
from
nasdaq
where
symbol in ('AAPL', 'MSFT')
group by
1, 2
%%td_presto --pivot
select
td_date_trunc('year', time) time,
symbol,
avg(close) close
from
nasdaq
where
td_time_range(time, '2010', '2015')
and symbol like 'AA%'
group by
1, 2
%%td_presto -v --plot
select
td_date_trunc('year', time) time,
sum(volume) volume
from
nasdaq
group by
1
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It can be loaded automatically by the following configuration in "~/.ipython/profile_default/ipython_config.py"
Step2: After loading the extension, type "%td" and press TAB to list magic functions
Step3: %td_tables returns the list of tables
Step4: %td_jobs returns the list of recently executed jobs
Step5: Use database
Step6: By printing a table name, you can describe column names
Step7: Tab completion is also supported
Step8: The result of the query can be stored in a variable by -o
Step9: Or you can save the result into a file by -O
Step10: Python-style variable substitution is supported
Step11: You can preview the actual query by --dry-run (or -n)
Step12: Time-series index
Step13: Plotting
Step14: In practice, however, it is more efficient to execute rough calculation on the server side and store the result into a variable for further analysis
Step15: --plot provides a shortcut way of plotting "pivot charts", as a combination of pivot() and plot(). If the query result contains non-numeric columns, or column names ending with "_id", they are used as the columns parameter
Step16: Pivot tables
Step17: Verbose output
|
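For reference, the pivot-chart behaviour described in Step 15 ("a combination of pivot() and plot()") can be reproduced with plain pandas on any result frame holding time, symbol, and close columns; the frame below is a made-up stand-in for a %%td_presto -o df result:

import pandas as pd

# hypothetical stand-in for a query result with a non-numeric 'symbol' column
df = pd.DataFrame({
    'time': pd.to_datetime(['2013-01-01', '2013-01-01', '2014-01-01', '2014-01-01']),
    'symbol': ['AAPL', 'MSFT', 'AAPL', 'MSFT'],
    'close': [500.0, 28.0, 100.0, 40.0],
})
# non-numeric columns become chart columns, as --plot/--pivot do
df.pivot(index='time', columns='symbol', values='close').plot()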
902 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
df = pd.read_csv ('http://vuduc.org/cse6040/logreg_points_train.csv')
df.head()
sns.lmplot(data=df, x="x_1", y="x_2", hue="label", fit_reg=False,)
points = df.as_matrix (['x_1', 'x_2'])
labels = df['label'].as_matrix ()
n = points.shape[0]
d = points.shape[1]
k = 2
def init_centers(X, k):
# @YOUSE: randomly sample k data points as centers
# should return a (k x d) numpy array
pass
def compute_d2(X, centers):
D = np.empty((n, k))
# @YOUSE: fill D[i,j] as the square euclidean distance from point i to center j
pass
return D
def cluster_points(D):
# @YOUSE: return an (n x 1) numpy array which shows the assigned cluster for each point
# For example, D = [[0.3, 0.2],
# [0.1, 0.5],
# [0.4, 0.2]]
# should return np.array([1,0,1])
pass
def update_centers(X, clustering):
centers = np.empty((k, d))
for i in range(k):
# @YOUSE: compute the new center of cluster i (centers[i, :])
pass
return centers
def WCSS(D):
# @YOUSE: return the objective function value (within-cluster sum of squares)
# For example, D = [[0.3, 0.2],
# [0.1, 0.5],
# [0.4, 0.2]]
# should return 0.2 + 0.1 + 0.2 = 0.5
pass
def has_converged(old_centers, centers):
# @YOUSE: return true if the k center points remain the same
# note that the ordering may be different
pass
def kmeans(X, k):
# @YOUSE: implement the k-means algorithm
# print the objective function value (WCSS) at each iteration until it converges
# return the final clustering
pass
clustering = kmeans(points, k)
df['clustering'] = clustering
sns.lmplot(data=df, x="x_1", y="x_2", hue="clustering", fit_reg=False,)
difference = np.sum(labels != clustering)
error = min(difference, n - difference)
error
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's use the same dataset from the last Lab.
Step2: Note that the labels should not be used in the k-means algorithm. They are only used here as the ground truth for later verification.
Step3: Compute the distance matrix
Step4: Exercise
Step5: Update the center of each cluster
Step6: Calculate the objective function
Step7: Check if k-means has already converged
Step8: Putting it all together
Step9: Plot the clustering result
Step10: Number of misclustered points
|
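For the @YOUSE placeholders above, one possible (unofficial) way to fill in the distance, assignment, and objective helpers with NumPy broadcasting is sketched below; the function names carry an _example suffix to mark them as illustrations rather than the exercise solution:

import numpy as np

def compute_d2_example(X, centers):
    # squared Euclidean distance from every point (n x d) to every center (k x d)
    diff = X[:, np.newaxis, :] - centers[np.newaxis, :, :]   # shape (n, k, d)
    return np.sum(diff ** 2, axis=2)                         # shape (n, k)

def cluster_points_example(D):
    # index of the nearest center for each point
    return np.argmin(D, axis=1)

def wcss_example(D):
    # within-cluster sum of squares: each point's distance to its assigned center
    return np.sum(np.min(D, axis=1))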
903 | <ASSISTANT_TASK:>
Python Code:
# import the things we need
from __future__ import print_function, division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pykalman
import ipywidgets
import IPython
import matplotlib, matplotlib.colors
matplotlib.rcParams['figure.figsize'] = (14.0, 8.0)
%matplotlib inline
from scipy.stats import norm
import scipy.stats
%%javascript
IPython.OutputArea.auto_scroll_threshold = 9999;
OutputArea.prototype._should_scroll = function(){return false};
from noise_cursor import NoiseCursorDemo
# no noise
n = NoiseCursorDemo()
%gui tk
# some noise
n = NoiseCursorDemo(noise=20)
%gui tk
# Creates a simple one-pole IIR smoothing filter,
# with a cutoff set by alpha (closer to 1 is more extreme filtering)
def mk_lowpass(alpha):
state = [0,0]
def update(x,y):
if x==x and y==y: # nan test
state[0] = alpha*state[0] + (1-alpha)*x
state[1] = alpha*state[1] + (1-alpha)*y
return list(state)
return update
# apply filtering to the cursor output
n = NoiseCursorDemo(filter=mk_lowpass(alpha=0.97), noise=30)
%gui tk
# and some mistracks
n = NoiseCursorDemo(filter=mk_lowpass(alpha=0.95), noise=30,
jump=0.05, jump_scale=5000)
%gui tk
# add some tracking losses
n = NoiseCursorDemo(filter=mk_lowpass(alpha=0.95), noise=30,
jump=0.05, jump_scale=5000,
dropout=[0.04, 0.1])
%gui tk
from oneeurofilter import OneEuroFilter
# make a 2D OneEuroFilter function
def mk_oneuro(*args, **kwargs):
# state, which is propagated from time step to time step
filters = [OneEuroFilter(*args, **kwargs),OneEuroFilter(*args, **kwargs)]
state = [0,0]
def update(x,y):
if x==x and y==y: # nan test
state[0] = filters[0](x)
state[1] = filters[1](y)
return list(state)
return update
# The one-euro filter has two critical parameters: mincutoff and beta, which set
# the limit on alpha and the responsiveness adjustment rate, respectively
# it works very well for variable velocity problems, which are very common in cursor tracking
# type problems
n = NoiseCursorDemo(filter=mk_oneuro(freq=1.0, mincutoff=0.01, beta=0.0008), noise=30)
# but with dropout and mistracks, the one euro filter starts to struggle
n = NoiseCursorDemo(filter=mk_oneuro(freq=1.0, mincutoff=0.001, beta=0.001), noise=30,
jump=0.05, jump_scale=5000,
dropout=[0.04, 0.1])
%gui tk
## A selection of different possible shapes of 2D Gaussians
def sigma_plot(sigma):
mx = np.linspace(-5,5,40)
x, y = np.meshgrid(mx,mx)
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
rv = scipy.stats.multivariate_normal([0,0], sigma)
plt.contourf(x, y, rv.pdf(pos))
plt.figure(figsize=(12,12))
plt.subplot(2,2,1)
sigma_plot([[1,0], [0,1]])
plt.title("Unit variance, square")
plt.subplot(2,2,2)
sigma_plot([[0.25,0], [0,0.25]])
plt.title("Smaller variance, square")
plt.subplot(2,2,3)
sigma_plot([[2,0], [0,0.5]])
plt.title("Unequal variance")
plt.subplot(2,2,4)
plt.title("Skewed")
sigma_plot([[6,0], [1.2,0.4]])
plt.suptitle("Various covariance matrices for a 2D Gaussian")
sigma_x = 1
sigma_dx = 0.1
sigma_ddx = 0.001
sigma_a = np.diag([sigma_x, sigma_x, sigma_dx, sigma_dx, sigma_ddx, sigma_ddx])
dt = 0.5 # 1 / frame rate in some time units
dt2 = 0.5 * dt * dt
A = np.array([
[1,0, dt,0, dt2,0],
[0,1, 0,dt, 0,dt2],
[0,0, 1,0, dt,0],
[0,0, 0,1 , 0,dt],
[0,0, 0,0, 1, 0],
[0,0, 0,0, 0, 1]])
print(sigma_a)
sigma_a *= 0.01
print(A)
def simple_simulate_dynamics(A, sigma_a, x=None, n=100):
# given update matrix A and noise matrix sigma_a, produce a trajectory
# using the Kalman filter prediction equations
# start from all zero conditions
if x is None:
x = np.zeros((A.shape[0],))
xs = []
for i in range(n):
# apply update rule
# x_{t+1} = A x_t + N(0, sigma_a)
x = np.dot(A,x) + scipy.stats.multivariate_normal.rvs(cov=sigma_a)
xs.append(x)
return np.array(xs)
# run 10 random walks with these dynamics
def trajectory_realisations(A, sigma_a, n=100, walks=10):
for i in range(walks):
xs = simple_simulate_dynamics(A, sigma_a)
plt.plot(xs[:,0], xs[:,1], '-', markersize=2)
trajectory_realisations(A, sigma_a)
# Just acceleration; smooth trajectories
sigma_a2 = np.diag([0.0, 0.0, 0.0, 0.0, 2e-4, 2e-4])
trajectory_realisations(A, sigma_a2)
# no acceleration, no velocity noise, just position noise
sigma_a3 = np.diag([0.05, 0.05, 0 , 0, 0, 0])
trajectory_realisations(A, sigma_a3)
C = np.array([[1,0,0,0,0,0],
[0,1,0,0,0,0]]).astype(np.float64)
print(C)
sig_c = 15
sigma_c = np.diag([sig_c, sig_c])
print(sigma_c)
xmax, ymax = 400, 400 # screen size
xc, yc = xmax/2, ymax/2 # coordinates of screen centre
mu_0 = np.array([xc, yc, 0, 0, 0, 0])
sigma_vel = 100
sigma_acc = 100
sigma_0 = np.diag([xc/2, yc/2, sigma_vel, sigma_vel, sigma_acc, sigma_acc])
import pykalman
# create a filter with the parameters defined above
kalman_filter = pykalman.KalmanFilter(
transition_matrices = A,
observation_matrices = C,
transition_covariance = sigma_a,
observation_covariance = sigma_c,
initial_state_mean = mu_0,
initial_state_covariance = sigma_0
)
# generate a simple parabolic trajectory, with a bit of noise and some missing
# observations
def gen_path(n):
path = []
cx, cy = 50,50
t = 0
for k in range(n):
t+= 2.5
# noise
nx, ny = np.random.normal(0,3), np.random.normal(0,3)
# drop out
if k>35 and k<65:
obs = [np.nan, np.nan]
else:
obs = [1.5*t+cx+nx,5*t-0.025*t*t+cy+ny]
path.append(obs)
return np.array(path)
path = gen_path(100)
plt.plot(path[:,0], path[:,1], '.')
ax = plt.gca()
ax.invert_yaxis()
# filter-update example
mean, cov = mu_0, sigma_0
for i in range(10):
mean, cov = kalman_filter.filter_update(mean, cov, observation=path[i])
print(mean[:2]) # x,y co-ordinates of mean
# for interactive drawing
import kalman_utils
kalman_utils = reload(kalman_utils)
kalman_utils.run_kalman(path, mu_0, sigma_0, A, C, sigma_a, sigma_c)
%gui tk
glitch_path = gen_path(100)
glitch_path[::10,1] = 0 # every 10th y value set to zero
kalman_utils.run_kalman(glitch_path, mu_0, sigma_0, A, C, sigma_a, sigma_c, frame_time=20)
%gui tk
# The cursor before it has been filtered
test_cursor = NoiseCursorDemo(noise=30,
jump=0.08, jump_scale=2000,
dropout=[0.02, 0.15])
%gui tk
# creates a new Kalman filter with the given parameters
def make_kf(A,sigma_a,C,sigma_C,mu_0,sigma_0,reject_lik=-np.inf):
state = {"mean":mu_0, "cov":sigma_0}
# construct the filter object
kf = pykalman.KalmanFilter(transition_matrices = A, observation_matrices = C,
transition_covariance = sigma_a, observation_covariance = sigma_c,
initial_state_mean = mu_0, initial_state_covariance = sigma_0)
def update(x,y):
# project state into obsevation space, so we can compute
# the log-likelihood of observations directly
pred_obs_mean = np.dot(C, state["mean"])
pred_obs_cov = np.dot(C, np.dot(state["cov"], C.T))
obs_arr = np.array([x,y])
# likelihood of this sample
lik = scipy.stats.multivariate_normal.logpdf(obs_arr, mean=pred_obs_mean,
cov=pred_obs_cov)
# apply likelihood filtering
if x==x and lik==lik and lik>reject_lik: # if x is not NaN
mean, cov = kf.filter_update(state["mean"], state["cov"], observation=[x,y])
else:
# update without observation
mean, cov = kf.filter_update(state["mean"], state["cov"])
state["mean"] = mean
state["cov"] = cov
# return the various parameters
return {"mean":[mean[0], mean[1]], "cov":cov[:2,:2], "lik":lik}
return update
## Modify this cell to adjust KF parameters
## Hints:
# adjust dt, sigma_a, sigma_c and reject_lik
# you can change A or C, but make sure you know what you are doing!
# changing mu_0 and sigma_0 probably won't have much effect, as the
# prior will be forgotten very quickly anyway
# A
dt = 1 # increasing this will speed up all dynamics, and vice versa
dt2 = 0.5 * dt * dt
# the transition matrix
A = np.array([[1,0, dt,0, dt2,0],
[0,1, 0,dt, 0,dt2],
[0,0, 1,0, dt,0],
[0,0, 0,1, 0,dt],
[0,0, 0,0, 1,0],
[0,0, 0,0, 0,1]])
# the process (transition) noise
# sigma_A
sigma_x = 0.1
sigma_dx = 0.1
sigma_ddx = 0.1
sigma_a = np.diag([sigma_x, sigma_x, sigma_dx, sigma_dx, sigma_ddx, sigma_ddx])
# C: the observation matrix, projecting state onto observations
C = np.array([[1,0,0,0,0,0],
[0,1,0,0,0,0]]).astype(np.float64)
# sigma_C: the expected noise in observations
sig_c = 1
sigma_c = np.diag([sig_c, sig_c])
### Prior N(mu_0, Sigma_0)
# mu_0
xmax, ymax = 800, 800 # screen size
xc, yc = xmax/2, ymax/2 # coordinates of screen centre
mu_0 = np.array([xc, yc, 0, 0, 0, 0])
# sigma_0
# just a diagonal matrix
sigma_vel = 1
sigma_acc = 1
sigma_0 = np.diag([xc/2, yc/2, sigma_vel, sigma_vel, sigma_acc, sigma_acc])
# rejection threshold for observations
# if you make this too close to zero (e.g. -5) all observations will be ignored
# if you make it too large, jumps will still get through
# note that you can see this value interactively at the top left of the screen (bottom row)
reject_lik = -10000
#### Create the filter and run it
kfilter=make_kf(A,sigma_a,C,sigma_c,mu_0,sigma_0,reject_lik=reject_lik)
kalman_cursor = NoiseCursorDemo(filter=kfilter,
noise=30,
jump=0.05, jump_scale=5000,
dropout=[0.02, 0.15])
%gui tk
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Why not just smooth things with a simple linear filter?
Step3: Spike noise
Step4: Signal dropout
Step5: Maybe we need a better filter?
Step6: Thoughts
Step7: Linearity of dynamics
Step8: Since this is a predictive (generative) model of how we expect the cursor to behave, we can try running the prediction, starting from some preset initial conditions. If our dynamics model is good, we should expect the trajectories generated to be something (vaguely!) like cursor motion
Step9: Alternative dynamics (changing $\Sigma_a$)
Step10: Observations
Step11: We also know that our observation is noisy (i.e. not a true measurement of the world).
Step12: The complete equation for the observations is
Step13: Creating the filter
Step14: We can generate some synthetic data to track
Step15: We can use the filter_update() function to compute new states as data comes in.
Step16: Interactive animation
Step17: Rejecting observations
Step18: But we can be cleverer. Because we can obtain the likelihood of any observation under our current model, we can simply ignore observations that appear to be too unlikely to be plausible.
Step19: Metric of success
Step20: Parameterisation
Step21: Testing code
|
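The filter_update() calls above hide the standard predict/update algebra. A compact NumPy sketch of those two steps, using the same A, C, sigma_a, sigma_c naming as the notebook (this mirrors the textbook equations, not pykalman's internals):

import numpy as np

def kf_predict(mean, cov, A, sigma_a):
    # propagate the state estimate through the linear dynamics
    mean = A @ mean
    cov = A @ cov @ A.T + sigma_a
    return mean, cov

def kf_update(mean, cov, obs, C, sigma_c):
    # correct the prediction with a new observation
    innovation = obs - C @ mean
    S = C @ cov @ C.T + sigma_c           # innovation covariance
    K = cov @ C.T @ np.linalg.inv(S)      # Kalman gain
    mean = mean + K @ innovation
    cov = (np.eye(len(mean)) - K @ C) @ cov
    return mean, cov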
904 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
corpus, y = load_data()
assert type(corpus) == list
assert type(y) == list
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
svc = LinearSVC(penalty='l1', dual=False)
svc.fit(X, y)
selected_feature_names = np.asarray(vectorizer.get_feature_names_out())[np.flatnonzero(svc.coef_)]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
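The np.flatnonzero(svc.coef_) mask above keeps the features whose L1-penalized coefficients are nonzero. A roughly equivalent sketch using sklearn's SelectFromModel (reusing the svc and vectorizer objects from the snippet; load_data itself is left undefined there):

from sklearn.feature_selection import SelectFromModel

selector = SelectFromModel(svc, prefit=True)
mask = selector.get_support()
selected_feature_names = np.asarray(vectorizer.get_feature_names_out())[mask]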
905 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import fqe
except ImportError:
!pip install fqe --quiet
from itertools import product
import fqe
from fqe.hamiltonians.diagonal_coulomb import DiagonalCoulomb
import numpy as np
import openfermion as of
from scipy.linalg import expm
#Utility function
def uncompress_tei(tei_mat, notation='chemistry'):
uncompress chemist notation integrals
tei_tensor[i, k, j, l] = tei_mat[(i, j), (k, l)]
[1, 1, 2, 2] = [1, 1, 2, 2] = [1, 1, 2, 2] = [1, 1, 2, 2]
[i, j, k, l] = [k, l, i, j] = [j, i, l, k]* = [l, k, j, i]*
For real we also have swap of i <> j and k <> l
[j, i, k, l] = [l, k, i, j] = [i, j, l, k] = [k, l, j, i]
tei_mat[(i, j), (k, l)] = int dr1 int dr2 phi_i(dr1) phi_j(dr1) O(r12) phi_k(dr1) phi_l(dr1)
Physics notation is the notation that is used in FQE.
Args:
tei_mat: compressed two electron integral matrix
Returns:
uncompressed 4-electron integral tensor. No antisymmetry.
if notation not in ['chemistry', 'physics']:
return ValueError("notation can be [chemistry, physics]")
norbs = int(0.5 * (np.sqrt(8 * tei_mat.shape[0] + 1) - 1))
basis = {}
cnt = 0
for i, j in product(range(norbs), repeat=2):
if i >= j:
basis[(i, j)] = cnt
cnt += 1
tei_tensor = np.zeros((norbs, norbs, norbs, norbs))
for i, j, k, l in product(range(norbs), repeat=4):
if i >= j and k >= l:
tei_tensor[i, j, k, l] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[k, l, i, j] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[j, i, l, k] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[l, k, j, i] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[j, i, k, l] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[l, k, i, j] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[i, j, l, k] = tei_mat[basis[(i, j)], basis[(k, l)]]
tei_tensor[k, l, j, i] = tei_mat[basis[(i, j)], basis[(k, l)]]
if notation == 'chemistry':
return tei_tensor
elif notation == 'physics':
return np.asarray(tei_tensor.transpose(0, 2, 1, 3), order='C')
return tei_tensor
norbs = 4
tedim = norbs * (norbs + 1) // 2
if (norbs // 2) % 2 == 0:
n_elec = norbs // 2
else:
n_elec = (norbs // 2) + 1
sz = 0
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fci_data = fqe_wfn.sector((n_elec, sz))
fci_graph = fci_data.get_fcigraph()
hf_wf = np.zeros((fci_data.lena(), fci_data.lenb()), dtype=np.complex128)
hf_wf[0, 0] = 1 # right most bit is zero orbital.
fqe_wfn.set_wfn(strategy='from_data',
raw_data={(n_elec, sz): hf_wf})
fqe_wfn.print_wfn()
tei_compressed = np.random.randn(tedim**2).reshape((tedim, tedim))
tei_compressed = 0.5 * (tei_compressed + tei_compressed.T)
tei_tensor = uncompress_tei(tei_compressed, notation='physics')
diagonal_coulomb = of.FermionOperator()
diagonal_coulomb_mat = np.zeros((norbs, norbs))
for i, j in product(range(norbs), repeat=2):
diagonal_coulomb_mat[i, j] = tei_tensor[i, j, i, j]
for sigma, tau in product(range(2), repeat=2):
diagonal_coulomb += of.FermionOperator(
((2 * i + sigma, 1), (2 * i + sigma, 0), (2 * j + tau, 1),
(2 * j + tau, 0)), coefficient=diagonal_coulomb_mat[i, j])
dc_ham = DiagonalCoulomb(diagonal_coulomb_mat)
alpha_occs = [list(range(fci_graph.nalpha()))]
beta_occs = [list(range(fci_graph.nbeta()))]
occs = alpha_occs[0] + beta_occs[0]
diag_ele = 0.
for ind in occs:
for jnd in occs:
diag_ele += diagonal_coulomb_mat[ind, jnd]
evolved_phase = np.exp(-1j * diag_ele)
print(evolved_phase)
# evolve FQE wavefunction
evolved_hf_wfn = fqe_wfn.time_evolve(1, dc_ham)
# check they the accumulated phase is equivalent!
assert np.isclose(evolved_hf_wfn.get_coeff((n_elec, sz))[0, 0], evolved_phase)
norbs = 6
tedim = norbs * (norbs + 1) // 2
if (norbs // 2) % 2 == 0:
n_elec = norbs // 2
else:
n_elec = (norbs // 2) + 1
sz = 0
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fqe_wfn.set_wfn(strategy='random')
inital_coeffs = fqe_wfn.get_coeff((n_elec, sz)).copy()
print("Random initial wavefunction")
fqe_wfn.print_wfn()
tei_compressed = np.random.randn(tedim**2).reshape((tedim, tedim))
tei_compressed = 0.5 * (tei_compressed + tei_compressed.T)
tei_tensor = uncompress_tei(tei_compressed, notation='physics')
diagonal_coulomb = of.FermionOperator()
diagonal_coulomb_mat = np.zeros((norbs, norbs))
for i, j in product(range(norbs), repeat=2):
diagonal_coulomb_mat[i, j] = tei_tensor[i, j, i, j]
for sigma, tau in product(range(2), repeat=2):
diagonal_coulomb += of.FermionOperator(
((2 * i + sigma, 1), (2 * i + sigma, 0), (2 * j + tau, 1),
(2 * j + tau, 0)), coefficient=diagonal_coulomb_mat[i, j])
dc_ham = DiagonalCoulomb(diagonal_coulomb_mat)
cirq_wfn = fqe.to_cirq(fqe_wfn).reshape((-1, 1))
final_cirq_wfn = expm(-1j * of.get_sparse_operator(diagonal_coulomb)) @ cirq_wfn
# recover a fqe wavefunction
from_cirq_wfn = fqe.from_cirq(final_cirq_wfn.flatten(), 1.0E-8)
fqe_wfn = fqe_wfn.time_evolve(1, dc_ham)
print("Evolved wavefunction")
fqe_wfn.print_wfn()
print("From Cirq Evolution")
from_cirq_wfn.print_wfn()
assert np.allclose(from_cirq_wfn.get_coeff((n_elec, sz)),
fqe_wfn.get_coeff((n_elec, sz)))
print("Wavefunctions are equivalent")
fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]])
fqe_wfn.set_wfn(strategy='from_data',
raw_data={(n_elec, sz): inital_coeffs})
for term, coeff in diagonal_coulomb.terms.items():
op = of.FermionOperator(term, coefficient=coeff)
fqe_wfn = fqe_wfn.time_evolve(1, op)
assert np.allclose(from_cirq_wfn.get_coeff((n_elec, sz)),
fqe_wfn.get_coeff((n_elec, sz)))
print("Individual term evolution is equivalent")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: FQE vs OpenFermion vs Cirq
Step3: The first example we will perform is diagonal Coulomb evolution on the Hartree-Fock state. The diagonal Coulomb operator is defined as
Step4: Now we can define a random 2-electron operator $V$. To define $V$ we need a $4 \times 4$ matrix. We will generate this matrix by making a full random two-electron integral matrix and then just taking the diagonal elements
Step5: Evolution under $V$ can be computed by looking at each bitstring, seeing if $n_{p\alpha}n_{q\beta}$ is non-zero and then phasing that string by $V_{pq}$. For the Hartree-Fock state we can easily calculate this phase accumulation. The alpha and beta bitstrings are "0001" and "0001".
Step6: We can now try this out for more than 2 electrons. Let's reinitialize a wavefunction on 6-orbitals with 4-electrons $S_{z} = 0$ to a random state.
Step7: We need to build our Diagonal Coulomb operator for this bigger system.
Step8: Now we can convert our wavefunction to a cirq wavefunction, evolve under the diagonal_coulomb operator we constructed and then compare the outputs.
Step9: Finally, we can compare against evolving each term of $V$ individually.
|
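The phase computed above for the Hartree-Fock determinant generalizes to any computational-basis state: a diagonal Coulomb evolution multiplies the amplitude by exp(-i t Σ_{p,q ∈ occ} V_pq), where occ lists the spatial indices of the occupied spin-orbitals, exactly as in the diag_ele loop above. A small bookkeeping sketch (V and occ are placeholders for a coefficient matrix and an occupation list):

import numpy as np

def diagonal_coulomb_phase(V, occ, t=1.0):
    # V: (norb, norb) diagonal-Coulomb coefficients, occ: occupied orbital indices
    occ = np.asarray(occ)
    energy = V[np.ix_(occ, occ)].sum()
    return np.exp(-1j * t * energy)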
906 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(128, 128)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
model = tf.keras.applications.MobileNetV3Large()
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
import tensorflow as tf
@tf.function(input_signature=[
tf.TensorSpec(shape=[4, 4], dtype=tf.float32)
])
def func(x):
return tf.cosh(x) + tf.slice(x, [1, 1], [1, 1])
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[func.get_concrete_function()], func)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS,
tf.lite.OpsSet.SELECT_TF_OPS,
]
fb_model = converter.convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(128, 128)),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow Lite Model Analyzer
Step2: Basic usage with MobileNetV3Large Keras model
Step3: Check GPU delegate compatibility
Step4: Case 2
|
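The converted flatbuffer can also be written to disk and analyzed later from its bytes; a small sketch reusing only the calls already shown above (the file name is arbitrary):

# save the converted model, then analyze it again from its serialized bytes
with open('model.tflite', 'wb') as f:
    f.write(fb_model)

with open('model.tflite', 'rb') as f:
    tf.lite.experimental.Analyzer.analyze(model_content=f.read(), gpu_compatibility=True)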
907 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import sklearn.svm
%matplotlib inline
ex6data1 = scipy.io.loadmat('ex6data1.mat')
X = ex6data1['X']
y = ex6data1['y'][:, 0]
def plot_data(X, y, ax=None):
if ax == None:
fig, ax = plt.subplots(figsize=(7,5))
pos = y==1
neg = y==0
ax.scatter(X[pos,0], X[pos,1], marker='+', color='b')
ax.scatter(X[neg,0], X[neg,1], marker='o', color='r', s=5)
plot_data(X, y)
svm = sklearn.svm.SVC(C=1, kernel='linear')
svm.fit(X, y)
np.mean(svm.predict(X) == y)
svm.coef_
fig, ax = plt.subplots(figsize=(7,5))
def draw_contour(X, model):
x1 = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 200)
x2 = np.linspace(np.min(X[:,1]), np.max(X[:,1]), 200)
xx1, xx2 = np.meshgrid(x1, x2)
yy = model.predict(np.c_[xx1.flat, xx2.flat]).reshape(xx1.shape)
ax.contour(x1, x2, yy, levels=[0.5])
plot_data(X, y, ax)
draw_contour(X, svm)
def gaussianKernel(x1, x2, sigma):
# ====================== YOUR CODE HERE ======================
# Instructions: Fill in this function to return the similarity between x1
# and x2 computed using a Gaussian kernel with bandwidth
# sigma
#
#
return 0
# =============================================================
gaussianKernel(x1=np.array([1, 2, 1]), x2=np.array([0, 4, -1]), sigma=2)
ex6data2 = scipy.io.loadmat('ex6data2.mat')
X = ex6data2['X']
y = ex6data2['y'][:,0]
print(X.shape, y.shape)
plot_data(X, y)
model = sklearn.svm.SVC(C=1, gamma=100, kernel='rbf')
model.fit(X, y)
np.mean((model.predict(X) == y))
fig, ax = plt.subplots()
plot_data(X, y, ax)
draw_contour(X, model)
ex6data3 = scipy.io.loadmat('ex6data3.mat')
X = ex6data3['X']
y = ex6data3['y'][:, 0]
Xval = ex6data3['Xval']
yval = ex6data3['yval'][:, 0]
print(X.shape, y.shape, Xval.shape, yval.shape)
plot_data(X, y)
plot_data(Xval, yval)
import itertools
possible_C = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000]
possible_gamma = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000]
cv_errors = np.zeros((len(possible_C), len(possible_gamma)))
# YOUR CODE GOES HERE
C = 7
gamma = 7
# ==================
model = sklearn.svm.SVC(C=C, gamma=gamma, kernel='rbf')
model.fit(X, y)
fig, ax = plt.subplots()
plot_data(X, y, ax)
draw_contour(X, model)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Part 2
Step3: Part 3
Step4: The Gaussian Kernel between x1 = [1; 2; 1], x2 = [0; 4; -1], sigma = 2 should be about 0.324652.
Step5: Part 4
Step6: Part 5
Step7: Part 6
Step8: Part 7
|
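For the gaussianKernel exercise, the quantity being asked for is exp(-||x1 - x2||^2 / (2 sigma^2)); one possible way to fill it in, together with a sketch of the Part 6 grid search (names with an _example suffix are illustrations, not the official solution):

import numpy as np

def gaussian_kernel_example(x1, x2, sigma):
    # RBF similarity between two vectors
    return np.exp(-np.sum((x1 - x2) ** 2) / (2.0 * sigma ** 2))

print(gaussian_kernel_example(np.array([1, 2, 1]), np.array([0, 4, -1]), 2))  # ~0.324652

# one possible cross-validation grid for Part 6, scored on the validation set
for ii, C in enumerate(possible_C):
    for jj, gamma in enumerate(possible_gamma):
        m = sklearn.svm.SVC(C=C, gamma=gamma, kernel='rbf').fit(X, y)
        cv_errors[ii, jj] = np.mean(m.predict(Xval) != yval)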
908 | <ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
target_text_eos = []
target_id_text = []
source_id_text = []
for line in target_text.split('\n'):
target_text_eos.append(line + ' <EOS> ')
for line in source_text.split('\n'):
index = len(source_id_text)
source_id_text.append([])
for word in line.split():
source_id_text[index].append(source_vocab_to_int[word])
for line in target_text_eos:
index = len(target_id_text)
target_id_text.append([])
for word in line.split():
target_id_text[index].append(target_vocab_to_int[word])
return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
# TODO: Implement Function
input = tf.placeholder(tf.int32, shape=[None, None], name="input")
targets = tf.placeholder(tf.int32, shape=[None, None], name="targets")
learnRate = tf.placeholder(tf.float32, name="learning_rate")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return input, targets, learnRate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
starting = tf.fill([batch_size, 1], target_vocab_to_int['<GO>'])
target_data = tf.concat([starting, ending], axis=1)
return target_data
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout]*num_layers)
_, states = tf.nn.dynamic_rnn(cell, rnn_inputs, dtype=tf.float32)
return states
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
train_pred,_,_ = tf.contrib.seq2seq.dynamic_rnn_decoder(
dec_cell, train_decoder_fn, dec_embed_input, sequence_length, scope=decoding_scope)
train_logits = output_fn(tf.nn.dropout(train_pred, keep_prob))
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size)
infer_logits,_,_ = tf.contrib.seq2seq.dynamic_rnn_decoder(
dec_cell, infer_decoder_fn, scope=decoding_scope)
return infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout]*num_layers)
with tf.variable_scope("decoding") as decoding_scope:
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
train_logits = decoding_layer_train(encoder_state, cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
infer_logits = decoding_layer_infer(encoder_state, cell, dec_embeddings, target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'], sequence_length-1, vocab_size,
decoding_scope, output_fn, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
rnn_inputs = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
enc_state = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob)
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
train_logits, infer_logits = decoding_layer(dec_embed_input, dec_embeddings, enc_state, target_vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 15
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.8
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
ids = [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.lower().split()]
return ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'hello does this work ?'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
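The text_to_ids conversion above can be written more compactly with list comprehensions; a sketch with the same behaviour (one list of word ids per sentence, targets ending in the <EOS> id):

def text_to_ids_compact(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    # equivalent to text_to_ids above: append <EOS> to every target sentence
    source_id_text = [[source_vocab_to_int[w] for w in line.split()]
                      for line in source_text.split('\n')]
    target_id_text = [[target_vocab_to_int[w] for w in line.split()] + [target_vocab_to_int['<EOS>']]
                      for line in target_text.split('\n')]
    return source_id_text, target_id_text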
909 | <ASSISTANT_TASK:>
Python Code:
from numpy import array
a = array([
[1, 1, 0],
[1, 0, 0],
[0, 0, 0]
])
b = array([
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]
])
c = array([
[0, 0, 0],
[0, 0, 1],
[0, 0, 0]
])
init = array([
[1, 0, 0]
])
final = array([
[0],
[0],
[1]
])
init.dot(a).dot(c).dot(final)
init.dot(b).dot(c).dot(final)
init.dot(a).dot(a).dot(b).dot(c).dot(final)
S = set( ['a', 'b', 'c'] )
Q = set( [0, 1, 2] )
i = 0
F = set( [ 2 ] )
td = { (0, 'a'): [0, 1],
(1, 'a'): [0],
(0, 'b'): [1],
(1, 'b'): [1],
(1, 'c'): [2]
}
def df(state, symbol):
print(state, symbol)
return td.get(tuple( [state, symbol] ), [])
def accept(sequence):
    # non-deterministic acceptance: keep an agenda of (state, position) pairs and backtrack
    agenda = []
    count = len(sequence)
    agenda.append((i, 0))
    while agenda:
        print(agenda)
        state, pos = agenda.pop()
        states = df(state, sequence[pos])
        if not states:
            print("No transition")
            continue  # dead end on this path; try the remaining agenda items
        if pos == count - 1:
            print("Reached end")
            if F.intersection(set(states)):
                return True
            continue  # input consumed, but no final state reached on this path
        for s in states:
            agenda.append((s, pos + 1))
    return False
accept("aac")
alphabetMatrices = {}
alphabetMatrices["a"] = array([
[1, 1, 0],
[1, 0, 0],
[0, 0, 0]
])
alphabetMatrices["b"] = array([
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]
])
alphabetMatrices["c"] = array([
[0, 0, 0],
[0, 0, 1],
[0, 0, 0]
])
alphabetMatrices["default"] = array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
])
def paths(seq):
res = init
for x in seq:
res = res.dot( alphabetMatrices.get(x, alphabetMatrices["default"]) )
return res.dot(array([
[0],
[0],
[1]
]))[0][0]
paths("aabc")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The transitions are encoded as state-to-state adjacency matrices, where the rows and columns represent the states 0, 1, and 2. The following matrix shows all transitions associated with the label "a": from 0 to 0, from 0 to 1, and from 1 to 0.
Step2: The following matrix shows the transitions associated with "b".
Step3: The following matrix shows the transitions associated with "c".
Step4: We can define the start state using an init vector. This init vector indicates that the start state should be 0.
Step5: The set of final states can be encoded as a column vector that in this case defines state 2 as the only final state.
Step6: If we want to check whether a sequence like "aa" can be accepted by this automaton, we can compute the product of the init vector, the transition matrix for each symbol in the sequence, and the final-state vector.
Step7: The 0 indicates that there is no path from the initial state to the final state based on a sequence "aa".
Step8: Just to verify once more, let us consider the sequence "aabc"
Step9: There are obviously three paths in our Non-deterministic Finite State Automaton that generate the sequence "aabc".
|
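The same matrix trick also counts paths of a fixed length over the whole alphabet: summing the per-symbol matrices gives a single matrix M, and init · M^n · final counts every labeled path of length n that starts in the start state and ends in a final state. A short sketch using the matrices defined above:

# total number of accepting labeled paths of length 4, over all symbols
M = alphabetMatrices["a"] + alphabetMatrices["b"] + alphabetMatrices["c"]
print(init.dot(M).dot(M).dot(M).dot(M).dot(final)[0][0])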
910 | <ASSISTANT_TASK:>
Python Code:
from astropy.time import Time
import matplotlib.pyplot as plt
plt.ion()
import poliastro.plotting as plotting
from poliastro.bodies import Earth, Mars, Jupiter, Sun
from poliastro.twobody import Orbit
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = plotting.OrbitPlotter()
plotter.plot(Orbit.from_body_ephem(Earth, epoch), label='Earth')
plotter.plot(Orbit.from_body_ephem(Mars, epoch), label='Mars')
plotter.plot(Orbit.from_body_ephem(Jupiter, epoch), label='Jupiter');
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = plotting.OrbitPlotter()
earthPlots = plotter.plot(Orbit.from_body_ephem(Earth, epoch), label='Earth')
earthPlots[0].set_linestyle('-') # solid line
earthPlots[0].set_linewidth(0.5)
earthPlots[1].set_marker('H') # Hexagon
earthPlots[1].set_markersize(15)
marsPlots = plotter.plot(Orbit.from_body_ephem(Mars, epoch), label='Mars')
jupiterPlots = plotter.plot(Orbit.from_body_ephem(Jupiter, epoch), label='Jupiter')
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = plotting.OrbitPlotter()
earthPlots = plotter.plot(Orbit.from_body_ephem(Earth, epoch), label='Earth')
earthPlots[0].set_linestyle('-') # solid line
earthPlots[0].set_linewidth(0.5)
earthPlots[1].set_marker('H') # Hexagon
earthPlots[1].set_markersize(15)
marsPlots = plotter.plot(Orbit.from_body_ephem(Mars, epoch), label='Mars')
marsPlots[0].set_dashes([0,1,0,1,1,0])
marsPlots[0].set_linewidth(2)
marsPlots[1].set_marker('D') # Diamond
marsPlots[1].set_markersize(15)
marsPlots[1].set_fillstyle('none')
marsPlots[1].set_markeredgewidth(1) # make sure this is set if you use fillstyle 'none'
jupiterPlots = plotter.plot(Orbit.from_body_ephem(Jupiter, epoch), label='Jupiter')
jupiterPlots[0].set_linestyle('') # No line
jupiterPlots[1].set_marker('*') # star
jupiterPlots[1].set_markersize(15)
epoch = Time("2018-08-17 12:05:50", scale="tdb")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_title("Earth, Mars, and Jupiter")
ax.set_facecolor('None')
plotter = plotting.OrbitPlotter(ax)
plotter.plot(Orbit.from_body_ephem(Earth, epoch), label='Earth')
plotter.plot(Orbit.from_body_ephem(Mars, epoch), label='Mars')
plotter.plot(Orbit.from_body_ephem(Jupiter, epoch), label='Jupiter');
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we get hold of the lines list returned by the OrbitPlotter.plot method; this is a list of matplotlib line objects. The first is the orbit line; the second is the current position marker. With these line objects we can start changing the style. First we make the orbit line a thin solid line. Then we change the current position marker to a large hexagon.
Step2: You can also change the style of the plot using the matplotlib axis, which can be acquired from the OrbitPlotter()
|
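The setters used above (set_linestyle, set_marker, set_markersize, and so on) are plain matplotlib Line2D methods, so the same styling works on any line plot; a self-contained sketch without poliastro:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
(line,) = ax.plot([0, 1, 2, 3], [0, 1, 4, 9], label='demo')
line.set_linestyle('--')
line.set_linewidth(0.5)
line.set_marker('H')          # hexagon markers
line.set_markersize(15)
line.set_fillstyle('none')
line.set_markeredgewidth(1)   # needed when fillstyle is 'none'
ax.legend()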
911 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from math import sin, cos, pi, sqrt
from math import radians as d2r
# def printFoot(i, newpos):
# if i == 0:
# print('New [{}](x,y,z): {:.2f}\t{:.2f}\t{:.2f}'.format(i, newpos[0], newpos[1], newpos[2]))
def rot_z(t, c):
t - theta [radians]
c - [x,y,z] or [x,y] ... the function detects 2D or 3D vector
if len(c) == 3:
ans = np.array([
c[0]*cos(t)-c[1]*sin(t),
c[0]*sin(t)+c[1]*cos(t),
c[2]
])
else:
ans = np.array([
c[0]*cos(t)-c[1]*sin(t),
c[0]*sin(t)+c[1]*cos(t)
])
return ans
print('{:.2f} {:.2f} {:.2f}'.format(*rot_z(pi/4, np.array([84,0,-65]))))
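# sanity-check sketch: rot_z(t, c) is just multiplication by the usual z-rotation matrix
R = np.array([[cos(pi/4), -sin(pi/4), 0],
              [sin(pi/4),  cos(pi/4), 0],
              [0,          0,         1]])
print(R.dot(np.array([84, 0, -65])))  # should match the rot_z output above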
class Gait(object):
def __init__(self):
self.legOffset = [0, 6, 3, 9]
# self.body = np.array([72.12, 0, 0])
self.rest = None
def calcRotatedOffset(self, cmd, frame_angle):
calculate the foot offsets for each leg and delta linear/rotational
in - cmd(x,y,z_rotation)
out - array(leg0, leg1, ...)
where leg0 = {'linear': [x,y], 'rotational': [x,y], 'angle': zrotation(rads)}
# I could do the same here as I do below for rotation
# rotate the command into the leg frame
rc = rot_z(frame_angle, cmd)
# get rotation distance: dist = rot_z(angle, rest) - rest
# this just reduces the function calls and math
zrot = d2r(float(cmd[2])) # should I assume this is always radians? save conversion
# fromcenter = self.rest + self.body
# value of this?
# rot = rot_z(zrot/2, fromcenter) - rot_z(-zrot/2, fromcenter)
# ans = {'linear': rc, 'rotational': rot, 'angle': zrot}
ans = {'linear': rc, 'angle': zrot}
return ans
def command(self, cmd, func, steps=12):
# handle no movement command ... do else where?
if sqrt(cmd[0]**2 + cmd[1]**2 + cmd[2]**2) < 0.001:
for leg in range(0, 4):
func(leg, self.rest) # move to resting position
return
cmd = [100.0, 0.0, 0.0]  # NOTE: this hard-coded test command overrides the cmd argument passed in above; remove it to honor the caller's command
# frame rotations for each leg
frame = [-pi/4, pi/4, 3*pi/4, -3*pi/4]
for i in range(0, steps): # iteration, there are 12 steps in gait cycle
for legNum in [0, 2, 1, 3]: # order them diagonally
rcmd = self.calcRotatedOffset(cmd, frame[legNum])
pos = self.eachLeg(i, rcmd) # move each leg appropriately
func(legNum, pos)
class DiscreteRippleGait(Gait):
def __init__(self, h, r):
Gait.__init__(self)
self.phi = [9/9, 6/9, 3/9, 0/9, 1/9, 2/9, 3/9, 4/9, 5/9, 6/9, 7/9, 8/9] # foot pos in gait sequence
maxl = h #
minl = maxl/2
self.z = [minl, maxl, maxl, minl, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # leg height
self.rest = r # idle leg position
def eachLeg(self, index, cmd):
interpolates the foot position of each leg
cmd:
linear (mm)
angle (rads)
rest = self.rest
i = index % 12
phi = self.phi[i]
# rotational commands -----------------------------------------------
angle = cmd['angle']/2-cmd['angle']*phi
rest_rot = rot_z(-angle, rest)
# linear commands ----------------------------------------------------
linear = cmd['linear']
xx = linear[0]
yy = linear[1]
# create new move command
move = np.array([
xx/2 - phi*xx,
yy/2 - phi*yy,
self.z[i]
])
# new foot position: newpos = rest + move ----------------------------
newpos = move + rest_rot
return newpos
# class ContinousRippleGait(Gait):
# alpha = 0.5
# def __init__(self, h, r):
# Gait.__init__(self)
# self.height = h
# self.rest = r
# def phi(self, x):
#
# The phase
#
# phi = 0.0
# if x <= 3.0:
# phi = 1/3*(3.0-x)
# else:
# phi = 1/9*(x-3)
# return phi
# def z(self, x):
#
# Leg height
# duty cycle:
# 0-3: leg lifted
# 3-12: leg on ground
# duty = (12-3)/12 = 0.75 = 75% a walking gait
#
# height = self.height
# z = 0.0
# if x <= 1:
# z = height/1.0*x
# elif x <= 2.0:
# z = height
# elif x <= 3.0:
# z = -height/1.0*(x-2.0)+height
# return z
# def eachLeg(self, index, cmd):
#
# interpolates the foot position of each leg
#
# rest = self.rest
# i = (index*self.alpha) % 12
# phi = self.phi(i)
# z = self.z(i)
# # rotational commands -----------------------------------------------
# angle = cmd['angle']/2-cmd['angle']*phi
# rest_rot = rot_z(-angle, rest)
# # linear commands ----------------------------------------------------
# linear = cmd['linear']
# xx = linear[0]
# yy = linear[1]
# # create new move command
# move = np.array([
# xx/2 - phi*xx,
# yy/2 - phi*yy,
# z
# ])
# # new foot position: newpos = rest + move ----------------------------
# newpos = move + rest_rot
# return newpos
cmd = {'linear': [0,0], 'angle': pi/4}
leg = np.array([84,0.0,-65]) # idle leg position
height = 25
# gait = ContinousRippleGait(height, leg)
gait = DiscreteRippleGait(height, leg)
alpha = 1
pos = []
for i in range(0,12):
p = gait.eachLeg(i*alpha,cmd)
pos.append(p)
print(p)
# Run the entire class
# remember!! command does a rotation of the leg coord system, so it
# will output different numbers than above.
cmd = [0,0,pi/4]
leg = np.array([84,0.0,-65]) # idle leg position
height = 25
# gait = ContinousRippleGait(height, leg)
# gait.alpha = 0.5
gait = DiscreteRippleGait(height, leg)
gait.command(cmd, print, steps=12) # doesn't return anything
px = []
py = []
pz = []
for p in pos:
px.append(p[0])
py.append(p[1])
pz.append(p[2])
plt.subplot(2,2,1);
plt.plot(px)
plt.ylabel('x')
plt.subplot(2,2,2)
plt.plot(py)
plt.ylabel('y')
plt.subplot(2,2,3);
plt.plot(pz)
plt.ylabel('z')
plt.subplot(2,2,4);
plt.plot(px,py)
plt.ylabel('y')
plt.xlabel('x');
for p in pos:
print(p)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Hildebrand Diagram
Step5: Discrete Ripple Gait
Step9: Continuous Ripple Gait
Step10: Gait Plot
Step11: Now the code below runs the gait for all 4 legs and only prints out the position for leg 0. You can modify this above in the printFoot function. In reality, you would pass in a function that moves the leg.
|
912 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from actuariat_python.data import elections_legislatives_bureau_vote
tour = elections_legislatives_bureau_vote(source='xd')
tour["T2"].sort_values(["Code département", "N° de circonscription Lg"]).head()
from actuariat_python.data import elections_legislatives_circonscription_geo
geo = elections_legislatives_circonscription_geo()
geo.sort_values(["department", "code_circonscription"]).head()
c = list(geo.sort_values(["department", "code_circonscription"])["communes"])[0].split("-")
c.sort()
c[:5]
list(geo.sort_values(["department", "code_circonscription"])["kml_shape"])[:1]
from actuariat_python.data import elections_vote_places_geo
bureau_geo = elections_vote_places_geo()
bureau_geo.head()
from actuariat_python.data import elections_vote_place_address
bureau = elections_vote_place_address(hide_warning=True)
bureau.head()
import keyring, os
bing_key = keyring.get_password("bing", "actuariat_python,key")
coders = ["Nominatim"]
if bing_key:
# if the key was found
coders.append(("bing", bing_key))
len(coders)
import os
if not os.path.exists("bureauxvotegeo.zip"):
from actuariat_python.data import geocode
from pyquickhelper.loghelper import fLOG
fLOG(OutputPrint=True)
bureau_geo = geocode(bureau, fLOG=fLOG, index=False, encoding="utf-8",
exc=False, save_every="bureau.dump.txt", sep="\t", every=100,
coders=coders)
else:
print("The data has already been geocoded.")
import missingno
missingno.matrix(bureau_geo, figsize=(12, 6));
from actuariat_python.data import elections_vote_places_geo
places = elections_vote_places_geo()
places.head()
from actuariat_python.data import elections_vote_places_geo
bureau_geo = elections_vote_places_geo()
villes_geo = bureau_geo[["city", "zip", "n"]].groupby(["city", "zip"], as_index=False).count()
villes_geo.head()
from actuariat_python.data import villes_geo
villes_geo = villes_geo(as_df=True)
villes_geo.head()
import keyring, os
bing_key = keyring.get_password("bing", "actuariat_python,key")
coders = []
if bing_key:
# if the key was found
coders.append(("bing", bing_key))
len(coders)
import os
geocode = True
if geocode:
if os.path.exists("villes_geo.txt"):
import pandas
villes_geo = pandas.read_csv("villes_geo.txt", sep="\t", encoding="utf-8")
else:
from actuariat_python.data import geocode
from pyquickhelper.loghelper import fLOG
fLOG(OutputPrint=True)
villes_geo = geocode(villes_geo, fLOG=fLOG, index=False, encoding="utf-8",
exc=False, save_every="villes.dump.txt", sep="\t", every=100,
coders=coders, country="France")
villes_geo.head()
villes_geo.to_csv("villes_geo.txt", sep="\t", index=False, encoding="utf-8")
villes_geo.shape
from pyensae.datasource import download_data
shp_vote = download_data("base_cartelec_2007_2010.zip")
shp_vote
# The 2.0.0.dev version of pyshp is buggy. It is better not to use it.
import shapefile
if "dev" in shapefile.__version__:
raise ImportError("Use a different version of pyshp not '{0}'".format(shapefile.__version__))
r = shapefile.Reader("fond0710.shp", encoding="utf8", encodingErrors="ignore")
shapes = r.shapes()
records = r.records()
len(shapes), len(records)
{k[0]: v for k, v in zip(r.fields, records[0])}
shapes[0].points
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Geolocation of the constituencies
Step3: Geolocation of the polling stations
Step4: The following explains how I built this table.
Step5: We retrieve a key to use the Bing Maps API with the keyring module. To store the password on the machine, it is enough to write
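The stored call itself was truncated in this dump; presumably it looks something like the following sketch (the service and user names are taken from the code above, the key value is a placeholder):

import keyring
keyring.set_password("bing", "actuariat_python,key", "<your-bing-maps-key>")  # hypothetical key value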
Step6: We look at the missing values.
Step7: We can finally retrieve the geocoded database like this
Step8: Geolocation of the cities
Step9: We keep the data to avoid rebuilding it and calling the Bing API again.
Step10: Geolocation of the polling stations with Cartélec
|
913 | <ASSISTANT_TASK:>
Python Code:
#bsearch_ph_sel = 'all-ph'
#bsearch_ph_sel = 'Dex'
bsearch_ph_sel = 'DexDem'
data_file = 'results/usALEX-5samples-PR-raw-%s.csv' % bsearch_ph_sel
from __future__ import division
import numpy as np
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format='retina' # for hi-dpi displays
sns.set_style('whitegrid')
palette = ('Paired', 10)
sns.palplot(sns.color_palette(*palette))
sns.set_palette(*palette)
data = pd.read_csv(data_file).set_index('sample')
data
display(data[['E_pr_do_gauss', 'E_pr_do_kde', 'E_pr_do_hsm', 'n_bursts_do']])
print('KDE Mean (%): ', data.E_pr_do_kde.mean()*100)
print('KDE Std. Dev. (%):', data.E_pr_do_kde.std()*100)
d = data[['E_pr_do_gauss', 'E_pr_do_kde', 'E_pr_do_hsm']]#, 'n_bursts_do']]
d.plot(lw=3);
E_table = data[['E_pr_do_gauss', 'E_pr_do_kde']]
E_table
lk_table = E_table / (1 - E_table)
lk_table.columns = [c.replace('E_pr_do', 'lk') for c in E_table.columns]
lk_table['num_bursts'] = data['n_bursts_do']
lk_table
data.E_pr_do_kde
lk_table.lk_kde
E_m = np.average(data.E_pr_do_kde, weights=data.n_bursts_do)
E_m
k_E_m = E_m / (1 - E_m)
k_E_m
k_m = np.average(lk_table.lk_kde, weights=data.n_bursts_do)
k_m
stats = pd.concat([lk_table.mean(), lk_table.std()], axis=1, keys=['mean', 'std']).T
stats
table_to_save = lk_table.append(stats)
table_to_save = table_to_save.round({'lk_gauss': 5, 'lk_kde': 5, 'num_bursts': 2})
table_to_save
table_to_save.to_csv('results/table_usalex_5samples_leakage_coeff.csv')
'%.5f' % k_m
with open('results/usALEX - leakage coefficient %s.csv' % bsearch_ph_sel, 'w') as f:
f.write('%.5f' % k_m)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To recompute the PR data used by this notebook run the
Step2: Create Leakage Table
Step3: Average leakage coefficient
Step4: Conclusions
Step5: Average coefficient
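For reference (a standard formula, not text from the original notebook), the burst-weighted average computed above with np.average corresponds to $\bar{k} = \sum_i n_i k_i / \sum_i n_i$, where $k_i$ is the per-sample leakage estimate (lk_kde) and $n_i$ is the corresponding burst count used as the weight.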
|
914 | <ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'auth_read':'user', # Credentials used for reading data.
'email_from':'', # Must match from field.
'email_to':'', # Must match to field.
'subject':'', # Regular expression to match subject.
'link':'', # Regular expression to match email.
'attachment':'', # Regular expression to match atttachment.
'dataset':'', # Existing dataset in BigQuery.
'table':'', # Name of table to be written to.
'schema':'[]', # Schema provided in JSON list format or empty list.
'header':False, # Does the csv contain a header row.
'is_incremental_load':False, # Append report data to table based on date column, de-duplicates.
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'email':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'read':{
'from':{'field':{'name':'email_from','kind':'string','order':1,'default':'','description':'Must match from field.'}},
'to':{'field':{'name':'email_to','kind':'string','order':2,'default':'','description':'Must match to field.'}},
'subject':{'field':{'name':'subject','kind':'string','order':3,'default':'','description':'Regular expression to match subject.'}},
'link':{'field':{'name':'link','kind':'string','order':4,'default':'','description':'Regular expression to match email.'}},
'attachment':{'field':{'name':'attachment','kind':'string','order':5,'default':'','description':'Regular expression to match atttachment.'}}
},
'write':{
'bigquery':{
'dataset':{'field':{'name':'dataset','kind':'string','order':6,'default':'','description':'Existing dataset in BigQuery.'}},
'table':{'field':{'name':'table','kind':'string','order':7,'default':'','description':'Name of table to be written to.'}},
'schema':{'field':{'name':'schema','kind':'json','order':8,'default':'[]','description':'Schema provided in JSON list format or empty list.'}},
'header':{'field':{'name':'header','kind':'boolean','order':9,'default':False,'description':'Does the csv contain a header row.'}},
'is_incremental_load':{'field':{'name':'is_incremental_load','kind':'boolean','order':10,'default':False,'description':'Append report data to table based on date column, de-duplicates.'}}
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter Email Fetch Recipe Parameters
Step3: 4. Execute Email Fetch
|
915 | <ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.preprocessing import compute_proj_ecg
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=150).resample(100).pick('meg')
ecg_proj, _ = compute_proj_ecg(raw, ch_name='MEG 0511') # No ECG chan
raw.add_proj(ecg_proj)
raw.apply_proj()
epochs = mne.make_fixed_length_epochs(raw, duration=30, preload=False)
event_related_plot = epochs.plot_image(picks=['MEG 1142'])
epochs.load_data().filter(l_freq=8, h_freq=12)
alpha_data = epochs.get_data()
corr_matrix = mne.connectivity.envelope_correlation(alpha_data, combine=None)
first_30 = corr_matrix[0]
last_30 = corr_matrix[-1]
corr_matrices = [first_30, last_30]
color_lims = np.percentile(np.array(corr_matrices), [5, 95])
titles = ['First 30 Seconds', 'Last 30 Seconds']
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.suptitle('Correlation Matrices from First 30 Seconds and Last 30 Seconds')
for ci, corr_matrix in enumerate(corr_matrices):
ax = axes[ci]
mpbl = ax.imshow(corr_matrix, clim=color_lims)
ax.set_xlabel(titles[ci])
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.2, 0.025, 0.6])
cbar = fig.colorbar(ax.images[0], cax=cax)
cbar.set_label('Correlation Coefficient')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this tutorial we'll crop and resample the raw data to a manageable size
Step2: To create fixed length epochs, we simply call the function and provide it
Step3: Characteristics of Fixed Length Epochs
Step4: For information about creating epochs for event-related analyses, please see
Step5: If desired, separate correlation matrices for each epoch can be obtained.
Step6: Now we can plot correlation matrices. We'll compare the first and last
|
916 | <ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 50
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
return x / 255
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
return np.eye(10)[x]
# Our own implementation of one hot logic.
# return np.array([one_hot_encode_helper(label) for label in x])
one_hot_map = {}
def one_hot_encode_helper(x):
if x in one_hot_map:
return one_hot_map[x]
result = np.zeros(10)
result[x] = 1  # mark the label position (was mistakenly set to 0, which left the vector all zeros)
one_hot_map[x] = result
return result
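# Quick illustrative check (an addition, not part of the project's own tests):
# each label should map to a row with a single 1 at the label's index.
print(one_hot_encode([3, 0, 9]))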
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a bach of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
return tf.placeholder(tf.float32, [None, image_shape[0],image_shape[1],image_shape[2]], name='x')
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
return tf.placeholder(tf.float32, [None, n_classes], name='y')
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
return tf.placeholder(tf.float32, name='keep_prob')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
weight = tf.Variable(tf.random_normal([conv_ksize[0],
conv_ksize[1],
x_tensor.get_shape().as_list()[-1],
conv_num_outputs],
stddev=0.1))
bias = tf.Variable(tf.zeros(conv_num_outputs,
dtype=tf.float32))
conv_layer = tf.nn.conv2d(x_tensor,
weight,
strides=[1, conv_strides[0], conv_strides[1], 1],
padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
conv_layer = tf.nn.max_pool(conv_layer,
ksize=[1, pool_ksize[0], pool_ksize[1], 1],
strides=[1, pool_strides[0], pool_strides[1], 1],
padding='SAME')
return conv_layer
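# Shape note (illustrative): with 'SAME' padding the spatial size after the
# convolution is ceil(input / conv_stride) and after the pool it is
# ceil(that / pool_stride); e.g. a 32x32 input with conv_strides=(1, 1) and
# pool_strides=(2, 2) comes out 16x16, with conv_num_outputs channels.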
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
# return tf.contrib.layers.flatten(x_tensor)
flattened_size = x_tensor.shape[1] * x_tensor.shape[2] * x_tensor.shape[3]
return tf.reshape(x_tensor,
[-1, flattened_size.value])
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
# return tf.contrib.layers.fully_connected(x_tensor, num_outputs=num_outputs)
num_features = x_tensor.shape[1].value
weights = tf.Variable(tf.random_normal([num_features, num_outputs],
stddev=0.1))
biases = tf.Variable(tf.zeros([num_outputs]))
fc = tf.add(tf.matmul(x_tensor, weights),
biases)
fc = tf.nn.relu(fc)
return fc
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
# return tf.contrib.layers.fully_connected(x_tensor, num_outputs=num_outputs)
num_features = x_tensor.shape[1].value
weights = tf.Variable(tf.random_normal([num_features, num_outputs],
stddev=0.1))
biases = tf.Variable(tf.zeros([num_outputs]))
output_layer = tf.add(tf.matmul(x_tensor, weights),
biases)
# output_layer = tf.nn.softmax(output_layer)
return output_layer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x = conv2d_maxpool(x, 8, (4, 4), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 16, (4, 4), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 32, (4, 4), (1, 1), (2, 2), (2, 2))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 1024)
x = tf.nn.dropout(x, keep_prob=keep_prob)
x = fully_conn(x, 1024)
x = tf.nn.dropout(x, keep_prob=keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
x = output(x, 10)
# TODO: return output
return x
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
session.run(optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
loss = session.run(cost, feed_dict={x:feature_batch,
y:label_batch,
keep_prob:1.0})
acc = session.run(accuracy,
feed_dict={x:valid_features,
y:valid_labels,
keep_prob:1.0})
print('Loss={0} ValidationAccuracy={1}'.format(loss, acc))
# print("ValidationAccuracy={0}".format(acc))
# TODO: Tune Parameters
epochs = 35
batch_size = 2048
keep_probability = 0.9
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
917 | <ASSISTANT_TASK:>
Python Code:
def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0):
'''Generate periodic data given the function inputs
y = A*sin(2*pi*x/p - phase) + noise
Parameters
----------
x : array-like
input values to evaluate the array
period : float (default=1)
period of the periodic signal
amplitude : float (default=1)
amplitude of the periodic signal
phase : float (default=0)
phase offset of the periodic signal
noise : float (default=0)
variance of the noise term added to the periodic signal
Returns
-------
y : array-like
Periodic signal evaluated at all points x
'''
y = # complete
dy = # complete
return y + dy
x = np.linspace( # complete
y = gen_periodic_data( # complete
fig, ax = plt.subplots()
ax.scatter(x,y, edgecolors='0.2', linewidths=0.5)
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.tight_layout()
def phase_plot(x, y, period, y_unc = 0.0):
'''Create phase-folded plot of input data x, y
Parameters
----------
x : array-like
data values along abscissa
y : array-like
data values along ordinate
period : float
period to fold the data
y_unc : array-like
uncertainty of the
'''
phases = # complete
if type(y_unc) == float:
y_unc = np.zeros_like(x)
plot_order = np.argsort(phases)
fig, ax = plt.subplots()
ax.errorbar(phases[plot_order], y[plot_order], y_unc[plot_order],
fmt='o', mec="0.2", mew=0.1)
ax.set_xlabel("phase")
ax.set_ylabel("signal")
fig.tight_layout()
phase_plot( # complete
fourier_pairs_plot()
fourier_pairs_plot()
x = np.linspace( # complete
y = gen_periodic_data( # complete
x_signal = np.linspace( # complete
y_signal = gen_periodic_data( # complete
fig, ax = plt.subplots(figsize=(8,4))
ax.scatter(x,y)
ax.plot(x_signal, y_signal)
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.tight_layout()
x = np.linspace( # complete
y = gen_periodic_data( # complete
x_signal = np.linspace(# complete
y_signal = gen_periodic_data(# complete
fig, ax = plt.subplots(figsize=(8,4))
ax.scatter(x,y)
ax.plot(x_signal, y_signal)
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.tight_layout()
x = np.linspace(# complete
y = gen_periodic_data( # complete
x_signal = np.linspace( # complete
y_signal = gen_periodic_data( # complete
fig, ax = plt.subplots(figsize=(8,4))
ax.scatter(x,y)
ax.plot(x_signal, y_signal)
y_high = gen_periodic_data( # complete
y_signal_high = gen_periodic_data( # complete
ax.scatter(x,y_high)
ax.plot(x_signal, y_signal_high)
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.tight_layout()
def chi2(theta, y, y_unc, x, f):
a = theta[0]
phi = theta[1]
# complete
return # complete
from scipy.optimize import minimize
def min_chi2(theta, y, y_unc, x, f):
res = minimize( # complete
return res.fun
def ls_periodogram(y, y_unc, x, f_grid):
psd = np.empty_like(f_grid)
chi2_0 = np.sum(((y - np.mean(y))/y_unc)**2)
for f_num, f in enumerate(f_grid):
psd[f_num] = 0.5*(chi2_0 - min_chi2([0,0], y, y_unc, x, f))
return psd
np.random.seed(185)
# calculate the periodogram
x = 10*np.random.rand(100)
y = gen_periodic_data(x, period=5.25, amplitude=7.4, noise=0.8)
y_unc = np.ones_like(x)*np.sqrt(0.8)
f_grid = np.linspace( # complete
psd_ls = ls_periodogram(y, y_unc, x, f_grid)
# plot the periodogram
fig, ax = plt.subplots()
ax.plot(1/f_grid, psd_ls)
ax.set_ylabel('P')
ax.set_xlabel('Period')
fig.tight_layout()
# calculate the periodogram
f_grid = np.linspace( # complete
psd_ls = ls_periodogram(y, y_unc, x, f_grid)
# plot the periodogram
fig,ax = plt.subplots()
ax.plot(1/f_grid, psd_ls)
ax.set_ylabel('P')
ax.set_xlabel('Period')
fig.tight_layout()
print("The best fit period is: {:.4f}".format(1/f_grid[np.argmax(psd_ls)]))
phase_plot( # complete
f_min = # complete
f_max = # complete
delta_f = # complete
f_grid = np.arange(f_min, f_max, delta_f)
print("{:d} grid points are needed to sample the periodogram".format(len(f_grid)))
def fourier_pairs_plot():
fig, ax = plt.subplots(4, 2, figsize=(10, 6))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.02, top=0.95,
hspace=0.3, wspace=0.2)
x = np.linspace(-5, 5, 1000)
for axi in ax.flat:
axi.xaxis.set_major_formatter(plt.NullFormatter())
axi.yaxis.set_major_formatter(plt.NullFormatter())
# draw center line
axi.axvline(0, linestyle='dotted', color='gray')
axi.axhline(0, linestyle='dotted', color='gray')
style_re = dict(linestyle='solid', color='k', linewidth=2)
style_im = dict(linestyle='solid', color='gray', linewidth=2)
text_style = dict(size=14, color='gray')
# sine -> delta
ax[0, 0].plot(x, np.cos(x),**style_re)
ax[0, 0].set(xlim=(-5, 5), ylim=(-1.2, 1.2))
ax[0, 0].annotate('', (-np.pi, 0), (np.pi, 0),
arrowprops=dict(arrowstyle='|-|', color='gray'))
ax[0, 0].text(0, 0, '$1/f_0$', ha='center', va='bottom', **text_style)
ax[0, 0].set_title('Sinusoid')
ax[0, 1].plot([-5, 2, 2, 2, 5], [0, 0, 1, 0, 0], **style_re)
ax[0, 1].plot([-5, -2, -2, -2, 5], [0, 0, 1, 0, 0], **style_re)
ax[0, 1].set(xlim=(-5, 5), ylim=(-0.2, 1.2))
ax[0, 1].annotate('', (0, 0.4), (2, 0.4), arrowprops=dict(arrowstyle='<-', color='gray'))
ax[0, 1].annotate('', (0, 0.4), (-2, 0.4), arrowprops=dict(arrowstyle='<-', color='gray'))
ax[0, 1].text(1, 0.45, '$+f_0$', ha='center', va='bottom', **text_style)
ax[0, 1].text(-1, 0.45, '$-f_0$', ha='center', va='bottom', **text_style)
ax[0, 1].set_title('Delta Functions')
# gaussian -> gaussian
ax[1, 0].plot(x, np.exp(-(2 * x) ** 2), **style_re)
ax[1, 0].set(xlim=(-5, 5), ylim=(-0.2, 1.2))
ax[1, 0].annotate('', (0, 0.35), (0.6, 0.35), arrowprops=dict(arrowstyle='<-', color='gray'))
ax[1, 0].text(0, 0.4, '$\sigma$', ha='center', va='bottom', **text_style)
ax[1, 0].set_title('Gaussian')
ax[1, 1].plot(x, np.exp(-(x / 2) ** 2), **style_re)
ax[1, 1].set(xlim=(-5, 5), ylim=(-0.2, 1.2))
ax[1, 1].annotate('', (0, 0.35), (2, 0.35), arrowprops=dict(arrowstyle='<-', color='gray'))
ax[1, 1].text(0, 0.4, '$(2\pi\sigma)^{-1}$', ha='center', va='bottom', **text_style)
ax[1, 1].set_title('Gaussian')
# top hat -> sinc
ax[2, 0].plot([-2, -1, -1, 1, 1, 2], [0, 0, 1, 1, 0, 0], **style_re)
ax[2, 0].set(xlim=(-2, 2), ylim=(-0.3, 1.2))
ax[2, 0].annotate('', (-1, 0.5), (1, 0.5), arrowprops=dict(arrowstyle='<->', color='gray'))
ax[2, 0].text(0.0, 0.5, '$T$', ha='center', va='bottom', **text_style)
ax[2, 0].set_title('Top Hat')
ax[2, 1].plot(x, np.sinc(x), **style_re)
ax[2, 1].set(xlim=(-5, 5), ylim=(-0.3, 1.2))
ax[2, 1].annotate('', (-1, 0), (1, 0), arrowprops=dict(arrowstyle='<->', color='gray'))
ax[2, 1].text(0.0, 0.0, '$2/T$', ha='center', va='bottom', **text_style)
ax[2, 1].set_title('Sinc')
# comb -> comb
ax[3, 0].plot([-5.5] + sum((3 * [i] for i in range(-5, 6)), []) + [5.5],
[0] + 11 * [0, 1, 0] + [0], **style_re)
ax[3, 0].set(xlim=(-5.5, 5.5), ylim=(-0.2, 1.2))
ax[3, 0].annotate('', (0, 0.5), (1, 0.5), arrowprops=dict(arrowstyle='<->', color='gray'))
ax[3, 0].text(0.5, 0.6, '$T$', ha='center', va='bottom', **text_style)
ax[3, 0].set_title('Dirac Comb')
ax[3, 1].plot([-5.5] + sum((3 * [i] for i in range(-5, 6)), []) + [5.5],
[0] + 11 * [0, 1, 0] + [0], **style_re)
ax[3, 1].set(xlim=(-2.5, 2.5), ylim=(-0.2, 1.2));
ax[3, 1].annotate('', (0, 0.5), (1, 0.5), arrowprops=dict(arrowstyle='<->', color='gray'))
ax[3, 1].text(0.5, 0.6, '$1/T$', ha='center', va='bottom', **text_style)
ax[3, 1].set_title('Dirac Comb')
for i, letter in enumerate('abcd'):
ax[i, 0].set_ylabel('({0})'.format(letter), rotation=0)
# Draw arrows between pairs of axes
for i in range(4):
left = ax[i, 0].bbox.transformed(fig.transFigure.inverted()).bounds
right = ax[i, 1].bbox.transformed(fig.transFigure.inverted()).bounds
x = 0.5 * (left[0] + left[2] + right[0])
y = left[1] + 0.5 * left[3]
fig.text(x, y, r'$\Longleftrightarrow$',
ha='center', va='center', size=30)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1b
Step2: Problem 1c
Step3: Problem 1d
Step4: Problem 2) A Brief Review of Fourier Analysis
Step5: The common Fourier pairs are especially useful in light of the convolution theorem. Fourier transforms convert convolutions into point-wise products. We define a convolution as
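(The definition itself was dropped from this dump; for completeness, the standard form is $(f \ast g)(t) = \int_{-\infty}^{\infty} f(\tau)\, g(t - \tau)\, d\tau$, and the convolution theorem states $\mathcal{F}\{f \ast g\} = \mathcal{F}\{f\} \cdot \mathcal{F}\{g\}$.)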
Step6: Fourier transforms are all well and good, but ultimately we desire a measure of periodicity in actual observations of astrophysical sources, which cannot be (a) continuous, or (b) infinite. So, we cannot calculate a Fourier transform.
Step7: Sampling a signal directly at the Nyquist frequency results in a lack of any variability. But does this just mean that $f_\mathrm{Ny}$ is special? What happens at $f > f_\mathrm{Ny}$?
Step8: From the plot the signal is clearly variable (unlike when $f = f_\mathrm{Ny}$). However, there are fewer than 2 observations per cycle.
Step9: The observations are identical! Here is what you need to remember about the Nyquist frequency
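A tiny numerical illustration of that aliasing statement (a sketch, independent of the exercise code above): with samples taken at integer times, i.e. $f_s = 1$ and $f_\mathrm{Ny} = 1/2$, a sinusoid at frequency $f$ and one at $f + 1$ produce identical samples.

import numpy as np
t = np.arange(10)            # integer sampling times, f_s = 1
f = 0.3
print(np.allclose(np.sin(2*np.pi*f*t), np.sin(2*np.pi*(f + 1)*t)))  # True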
Step10: Problem 3b
Step11: Problem 3c
Step12: Problem 3d
Step13: Problem 3e
Step14: Problem 3f
Step15: Congratulations
Step16: Code from PracticalLombScargle by Jake VanderPlas
|
918 | <ASSISTANT_TASK:>
Python Code:
def my_function(arg_one, arg_two, optional_1=6, optional_2="seven"):
return " ".join([str(arg_one), str(arg_two), str(optional_1), str(optional_2)])
print(my_function("a", "b"))
print(my_function("a", "b", optional_2="eight"))
#go ahead and try out different components
def count_args(*args, **kwargs):
print("i was called with " + str(len(args)) + " arguments and " + str(len(kwargs)) + " keyword arguments")
count_args(1, 2, 3, 4, 5, foo=1, bar=2)
def random():
Always the number 4.
Chosen by fair dice roll. Guaranteed to be random.
return 4
def print_dashes():
print("---")
def print_asterisks():
print("***")
def pretty_print(string, function):
function()
print(string)
function()
pretty_print("hello", print_dashes)
pretty_print("hey", print_asterisks)
dictionaries = [
{"name": "Jack", "age": 35, "telephone": "555-1234"},
{"name": "Jane", "age": 40, "telephone": "555-3331"},
{"name": "Joe", "age": 20, "telephone": "555-8765"}
]
def get_age(x):
return x["age"]
dictionaries.sort(key=get_age)
dictionaries
dictionaries.sort(key=lambda x: x["age"], reverse=True)
dictionaries
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python has special syntax for capturing an arbitrary number of parameters. For regular parameters it is a variable with one asterisk * and for keyword parameters it is a variable with two asterisks. It is conventional to name these *args and **kwargs, but this is not required (a short unpacking example follows below).
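As a small added illustration (not from the original notebook), the same star syntax also works in the other direction, unpacking a sequence or a dict at the call site:

def add(a, b, c):
    return a + b + c

print(add(*(1, 2, 3)), add(**{"a": 1, "b": 2, "c": 3}))  # 6 6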
Step3: The length of sequences can be checked using the built-in len() function.
Step4: Functions as parameters
Step5: Extra
Step6: Now if we want to sort it using the list's built-in sort() method, the sort won't know which attribute to base the sorting on.
Step7: This is all well and good, but now you have a function called get_age that you don't intend to use a second time.
|
919 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
now = datetime.now()
now
now.day
now.weekday()
from datetime import date, time
time(3, 24)
date(1970, 9, 3)
my_age = now - datetime(1970, 9, 3)
my_age
my_age.days/365.
segments = pd.read_csv("../data/AIS/transit_segments.csv")
segments.head()
segments.seg_length.hist(bins=500)
# Write your answer here
segments.st_time.dtype
datetime.strptime(segments.st_time.loc[0], '%m/%d/%y %H:%M')
from dateutil.parser import parse
parse(segments.st_time.loc[0])
segments.st_time.apply(lambda d: datetime.strptime(d, '%m/%d/%y %H:%M'))
pd.to_datetime(segments.st_time[:10])
pd.to_datetime([None])
vessels = pd.read_csv("../data/AIS/vessel_information.csv", index_col='mmsi')
vessels.head()
[v for v in vessels.type.unique() if v.find('/')==-1]
# Write your answer here
df1 = pd.DataFrame(dict(id=range(4), age=np.random.randint(18, 31, size=4)))
df2 = pd.DataFrame(dict(id=list(range(3))+list(range(3)),
score=np.random.random(size=6)))
df1
df2
pd.merge(df1, df2)
pd.merge(df1, df2, how='outer')
segments.head(1)
vessels.head(1)
segments_merged = pd.merge(vessels, segments, left_index=True, right_on='mmsi')
segments_merged.head()
vessels.merge(segments, left_index=True, right_on='mmsi').head()
segments['type'] = 'foo'
pd.merge(vessels, segments, left_index=True, right_on='mmsi').head()
np.concatenate([np.random.random(5), np.random.random(5)])
np.r_[np.random.random(5), np.random.random(5)]
np.c_[np.random.random(5), np.random.random(5)]
mb1 = pd.read_excel('../data/microbiome/MID1.xls', 'Sheet 1', index_col=0, header=None)
mb2 = pd.read_excel('../data/microbiome/MID2.xls', 'Sheet 1', index_col=0, header=None)
mb1.shape, mb2.shape
mb1.head()
# Write your answer here
mb1.index[:3]
mb1.index.is_unique
pd.concat([mb1, mb2], axis=0).shape
pd.concat([mb1, mb2], axis=0).index.is_unique
pd.concat([mb1, mb2], axis=1).shape
pd.concat([mb1, mb2], axis=1).head()
pd.concat([mb1, mb2], axis=1, join='inner').head()
mb1.combine_first(mb2).head()
pd.concat([mb1, mb2], keys=['patient1', 'patient2']).head()
pd.concat([mb1, mb2], keys=['patient1', 'patient2']).index.is_unique
pd.concat(dict(patient1=mb1, patient2=mb2), axis=1).head()
# Write solution here
cdystonia = pd.read_csv("../data/cdystonia.csv", index_col=None)
cdystonia.head()
stacked = cdystonia.stack()
stacked
stacked.unstack().head()
# Write your answer here
twstrs_wide = cdystonia2['twstrs'].unstack('obs')
twstrs_wide.head()
cdystonia_wide = (cdystonia[['patient','site','id','treat','age','sex']]
.drop_duplicates()
.merge(twstrs_wide, right_index=True, left_on='patient', how='inner')
.head())
cdystonia_wide
(cdystonia.set_index(['patient','site','id','treat','age','sex','week'])['twstrs']
.unstack('week').head())
pd.melt(cdystonia_wide, id_vars=['patient','site','id','treat','age','sex'],
var_name='obs', value_name='twsters').head()
cdystonia.pivot(index='patient', columns='obs', values='twstrs').head()
# Write your answer here
cdystonia.pivot_table(index=['site', 'treat'], columns='week', values='twstrs',
aggfunc=max).head(20)
pd.crosstab(cdystonia.sex, cdystonia.site)
vessels.duplicated(subset='names')
vessels.drop_duplicates(['names'])
cdystonia.treat.value_counts()
treatment_map = {'Placebo': 0, '5000U': 1, '10000U': 2}
cdystonia['treatment'] = cdystonia.treat.map(treatment_map)
cdystonia.treatment
scores = pd.Series([99, 76, 85, -999, 84, 95])
scores.replace(-999, np.nan)
cdystonia2.treat.replace({'Placebo': 0, '5000U': 1, '10000U': 2})
# Write your answer here
pd.get_dummies(vessels5.type).head(10)
cdystonia.age.describe()
pd.cut(cdystonia.age, [20,30,40,50,60,70,80,90])[:30]
pd.cut(cdystonia.age, [20,30,40,50,60,70,80,90], right=False)[:30]
pd.cut(cdystonia.age, [20,40,60,80,90], labels=['young','middle-aged','old','really old'])[:30]
pd.qcut(cdystonia.age, 4)[:30]
quantiles = pd.qcut(segments.seg_length, [0, 0.01, 0.05, 0.95, 0.99, 1])
quantiles[:30]
# Write your answer here
new_order = np.random.permutation(len(segments))
new_order[:30]
segments.take(new_order).head()
segments.head()
vessels.sample(n=10)
# Write your answer here
cdystonia_grouped = cdystonia.groupby(cdystonia.patient)
cdystonia_grouped
for patient, group in cdystonia_grouped:
print('patient', patient)
print('group', group)
cdystonia_grouped.agg(np.mean).head()
cdystonia_grouped.mean().head()
cdystonia_grouped.mean().add_suffix('_mean').head()
# Write your answer here
cdystonia.groupby(['week','site']).mean().head()
normalize = lambda x: (x - x.mean())/x.std()
cdystonia_grouped.transform(normalize).head()
cdystonia_grouped['twstrs'].mean().head()
cdystonia_grouped[['twstrs']].mean().head()
chunks = dict(list(cdystonia_grouped))
chunks[4]
dict(list(cdystonia.groupby(cdystonia.dtypes, axis=1)))
cdystonia2.head(10)
cdystonia2.groupby(level='obs', axis=0)['twstrs'].mean()
def top(df, column, n=5):
return df.sort_index(by=column, ascending=False)[:n]
top3segments = segments_merged.groupby('mmsi').apply(top, column='seg_length', n=3)[['names', 'seg_length']]
top3segments.head(15)
mb1.index[:3]
class_index = mb1.index.map(lambda x: ' '.join(x.split(' ')[:3]))
mb_class = mb1.copy()
mb_class.index = class_index
mb_class.head()
mb_class.groupby(level=0).sum().head(10)
from IPython.core.display import HTML
HTML(filename='data/titanic.html')
# Write your answer here
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Date/Time data handling
Step2: In addition to datetime there are simpler objects for date and time information only, respectively.
Step3: Having a custom data type for dates and times is convenient because we can perform operations on them easily. For example, we may want to calculate the difference between two times
Step4: In this section, we will manipulate data collected from ocean-going vessels on the eastern seaboard. Vessel operations are monitored using the Automatic Identification System (AIS), a safety at sea navigation technology which vessels are required to maintain and that uses transponders to transmit very high frequency (VHF) radio signals containing static information including ship name, call sign, and country of origin, as well as dynamic information unique to a particular voyage such as vessel location, heading, and speed.
Step5: For example, we might be interested in the distribution of transit lengths, so we can plot them as a histogram
Step6: Though most of the transits appear to be short, there are a few longer distances that make the plot difficult to read. This is where a transformation is useful.
Step7: We can see that although there are date/time fields in the dataset, they are not in any specialized format, such as datetime.
Step8: Our first order of business will be to convert these data to datetime. The strptime method parses a string representation of a date and/or time field, according to the expected format of this information.
Step9: The dateutil package includes a parser that attempts to detect the format of the date strings, and convert them automatically.
Step10: We can convert all the dates in a particular column by using the apply method.
Step11: As a convenience, Pandas has a to_datetime method that will parse and convert an entire Series of formatted strings into datetime objects.
Step12: Pandas also has a custom NA value for missing datetime objects, NaT (i.e. "Not a Time").
Step13: Also, if to_datetime() has problems parsing any particular date/time format, you can pass the spec in using the format= argument.
Step14: Exercise
Step15: The challenge, however, is that several ships have travelled multiple segments, so there is not a one-to-one relationship between the rows of the two tables. The table of vessel information has a one-to-many relationship with the segments.
Step16: Notice that without any information about which column to use as a key, Pandas did the Right Thing™ and used the id column in both tables. Unless specified otherwise, merge will use any common column names as keys for merging the tables.
Step17: The outer join above yields the union of the two tables, so all rows are represented, with missing values inserted as appropriate. One can also perform right and left joins to include all rows of the right or left table (i.e. first or second argument to merge), but not necessarily the other.
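A minimal sketch of those variants, reusing the toy df1/df2 frames defined in the code above:

pd.merge(df1, df2, how='left')   # keep every row of df1
pd.merge(df1, df2, how='right')  # keep every row of df2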
Step18: We see that there is an mmsi value (a vessel identifier) in each table, but it is used as an index for the vessels table. In this case, we have to specify to join on the index for this table, and on the mmsi column for the other.
Step19: In this case, the default inner join is suitable; we are not interested in observations from either table that do not have corresponding entries in the other.
Step20: Occasionally, there will be fields in common between two tables that we do not wish to use to join them. Why not? They may contain different information, despite having the same name. In this case, Pandas will by default append suffixes _x and _y to the columns to uniquely identify them.
Step21: This default behavior can be overridden by specifying a suffixes argument, containing a list of the suffixes to be used for the columns of the left and right tables, respectively.
Step22: Notice that c_ and r_ are not really functions at all, since they perform an indexing operation rather than being called. They are actually class instances, but here they behave mostly like functions. Don't think about this too hard; just know that they are there.
Step23: Let's give the index and columns meaningful labels.
Step24: The index of these data is the unique biological classification of each organism, beginning with domain, phylum, class, and for some organisms, going all the way down to the genus level.
Step25: If we concatenate along axis=0 (the default), we will obtain another data frame with the rows concatenated
Step26: However, the index is no longer unique, due to overlap between the two DataFrames.
Step27: Concatenating along axis=1 will concatenate column-wise, but respecting the indices of the two DataFrames.
Step28: If we are only interested in taxa that are included in both DataFrames, we can specify a join=inner argument.
Step29: If we wanted to use the second table to fill values absent from the first table, we could use combine_first.
Step30: We can also create a hierarchical index based on keys identifying the original tables.
Step31: Alternatively, you can pass keys to the concatenation by supplying the DataFrames (or Series) as a dict, resulting in a "wide" format table.
Step32: If you want concat to work like numpy.concatenate, you may provide the ignore_index=True argument.
Step33: Reshaping DataFrame objects
Step34: This dataset includes repeated measurements of the same individuals (longitudinal data). It's possible to present such information in (at least) two ways
Step35: Have a peek at the structure of the index of the stacked data (and the data itself).
Step36: Exercise
Step37: If we want to transform this data so that repeated measurements are in columns, we can unstack the twstrs measurements according to obs.
Step38: We can now merge these reshaped outcomes data with the other variables to create a wide format DataFrame that consists of one row for each patient.
Step39: A slightly cleaner way of doing this is to set the patient-level information as an index before unstacking
Step40: To convert our "wide" format back to long, we can use the melt function, appropriately parameterized. This function is useful for DataFrames where one
Step41: This illustrates the two formats for longitudinal data
Step42: Exercise
Step43: A related method, pivot_table, creates a spreadsheet-like table with a hierarchical index, and allows the values of the table to be populated using an arbitrary aggregation function.
Step44: For a simple cross-tabulation of group frequencies, the crosstab function (not a method) aggregates counts of data according to factors in rows and columns. The factors may be hierarchical if desired.
Step45: Data transformation
Step46: These rows can be removed using drop_duplicates
Step47: Value replacement
Step48: A logical way to specify these numerically is to change them to integer values, perhaps using "Placebo" as a baseline value. If we create a dict with the original values as keys and the replacements as values, we can pass it to the map method to implement the changes.
Step49: Alternately, if we simply want to replace particular values in a Series or DataFrame, we can use the replace method.
Step50: In such situations, we can use replace to substitute nan where the sentinel values occur.
Step51: We can also perform the same replacement that we used map for with replace
Step52: Indicator variables
Step53: We can now apply get_dummies to the vessel type to create 5 indicator variables.
Step54: Discretization
Step55: Let's transform these data into decades, beginning with individuals in their 20's and ending with those in their 80's
Step56: The parentheses indicate an open interval, meaning that the interval includes values up to but not including the endpoint, whereas the square bracket is a closed interval, where the endpoint is included in the interval. We can switch the closure to the left side by setting the right flag to False
Step57: Since the data are now ordinal, rather than numeric, we can give them labels
Step58: A related function qcut uses empirical quantiles to divide the data. If, for example, we want the quartiles -- (0-25%], (25-50%], (50-70%], (75-100%] -- we can just specify 4 intervals, which will be equally-spaced by default
Step59: Alternatively, one can specify custom quantiles to act as cut points
Step60: Exercise
Step61: Permutation and sampling
Step62: Using this sequence as an argument to the take method results in a reordered DataFrame
Step63: Compare this ordering with the original
Step64: For random sampling, DataFrame and Series objects have a sample method that can be used to draw samples, with or without replacement
Step65: Exercise
Step66: Data aggregation and GroupBy operations
Step67: This grouped dataset is hard to visualize
Step68: However, the grouping is only an intermediate step; for example, we may want to iterate over each of the patient groups
Step69: A common data analysis procedure is the split-apply-combine operation, which groups subsets of data together, applies a function to each of the groups, then recombines them into a new data table.
Step70: Notice that the treat and sex variables are not included in the aggregation. Since it does not make sense to average string (non-numeric) variables, these columns are simply ignored by the method.
Step71: The add_prefix and add_suffix methods can be used to give the columns of the resulting table labels that reflect the transformation
Step72: Exercise
Step73: If we wish, we can easily aggregate according to multiple keys
Step74: Alternately, we can transform the data, using a function of our choice with the transform method
Step75: It is easy to do column selection within groupby operations, if we are only interested in performing split-apply-combine operations on a subset of columns
Step76: Or, as a DataFrame
Step77: If you simply want to divide your DataFrame into chunks for later use, it's easy to convert them into a dict so that they can be easily indexed out as needed
Step78: By default, groupby groups by row, but we can specify the axis argument to change this. For example, we can group our columns by dtype this way
Step79: It's also possible to group by one or more levels of a hierarchical index. Recall cdystonia2, which we created with a hierarchical index
Step80: The level argument specifies which level of the index to use for grouping.
Step81: Apply
Step82: To see this in action, consider the vessel transit segments dataset (which we merged with the vessel information to yield segments_merged). Say we wanted to return the 3 longest segments travelled by each ship
Step83: Notice that additional arguments for the applied function can be passed via apply after the function name. It assumes that the DataFrame is the first argument.
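As a quick illustration of that calling convention (a sketch reusing the top helper and segments_merged frame from the code), keyword arguments placed after the function name are forwarded to it:

segments_merged.groupby('mmsi').apply(top, column='seg_length', n=1)  # only the single longest segment per ship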
Step84: Using the string methods split and join we can create an index that just uses the first three classifications
Step85: However, since there are multiple taxonomic units with the same class, our index is no longer unique
Step86: We can re-establish a unique index by summing all rows with the same class, using groupby
Step87: Exercise
Step88: Women and children first?
|
920 | <ASSISTANT_TASK:>
Python Code:
import json
import base64
import os
import hashlib
from pprint import pprint # for debug
from jupyter_client.ioloop import IOLoopKernelManager
from tornado.httpclient import AsyncHTTPClient, HTTPClient
SLACK_URL = 'https://hooks.slack.com/services/XXXXXX/XXXXXX/XXXXXXX'
SLACK_TOKEN = 'XXXXXXXXX'
if os.path.exists('/tmp/defrag_demo'):
km = IOLoopKernelManager(connection_file='/tmp/defrag_demo')
km.load_connection_file()
print('connecting to existing kernel')
else:
km = IOLoopKernelManager()
km.start_kernel()
PLOT_DIR = '/home/jovyan/plots'
!mkdir -p $PLOT_DIR
def b64_to_file(b64_str, ext):
'''Dump a base64 encoded string to disk as a binary file with the given extension.'''
# decode base64 image and write to disk under a unique ID
img = base64.decodebytes(b64_str.encode('utf-8'))
# hash to filename
name = hashlib.sha1(img).hexdigest()
with open(os.path.join(PLOT_DIR, name+'.'+ext), 'wb') as f:
f.write(img)
return name
if 'iopub' in locals():
iopub.close()
iopub = km.connect_iopub()
def on_stream(content):
'''Handles stdout, stderr.'''
return dict(text=content['text'])
def on_display_data(content):
'''Handles rich output.'''
data = content['data']
response = {}
# prefer images
attachments = []
for key in data.keys():
if key.startswith('image'):
_, ext = key.split('/')
name = b64_to_file(data[key], ext)
# point to plot on the web
# TODO: don't hard code the server URL
attachments.append({
"fallback": "Oh noes! The plot didn't render!",
'image_url': 'http://parente.cloudet.xyz:9001/static/{}.png'.format(name)
})
if len(attachments):
response['attachments'] = attachments
# fallback on text
if 'text/plain' in data:
response['text'] = data['text/plain']
return response if len(response) else None
on_execute_result = on_display_data
http_client = AsyncHTTPClient()
def on_reply(stream, msg_list):
# process raw messages
idents, msg_list = km.session.feed_identities(msg_list)
msg = km.session.deserialize(msg_list)
# get delegate based on message type
func = globals().get('on_'+msg['msg_type'])
if func is not None:
# get an optional response
response = func(msg['content'])
if response:
# dump the response as JSON to Slack
http_client.fetch(SLACK_URL, method='POST',
body=json.dumps(response),
headers={'Content-Type' : 'application/json'})
iopub.on_recv_stream(on_reply)
kc = km.client()
import tornado.web
import tornado.httpserver
import json
class IncomingHandler(tornado.web.RequestHandler):
def get(self):
self.finish('{"status": "ok"}')
def post(self):
token = self.get_body_argument('token')
if token != SLACK_TOKEN:
return self.send_error(401)
# get code to run
code = self.get_body_argument('text')
# remove command prefix, up to first space
code = code[code.find(' ')+1:].strip()
# execute the code in the other kernel
kc.execute(code)
# return nothing for now
self.finish()
application = tornado.web.Application([
(r"/", IncomingHandler)
], static_path=PLOT_DIR)
if 'server' in locals():
server.stop()
server = tornado.httpserver.HTTPServer(application)
server.listen(9001, '0.0.0.0')
kc.execute('print("Hi, all")')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll set some hard coded values up front. These should really get read from the environment but I'm lazy at the moment.
Step2: Try connecting to an existing kernel connection file if one exists specifically for the defrag demo. If it's not there, start a new kernel separate from this one. This keeps user code out of the namespace of this service and avoids hard-to-reason-about async request handling all within a single kernel.
Step3: Slack doesn't accept base64 encoded blobs back as images. We can only send back a real URL. We'll make a local directory where we'll dump any plots generated by the kernel to disk. Later, we'll serve them up through the same web server that we'll use to handle the Slack outgoing webhook calls.
Step4: Connect to the iopub socket to receive kernel output.
Step5: Define functions to handle message types of interest. The generic on_reply below dispatches to these.
Step6: The doc says these two types of messages are equivalent. So just alias the function.
Step7: Build a HTTP client to use to push messages back to Slack.
Step8: Process all kernel replies as they come in. Use the kernel manager methods for converting ZeroMQ stream byte strings to nice Python dictionaries.
Step9: Hook the on_reply to the iopub stream.
Step10: Create a client that can be used to execute code on the kernel.
Step11: From Slack
Step12: Define a simple HTTP handler for Slack POSTs. Support a GET for liveliness checks too.
Step13: Map the handler and start listening.
Step14: Say hi.
|
921 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from string import ascii_uppercase as alphabet
def get_board(board_size):
x, y = np.meshgrid(range(board_size), range(board_size))
board = np.empty(shape=(board_size, board_size), dtype='uint8')
text_colors = np.empty_like(board, dtype='<U5')
# force left bottom corner cell to be black
if board_size % 2 == 0:
extra_term = 1
else:
extra_term = 0
for i, j in zip(x.flatten(), y.flatten()):
board[i, j] = (i + j + extra_term) % 2
# text color should be the opposite to a cell color
text_colors[i, j] = 'black' if board[i, j] else 'white'
return board, text_colors
def get_valencies(piece, board):
# Get valencies for the given piece on the given board
valencies = np.empty_like(board)
if piece == 'Pawn':
valencies = pawn(valencies)
elif piece == 'Knight':
valencies = knight(valencies)
elif piece == 'Rook':
valencies = rook(valencies)
elif piece == 'King':
valencies = king(valencies)
else:
valencies = bishop_or_queen(piece, valencies)
return valencies
def plot_board(board, text_colors, piece):
board_size = np.shape(board)[0]
x, y = np.meshgrid(range(board_size), range(board_size))
# fixed figure size
plt.figure(figsize=(2*board_size/5, 2*board_size/5))
ax = plt.subplot(111)
ax.imshow(board, cmap='gray', interpolation='none')
# Display valency (degree) values
val_board = get_valencies(piece, board)
for i, j, valency, text_col in zip(x.flatten(), y.flatten(),
val_board.flatten(),
text_colors.flatten()):
ax.text(i, j, str(valency), color=text_col,
va='center', ha='center', fontsize=12)
ax.set_xticks(np.arange(board_size+1)) # one tick per cell
ax.set_xticklabels(alphabet[:board_size]) # set letters as ticklabels
# one tick per cell
ax.set_yticks(np.arange(board_size+1))
# set numbers as ticklabels (upside down)
ax.set_yticklabels(np.arange(board_size, 0, -1))
ax.axis('tight') # get rid of the white spaces on the edges
# ax.set_title(piece, fontsize=30)
plt.show()
def pawn(valencies):
valencies[0, :] = 0 # empty horizontal line
valencies[1:, 0] = valencies[1:, -1] = 1 # vertical edges
valencies[1:, 1:-1] = 2
return valencies
def special_cases(piece, board_sizes):
''' Plot boards of every board_size,
contained in board_sizes list for given piece.
'''
for board_size in board_sizes:
board, text_colors = get_board(board_size=board_size)
plot_board(board, text_colors, piece=piece)
special_cases(piece='Pawn', board_sizes=range(4,6))
def knight(valencies):
board_size = valencies.shape[0]
if board_size > 3:
# Four points in each corner are the same for any board size > 3.
# corner cells
valencies[0, 0] = valencies[0, -1] = \
valencies[-1, 0] = valencies[-1, -1] = 2
# cells horizontally/vertically adjacent to the corners
valencies[0, 1] = valencies[1, 0] = \
valencies[0, -2] = valencies[1, -1] = \
valencies[-2, 0] = valencies[-1, 1] = \
valencies[-2, -1] = valencies[-1, -2] = 3
# cells diagonally adjacent
valencies[1, 1] = valencies[1, -2] = \
valencies[-2, 1] = valencies[-2, -2] = 4
if board_size > 4:
valencies[0, 2:-2] = valencies[2:-2, 0] = \
valencies[2:-2, -1] = valencies[-1, 2:-2] = 4
valencies[1, 2:-2] = valencies[2:-2, 1] = \
valencies[2:-2, -2] = valencies[-2, 2:-2] = 6
valencies[2:-2, 2:-2] = 8
# Pathological cases
elif board_size == 3:
valencies = 2 * np.ones((board_size, board_size), dtype='uint8')
valencies[1, 1] = 0
else:
valencies = np.zeros((board_size, board_size), dtype='uint8')
return valencies
special_cases(piece='Knight', board_sizes=[4,5,6])
def bishop_or_queen(piece, valencies):
board_size = np.shape(valencies)[0]
if piece == 'Bishop':
smallest_val = board_size-1
else:
smallest_val = 3*(board_size-1)
# external square
valencies[0, :] = valencies[:, 0] = \
valencies[:, -1] = valencies[-1, :] = smallest_val
# internal sqares
for i in range (1, int(board_size/2)+1):
# top, left
# right, bottom
valencies[i, i:-i] = valencies[i:-i, i] = \
valencies[i:-i, -(i+1)] = valencies[-(i+1), i:-i] = \
smallest_val + 2*i
return valencies
special_cases(piece='Bishop', board_sizes=range(4,8))
def rook(valencies):
board_size = np.shape(valencies)[0]
x, y = np.meshgrid(range(board_size), range(board_size))
for i, j in zip(x.flatten(), y.flatten()):
valencies[i, j] = 2*(board_size-1)
return valencies
special_cases(piece='Rook', board_sizes=range(4,6))
special_cases(piece='Queen', board_sizes=range(4,8))
def king(valencies):
# corners : top left = top right = \
# bottom left = bottom right
valencies[0, 0] = valencies[0, -1] = \
valencies[-1, 0] = valencies[-1, -1] = 3
# edges : top, left, right, bottom
valencies[0, 1:-1] = valencies[1:-1, 0] = \
valencies[1:-1, -1] = valencies[-1, 1:-1] = 5
# center
valencies[1:-1, 1:-1] = 8
return valencies
special_cases(piece='King', board_sizes=range(4,6))
def get_probabilities(piece, n):
# NOTE: Results can be wrong for large n because of dividing by
# the huge denominator!
if piece == 'Pawn':
return 2*(n-1)/((n**2)*(n+1))
elif piece == 'Knight':
return 8*(n-2)/((n**2)*(n+1))
elif piece == 'Bishop':
return 2*(2*n-1)/(3*n*(n+1))
elif piece == 'Rook':
return 2/(n+1)
elif piece == 'Queen':
return 2*(5*n-1)/(3*n*(n+1))
elif piece == 'King':
return 4*(2*n-1)/(n**2*(n+1))
def straightforward_prob(piece, board_size):
# Get probability directly from the board of valencies
board, _ = get_board(board_size)
val_board = get_valencies(piece, board)
unique, counts = np.unique(val_board, return_counts=True)
prob = np.dot(unique, counts)/((board_size)**2 * (board_size**2 - 1))
return prob
start = 2
end = 16
step = 0.02
x = np.arange(start, end)
names_list = ['Pawn', 'Knight', 'Bishop', 'Rook', 'Queen', 'King']
# Check if analytical results match straightforward calculations
for name in names_list:
for board_size in x:
y = get_probabilities(name, board_size)
if not y == straightforward_prob(name, board_size):
print('Mistake in equation for %s' % name)
# print('Analytical results approved')
# Let's expand the range from Z to R for the sake of visual clarity
x = np.arange(start, end, step)
fig, ax = plt.subplots(figsize=(8, 5))
for name in names_list:
y = get_probabilities(name, x)
plt.plot(x, y, label=name, linewidth=3.0)
legend = plt.legend(loc='upper right')
for label in legend.get_lines():
label.set_linewidth(3)
for label in legend.get_texts():
label.set_fontsize(20)
plt.xlabel("Board size", fontsize=20)
plt.ylabel("Probability", fontsize=20)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pawn
Step2: Let us look at a few special cases in search of a pattern.
Step3: The pattern is obvious - there is always one rank (the top or the bottom one, depending on the piece's color) from which the pawn attacks no squares at all, so every square of that rank is $0$-valent. There are $n$ such squares.
Step4: The number of $2$- and $3$-valent cells is fixed for any $n\geq 4$. The former sit in the corners, and the latter are adjacent to them vertically and horizontally. Hence there are $4$ cells of valency $2$ and $8$ cells of valency $3$, twice as many. The $4$-valent cells form an arithmetic progression with first element $4$ and step $4$ for all $n\geq 4$ (each unit increase of $n$ adds one $4$-valent cell on every side). It is easy to see that the number of $6$-valent cells grows in the same way, but they exist only for $n\geq 5$. Thus there are $4(n-3)$ cells of valency $4$ and $4(n-4)$ cells of valency $6$. The number of $8$-valent cells grows quadratically, and they too exist only for $n\geq 5$; their count is $(n-4)^2$. Altogether we have
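As a worked check of these counts (assuming, as in the straightforward_prob function further down, that the attack probability is the sum of all valencies divided by $n^2(n^2-1)$, the number of ordered placements of two pieces on distinct squares):
$$\sum_{\text{cells}}\deg = 4\cdot 2 + 8\cdot 3 + 4(n-3)\cdot 4 + 4(n-4)\cdot 6 + (n-4)^2\cdot 8 = 8(n-1)(n-2),$$
$$P_{\text{knight}}(n) = \frac{8(n-1)(n-2)}{n^2(n^2-1)} = \frac{8(n-2)}{n^2(n+1)},$$
which is exactly the knight expression returned by get_probabilities.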
Step5: One can see that cells of equal valency lie along the perimeters of the concentric squares they form. Since for even $n$ there are $4$ squares of maximal valency at the center of the board, while for odd $n$ there is only one, it is convenient to treat the even and odd cases of $n$ separately.
Step6: A well-known property of the rook is that, wherever it stands on the board, it always controls the same number of squares, namely $2(n-1)$ - the sum of the squares along its rank and file minus the square the rook itself occupies.
Step7: Since the queen combines the moves of the bishop and the rook, its expression can be obtained as the sum of the expressions for those two pieces
Step8: One can see that the edges of the board, apart from the $3$-valent corners, are $5$-valent, while all the remaining space is $8$-valent. Since there are $4$ edges and $(n-2)$ $5$-valent cells on each of them, we have
Step9: The plot showing how the probability depends on the board size is drawn as a function of a real variable purely for visual clarity.
|
922 | <ASSISTANT_TASK:>
Python Code:
from fbprophet import Prophet
import pandas as pd
%matplotlib notebook
import matplotlib
date_parse = lambda date: pd.datetime.strptime(date, '%Y-%m-%d')
time_series = pd.read_csv("solarhringsumferd-a-talningarsto.csv", header=0, names=['ds', 'y'], usecols=[0, 1],
parse_dates=[0], date_parser=date_parse)
training = time_series[time_series['ds'] < '2009-01-01']
testing = time_series[time_series['ds'] > '2009-01-01']
testing.columns = ['ds', 'ytest']
training.plot(x='ds');
# Train the model.
model = Prophet()
model.fit(training)
# Define period to make forecast for.
future = model.make_future_dataframe(periods=365*2)
# Perform prediction for the defined period.
predicted_full = model.predict(future)
# We only plot date and predicted value.
# Full prediction contains much more data, like confidence intervals, for example.
predicted = predicted_full[['ds', 'yhat']]
# Plot training, testing and predicted values together.
combined = training.merge(testing, on='ds', how='outer')
combined = combined.merge(predicted, on='ds', how='outer')
combined.columns = ['Date', 'Training', 'Testing', 'Predicted']
# Only show the "intersting part" - no point in looking at the past.
combined[combined['Date'] > '2008-01-01'].plot(x='Date');
model.plot_components(predicted_full);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample road traffic data in CSV format can be downloaded here (for the sake of this example, dataset is included in the repository).
Step2: Split data into two sets - one for training and one for testing our model
Step3: All that is required to do the forecast
Step4: We can also query our model to show us trend, weekly and yearly components of the forecast
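To put a number on how well the forecast tracks the held-out data, one can compute a simple error metric on the merged frame (a sketch, not part of the original notebook):
# mean absolute error over the testing window
eval_df = testing.merge(predicted, on='ds', how='inner')
mae = (eval_df['ytest'] - eval_df['yhat']).abs().mean()
print('MAE on held-out data:', mae)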
|
923 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
import sys
from pathlib import Path
%load_ext autoreload
%autoreload 2
from DeepSDF import DeepSDF
# model config
deep_sdf_config = {
"nb_layers": 8,
"latent_dim": 1,
"inner_activation": "relu",
"out_activation": "tanh",
"hidden_dims" : [512]*7,
"norm_layers" : [0, 1, 2, 3, 4, 5, 6, 7],
"weight_norm" : True,
"clamp_delta": 0.1,
}
latent_dim = deep_sdf_config['latent_dim']
# init model
deep_sdf = DeepSDF(**deep_sdf_config)
model = deep_sdf.model
model.summary()
# setup training data
latent = np.random.rand(latent_dim)
#x_train = [all_xyz, np.tile(latent, (all_xyz.shape[0], 1))]
#y_train = mesh_voxels.flatten()
x_train = [points, np.tile(latent, (points.shape[0], 1))]
y_train = sdf
#print(x_train.shape)
print(y_train.shape)
# train
deep_sdf.train(
x_train, y_train,
nb_epochs=10,
batch_size=256,
latent=latent
)
pred_voxel = deep_sdf.model.predict([all_xyz, np.tile(latent, (all_xyz.shape[0], 1))])
pred_voxel.shape
pred_voxel
data_dir = Path.home() / 'Documents/generated_data/neural_rendering'
from mesh_to_sdf import mesh_to_voxels, sample_sdf_near_surface
import trimesh
import skimage.measure
voxel_resolution = 50
mesh = trimesh.load(data_dir / 'suzanne.obj')
mesh_voxels = mesh_to_voxels(mesh, voxel_resolution, pad=False)
mesh_voxels.shape
voxels = pred_voxel.reshape(tuple([size]*3))
# show mesh
vertices, faces, normals, _ = skimage.measure.marching_cubes_lewiner(voxels, level=0)
mesh = trimesh.Trimesh(vertices=vertices, faces=faces, vertex_normals=normals)
mesh.show()
# save voxels to file
np.save(data_dir / 'suzanne.npy', np.array(voxels, dtype=np.float16))
mesh = trimesh.load(data_dir / 'suzanne.obj')
number_of_points = 250000
points, sdf = sample_sdf_near_surface(mesh, number_of_points=number_of_points)
points.shape
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model
Step2: Predict
Step3: Data Processing
Step4: Voxelize Mesh
Step5: Sample SDF
|
924 | <ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
ax = plt.axes()
line1, = ax.plot([0, 1, 2, 1.5], [3, 1, 2, 4])
ax.set_xlim(0.5, 2)
ax.set_ylim(1, 5)
plt.show()
ax = plt.axes()
line1, = ax.plot([0, 1, 2, 1.5], [3, 1, 2, 4])
ax.margins(0.1)
plt.show()
ax = plt.axes()
line1, = ax.plot([0, 1, 2, 1.5], [3, 1, 2, 4])
ax.margins(0.1)
ax.set_ylim(bottom=0)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Defining a margin such that there is 10% whitespace inside the axes around the drawn line. (Hint
Step2: 3. Setting a 10% margin on the axes with the lower y limit set to 0. (Note
|
925 | <ASSISTANT_TASK:>
Python Code:
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
%matplotlib inline
import matplotlib.pyplot as plt
import datacube
import datetime as dt
import xarray as xr
import numpy as np
from utils.data_cube_utilities.data_access_api import DataAccessApi
from utils.data_cube_utilities.plotter_utils import figure_ratio
api = DataAccessApi()
dc = api.dc
# Get available products
products_info = dc.list_products()
# List LANDSAT 7 products
print("LANDSAT 7 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_7"]
# List LANDSAT 8 products
print("LANDSAT 8 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_8"]
product = 'ls8_usgs_sr_scene'
platform = 'LANDSAT_8'
collection = 'c1'
level = 'l2'
from utils.data_cube_utilities.dc_load import get_product_extents
full_lat, full_lon, min_max_dates = get_product_extents(api, platform, product)
print("{}:".format(platform))
print("Lat bounds:", full_lat)
print("Lon bounds:", full_lon)
print("Time bounds:", min_max_dates)
from utils.data_cube_utilities.dc_display_map import display_map
# Display the total shared area available for these datacube products.
display_map(latitude = full_lat,longitude = full_lon)
# from datetime import datetime
# start_date, end_date = (datetime(2010,1,1), datetime(2011,1,1))
# start_date, end_date = dt.datetime(2014,1,1), dt.datetime(2016,1,1)
start_date, end_date = dt.datetime(2014,9,1), dt.datetime(2015,3,1)
date_range = (start_date, end_date)
# Specify latitude and longitude bounds of an interesting area within the full extents
# Vietnam
# lat_small = (9.8, 9.85) # Area #1
# lon_small = (105.1, 105.15) # Area #1
# Ghana
# Weija Reservoir - North
lat_small = (5.5974, 5.6270)
lon_small = (-0.3900, -0.3371)
display_map(latitude = lat_small,longitude = lon_small)
from utils.data_cube_utilities.dc_chunker import create_geographic_chunks
geographic_chunks = create_geographic_chunks(
latitude=lat_small,
longitude=lon_small,
geographic_chunk_size=.05)
from utils.data_cube_utilities.clean_mask import landsat_clean_mask_full
from utils.data_cube_utilities.dc_mosaic import create_hdmedians_multiple_band_mosaic
measurements = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'pixel_qa']
product_chunks = []
for index, chunk in enumerate(geographic_chunks):
data = dc.load(measurements = measurements,
time = date_range,
platform = platform,
product = product,
longitude=chunk['longitude'],
latitude=chunk['latitude'])
# Mask out clouds and scan lines.
clean_mask = landsat_clean_mask_full(dc, data, product=product, platform=platform,
collection=collection, level=level)
# Create the mosaic.
product_chunks.append(create_hdmedians_multiple_band_mosaic(data, clean_mask=clean_mask, dtype=np.float32))
from utils.data_cube_utilities.dc_chunker import combine_geographic_chunks
final_composite = combine_geographic_chunks(product_chunks)
from utils.data_cube_utilities.dc_rgb import rgb
fig = plt.figure(figsize=figure_ratio(final_composite, fixed_width=8, fixed_height=8))
rgb(final_composite, bands=['red', 'green', 'blue'], fig=fig)
plt.title('True Color Geomedian Composite', fontsize=16)
plt.show()
fig = plt.figure(figsize=figure_ratio(final_composite, fixed_width=8, fixed_height=8))
rgb(final_composite, bands=['swir1', 'nir', 'red'], fig=fig)
plt.title('False Color Geomedian Composite', fontsize=16)
plt.show()
final_composite.swir1.plot(figsize = figure_ratio(final_composite, fixed_width=10,
fixed_height=10), cmap = 'magma')
plt.title('SWIR1 Composite', fontsize=16)
plt.show()
from utils.data_cube_utilities.import_export import export_slice_to_geotiff
import os
geotiff_dir = 'output/geotiffs/Clustering_Notebook'
if not os.path.exists(geotiff_dir):
os.makedirs(geotiff_dir)
export_slice_to_geotiff(final_composite, '{}/final_composite.tif'.format(geotiff_dir))
from utils.data_cube_utilities.aggregate import xr_scale_res
from utils.data_cube_utilities.dc_clustering import kmeans_cluster_dataset, get_frequency_counts
# Bands used for clustering
cluster_bands = ['red', 'green', 'blue', 'swir1']
classification_4 = kmeans_cluster_dataset(final_composite, cluster_bands, n_clusters=4)
freq_counts_4 = get_frequency_counts(classification_4)
classification_8 = kmeans_cluster_dataset(final_composite, cluster_bands, n_clusters=8)
freq_counts_8 = get_frequency_counts(classification_8)
classification_12 = kmeans_cluster_dataset(final_composite, cluster_bands, n_clusters=12)
freq_counts_12 = get_frequency_counts(classification_12)
# Define standard formatting.
def get_figsize_geospatial(fixed_width=8, fixed_height=14,
num_cols=1, num_rows=1):
return figure_ratio(final_composite,
fixed_width=fixed_width, fixed_height=fixed_height,
num_cols=num_cols, num_rows=num_rows)
xarray_imshow_params = dict(use_colorbar=False, use_legend=True,
fig_kwargs=dict(dpi=120, figsize=get_figsize_geospatial()))
from utils.data_cube_utilities.plotter_utils import xarray_imshow
for class_num, freq, fractional_freq in freq_counts_4:
# The `*_cluster_dataset()` functions set -1 as the cluster number for "rows" with missing data.
class_num, freq = int(class_num), int(freq)
class_mem_str = "in class {:d}".format(class_num) if class_num != -1 else "that had missing data"
print("There were {:d} data points {}, comprising {:.2%} "\
"of all data points.".format(int(freq), class_mem_str,
fractional_freq))
legend_labels = {v:"Cluster {}".format(v) if v != -1 else "Missing Data" for v in np.unique(classification_4)}
xarray_imshow(classification_4, **xarray_imshow_params, legend_labels=legend_labels)
plt.show()
for class_num, freq, fractional_freq in freq_counts_8:
# The `*_cluster_dataset()` functions set -1 as the cluster number for "rows" with missing data.
class_num, freq = int(class_num), int(freq)
class_mem_str = "in class {:d}".format(class_num) if class_num != -1 else "that had missing data"
print("There were {:d} data points {}, comprising {:.2%} "\
"of all data points.".format(int(freq), class_mem_str,
fractional_freq))
legend_labels = {v:"Cluster {}".format(v) if v != -1 else "Missing Data" for v in np.unique(classification_8)}
xarray_imshow(classification_8, **xarray_imshow_params, legend_labels=legend_labels)
plt.show()
for class_num, freq, fractional_freq in freq_counts_12:
# The `*_cluster_dataset()` functions set -1 as the cluster number for "rows" with missing data.
class_num, freq = int(class_num), int(freq)
class_mem_str = "in class {:d}".format(class_num) if class_num != -1 else "that had missing data"
print("There were {:d} data points {}, comprising {:.2%} "\
"of all data points.".format(int(freq), class_mem_str,
fractional_freq))
legend_labels = {v:"Cluster {}".format(v) if v != -1 else "Missing Data" for v in np.unique(classification_12)}
xarray_imshow(classification_12, **xarray_imshow_params, legend_labels=legend_labels)
plt.show()
from utils.data_cube_utilities.import_export import export_slice_to_geotiff
if not os.path.exists(geotiff_dir):
os.makedirs(geotiff_dir)
output_kmeans_cluster4_file_path = os.path.join(geotiff_dir, "cluster4_kmeans.tif")
output_kmeans_cluster8_file_path = os.path.join(geotiff_dir, "cluster8_kmeans.tif")
output_kmeans_cluster12_file_path = os.path.join(geotiff_dir, "cluster12_kmeans.tif")
export_slice_to_geotiff(classification_4.to_dataset(name='classification'),
output_kmeans_cluster4_file_path)
export_slice_to_geotiff(classification_8.to_dataset(name='classification'),
output_kmeans_cluster8_file_path)
export_slice_to_geotiff(classification_12.to_dataset(name='classification'),
output_kmeans_cluster12_file_path)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span id="clustering_notebook_plat_prod">Choose Platform and Product ▴</span>
Step2: Choose product and platform
Step3: <span id="clustering_notebook_extents">Get the Maximum Extents of the Cube ▴</span>
Step4: <span id="clustering_notebook_define_extents">Define the Extents of the Analysis ▴</span>
Step5: Specify an area to analyze
Step6: Visualize the selected area
Step7: <span id="clustering_notebook_retrieve_data">Load Data from the Data Cube and Create a Composite ▴</span>
Step8: Create a geomedian composite
Step9: Combine the chunks to produce the final mosaic
Step10: <span id="clustering_notebook_examine_composite">Examine the Composite and Export as a GeoTIFF ▴</span>
Step11: False color
Step12: Example of a composited swir1 band
Step13: Export to GeoTIFF
Step14: <span id="clustering_notebook_cluster">Perform Clustering ▴</span>
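The clustering itself is done by the kmeans_cluster_dataset helper from the utils package; for orientation only, the same idea written directly against scikit-learn might look like the sketch below (this is an assumption about the general approach, not the helper's actual implementation):
import numpy as np
from sklearn.cluster import KMeans
# stack the chosen bands into an (n_pixels, n_bands) feature matrix
features = np.stack([final_composite[b].values.ravel() for b in cluster_bands], axis=-1)
valid = np.all(np.isfinite(features), axis=1)       # drop pixels with missing data
labels = np.full(features.shape[0], -1, dtype=int)  # -1 marks missing data
labels[valid] = KMeans(n_clusters=4, random_state=0).fit_predict(features[valid])
labels_2d = labels.reshape(final_composite['red'].shape)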
Step15: <span id="clustering_notebook_visualize">Visualize the Clustered Data ▴</span>
Step16: <span id="clustering_notebook_export_clustered_data">Export the Clustered Data as a GeoTIFF ▴</span>
|
926 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
import time
import matplotlib.pyplot as plt
import tensorflow as tf
import sys
sys.path.append('..')
import models.VAE as vae
import os
from io import BytesIO
import PIL.Image
import scipy.misc
import scipy.io
from IPython.display import clear_output, Image, display
np.random.seed(0)
# load data
import utils.DataReader as Data
# get data handler
data_type = 'mnist'
data_dir = '/home/mattw/Dropbox/git/dreamscape/data/'
if data_type is 'mnist':
data = Data.DataReaderMNIST(data_dir + 'mnist/', one_hot=True)
elif data_type is 'cifar':
data = Data.DataReaderCIFAR(data_dir + 'cifar/', one_hot=True)
# store processed images
saving = True
save_path = '/home/mattw/Desktop/test_movie'
# define model params
layers_encoder = [784, 400, 400]
layer_latent = 2
layers_decoder = [400, 400, 784]
# initialize model
net = vae.VAE(
layers_encoder=layers_encoder,
layer_latent=layer_latent,
layers_decoder=layers_decoder)
# define training params
batch_size = 128
use_gpu = 1
iters_per_image = np.logspace(0, 3, num=50).astype(int)
iters_disp = None
iters_ckpt = None
iters_summary = None
# image details
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
# start the tensorflow session
config = tf.ConfigProto(device_count = {'GPU': use_gpu})
sess = tf.Session(config=config, graph=net.graph)
sess.run(net.init)
for epoch, iters_training in enumerate(iters_per_image):
# output updates
print('\rEpoch %03g of %03g - training for %05g iters' %
(epoch + 1, iters_per_image.size, iters_per_image[epoch]), end='')
# train model
net.train_iters(
sess,
data=data,
batch_size=batch_size,
iters_training=iters_training,
iters_disp=iters_disp)
# create latent state representation
canvas = np.empty((28*ny, 28*nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mean = np.array([[xi, yi]])
x_mean = net.generate(sess, z_mean=z_mean)
canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)
# save output
if saving:
file_name = str('vae-mnist_epoch_%03i.jpg' % epoch)
file_path = '/'.join([save_path, file_name])
canvas = np.uint8(255*canvas)
PIL.Image.fromarray(canvas).save(file_path, 'jpeg')
sess.close()
%reload_ext watermark
%watermark -a "Matt Whiteway" -d -v -m -p numpy,tensorflow
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GIF animation of latent space during training
Step2: to create a gif
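One way to stitch the saved frames into the gif (assuming the imageio package is available; the output filename is arbitrary):
import glob
import imageio
frames = [imageio.imread(f) for f in sorted(glob.glob(save_path + '/vae-mnist_epoch_*.jpg'))]
imageio.mimsave(save_path + '/vae-mnist_training.gif', frames, duration=0.2)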
|
927 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append("/usr/local/lib/python3.4/dist-packages/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/revscoring/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/more_itertools/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/deltas/")
!sudo pip3 install dependencies deltas
from revscoring.extractors import api
import mwapi
extractor = api.Extractor(mwapi.Session("https://en.wikipedia.org",
user_agent="Revscoring feature demo ahalfaker@wikimedia.org"))
from revscoring import Feature
chars_added_ratio_explicit = Feature(
"chars_added_ratio_explicit",
lambda a,c: a/max(c, 1), # Prevents divide by zero
depends_on=[wikitext.revision.diff.chars_added,
wikitext.revision.chars],
returns=float)
list(extractor.extract(123456789, [chars_added_ratio_explicit]))
from revscoring.features import modifiers
chars_added_ratio_implicit = (wikitext.revision.diff.chars_added /
modifiers.max(wikitext.revision.chars, 1))
list(extractor.extract(123456789, [chars_added_ratio_implicit]))
chars_added_ratio_explicit, chars_added_ratio_implicit
list(extractor.extract(662953550, [wikitext.revision.diff.datasources.segments_added,
wikitext.revision.diff.datasources.segments_removed]))
import mwparserfromhell as mwp
templates_added = Feature("templates_added",
lambda add_segments: sum(len(mwp.parse(s).filter_templates()) > 0 for s in add_segments),
depends_on=[wikitext.revision.diff.datasources.segments_added],
returns=int)
list(extractor.extract(662953550, [templates_added]))
from revscoring.dependencies import draw
print(draw(templates_added))
print(draw(wikitext.revision.diff.number_prop_delta_sum))
try:
list(extractor.extract(2, [wikitext.revision.diff.words_added]))
except Exception as e:
print(e)
try:
list(extractor.extract(262721924, [wikitext.revision.diff.words_added]))
except Exception as e:
print(e)
from revscoring.features import revision_oriented
try:
list(extractor.extract(172665816, [revision_oriented.revision.comment_matches("foo")]))
except Exception as e:
print(e)
from revscoring.features import temporal
try:
list(extractor.extract(591839757, [revision_oriented.revision.user.text_matches("foo")]))
except Exception as e:
print(e)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extract features
Step2: There are easier ways that we can do this, though. revscoring.Feature overloads simple mathematical operators to allow you to do math with features and get a feature returned. revscoring.features.modifiers contains a set of basic functions that do the same. This code roughly corresponds to what's going on above.
Step3: While the implicit pattern is quicker and easier than the explicit pattern, its name cannot be customized.
Step4: Extracting datasources
Step5: OK. Let's define a new feature for counting the number of templates added. I'll make use of mwparserfromhell to do this. See the docs.
Step6: Debugging
Step7: In the tree structure above, you can see how our new feature depends on wikitext.revision.diff.segments_added which depends on wikitext.revision.diff.operations which depends (as you might imagine) on the current and parent revision. Some features can get quite complicated.
Step8: The dependency injection system will only solve a unique dependency once for a given tree. So, even though <revision.parent.text> appears twice above, it will only be extracted once and then cached. This allows multiple features to share large sections of their dependency trees -- and therefore minimize resource usage.
|
928 | <ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
import time
import sys
import numpy as np
class zSentimentNetwork:
def __init__(self, reviews, labels, hidd_nodes = 10, learning_rate = .1):
# seed the random number generator for reproducibility
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab), hidd_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden, and output layers:
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Start weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] += 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = zSentimentNetwork(reviews[:-1000], labels[:-1000], learning_rate=.1)
# evaluate model before training
mlp.test(reviews[-1000:], labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = zSentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson
Step2: Project 1
Step3: Transforming Text into Numbers
Step4: Project 2
Step5: Project 3
|
929 | <ASSISTANT_TASK:>
Python Code:
from shenfun import *
print('hello world')
from shenfun import *
N = 8
C = FunctionSpace(N, 'Chebyshev', quad='GC', domain=[-2, 2])
L = FunctionSpace(N, 'Legendre')
x, w = C.points_and_weights()
print(L.points_and_weights())
C0 = FunctionSpace(N, 'Chebyshev', bc=(0, 0))
L0 = FunctionSpace(N, 'Legendre', bc=(0, 0))
H0 = FunctionSpace(N, 'Hermite')
La = FunctionSpace(N, 'Laguerre', bc=(0, None))
CN = FunctionSpace(N, 'Chebyshev', bc={'left': {'N': 0}, 'right': {'N': 0}})
LN = FunctionSpace(N, 'Legendre', bc={'left': {'N': 0}, 'right': {'N': 0}})
CB = FunctionSpace(N, 'Chebyshev', bc=(0, 0, 0, 0))
LB = FunctionSpace(N, 'Legendre', bc=(0, 0, 0, 0))
L0 = FunctionSpace(N, 'Legendre', bc=(0, 0))
C0 = FunctionSpace(N, 'Chebyshev', bc=(0, 0))
L1 = FunctionSpace(N, 'Legendre')
LL = TensorProductSpace(comm, (L0, L1)) # comm is MPI.COMM_WORLD
CL = TensorProductSpace(comm, (C0, L1))
V = VectorSpace(LL) # For vector valued functions
f = Array(LL)
print(f.shape)
L0 = FunctionSpace(N, 'Legendre', bc=(0, 0))
L1 = FunctionSpace(N, 'Chebyshev', bc={'left': ('N', 0), 'right': ('N', 0)})
F2 = FunctionSpace(N, 'Fourier', dtype='d')
T = TensorProductSpace(comm, (L0, L1, F2))
f = Array(T)
print(f.dtype)
help(project)
L0 = FunctionSpace(N, 'Legendre', bc=(0, 0))
L1 = FunctionSpace(N, 'Legendre')
u = TrialFunction(L0)
v = TestFunction(L0)
uh = Function(L0)
g = Array(L0)
du = grad(u) # vector valued expression
h = div(du) # scalar valued expression
A = inner(Dx(u, 0, 3), v)
print(A.diags().todense())
uj = Array(L0)
uj = uh.backward(uj)
uh = uj.forward(uh)
dudx = project(Dx(uh, 0, 1), L1) # Compute du/dx
wh = project(uj, L1)
import sympy as sp
x, y = sp.symbols('x,y')
ws = project(sp.sin(4*x), L1)
F0 = FunctionSpace(N, 'F', dtype='D')
F1 = FunctionSpace(N, 'F', dtype='D')
F2 = FunctionSpace(N, 'F', dtype='d')
FF = TensorProductSpace(comm, (F0, F1, F2))
uh = Function(FF)
ua = Array(FF)
ua[:] = np.random.random(ua.shape)
uh = ua.forward(uh)
du = div(grad(uh))
V = VectorSpace(FF)
vv = Function(V)
c = curl(vv)
ch = project(c, V)
A = inner(grad(u), grad(v))
dict(A)
print(A.diags().todense())
# Solve Poisson's equation
import matplotlib.pyplot as plt
from sympy import symbols, sin, cos, lambdify
from shenfun import *
# Use sympy to compute manufactured solution
x, y = symbols("x,y")
ue = sin(6*np.pi*x)*(1-x**2) # `ue` is the manufactured solution
fe = ue.diff(x, 2) # `fe` is Poisson's right hand side for `ue`
SD = FunctionSpace(20, 'L', bc=(0, 0))
u = TrialFunction(SD)
v = TestFunction(SD)
b = inner(v, Array(SD, buffer=fe)) # Array is initialized with `fe`
A = inner(v, div(grad(u)))
uh = Function(SD)
uh = A.solve(b, uh) # Very fast solver due to Jie Shen
print(uh.backward()-Array(SD, buffer=ue))
plt.plot(SD.mesh(), uh.backward())
L0 = FunctionSpace(N, 'Legendre', bc=(0, 0))
F1 = FunctionSpace(N, 'Fourier', dtype='d')
TP = TensorProductSpace(comm, (L0, F1))
u = TrialFunction(TP)
v = TestFunction(TP)
A = inner(grad(u), grad(v))
print(A)
A = inner(grad(u), grad(v)) # <- list of two TPMatrices
print(A[0].mats)
print('Or as dense matrices:')
for mat in A[0].mats:
print(mat.diags().todense())
print(A[1].mats[0].diags().todense())
#print(A[1].scale) # l^2
from sympy import symbols, sin, cos, lambdify
from shenfun import *
# Use sympy to compute manufactured solution
x, y, z = symbols("x,y,z")
ue = (cos(4*x) + sin(2*y) + sin(4*z))*(1-x**2)
fe = ue.diff(x, 2) + ue.diff(y, 2) + ue.diff(z, 2)
C0 = FunctionSpace(32, 'Chebyshev', bc=(0, 0))
F1 = FunctionSpace(32, 'Fourier', dtype='D')
F2 = FunctionSpace(32, 'Fourier', dtype='d')
T = TensorProductSpace(comm, (C0, F1, F2))
u = TrialFunction(T)
v = TestFunction(T)
# Assemble left and right hand
f_hat = inner(v, Array(T, buffer=fe))
A = inner(v, div(grad(u)))
# Solve
solver = chebyshev.la.Helmholtz(*A) # Very fast solver due to Jie Shen
u_hat = Function(T)
u_hat = solver(f_hat, u_hat)
assert np.linalg.norm(u_hat.backward()-Array(T, buffer=ue)) < 1e-12
print(u_hat.shape)
X = T.local_mesh()
ua = u_hat.backward()
plt.contourf(X[2][0, 0, :], X[0][:, 0, 0], ua[:, 2], 100)
plt.colorbar()
import subprocess
subprocess.check_output('mpirun -np 4 python poisson3D.py', shell=True)
uj = Array(SD)
#uj[:] = np.random.random(uj.shape)
uj = uh.backward(uj)
wh = Function(SD)
wh = SD.forward(uj*uj, wh)
N = (40, 40)
family = 'Legendre'
D0X = FunctionSpace(N[0], 'Legendre', bc=(0, 0))
#D1Y = FunctionSpace(N[1], 'Legendre', bc=(1, 0)) # Regular lid
D1Y = FunctionSpace(N[1], 'Legendre', bc=(0, (1-x)**2*(1+x)**2)) # Regularized lid
D0Y = FunctionSpace(N[1], 'Legendre', bc=(0, 0))
PX = FunctionSpace(N[0], 'Legendre')
PY = FunctionSpace(N[1], 'Legendre')
# All required spaces
V1 = TensorProductSpace(comm, (D0X, D1Y))
V0 = TensorProductSpace(comm, (D0X, D0Y))
Q = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
V = VectorSpace([V1, V0])
W = CompositeSpace([V0, V0])
VQ = CompositeSpace([V, Q])
PX.slice = lambda: slice(0, PX.N-2)
PY.slice = lambda: slice(0, PY.N-2)
# All required test and trial functions
up = TrialFunction(VQ)
vq = TestFunction(VQ)
u, p = up
v, q = vq
# Assemble matrices
A = inner(grad(v), -grad(u))
G = inner(div(v), p)
D = inner(q, div(u))
# Create Block matrix
sol = la.BlockMatrixSolver(A+G+D)
# Functions to hold solution and rhs
up_hat = Function(VQ).set_boundary_dofs()
fh_hat = Function(VQ)
# Solve Stokes problem. Note constraint for pressure
up_hat = sol(fh_hat, u=up_hat, constraints=((2, 0, 0),))
# Move solution to Array in real space
up = up_hat.backward()
u_, p_ = up
X = Q.local_mesh(True)
plt.quiver(X[0], X[1], u_[0], u_[1])
%matplotlib notebook
plt.figure(figsize=(6,4))
plt.spy(sol.mat.diags(), markersize=0.5)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Spectral Galerkin method
Step2: Jie Shen's bases with Dirichlet bcs
Step3: Shen's bases with Neumann $u'(\pm 1) = 0$
Step4: Shen's biharmonic bases $u(\pm 1) = u'(\pm 1) = 0$
Step5: Multidimensional tensor product spaces
Step6: Challenge 1
Step7: Operators in shenfun
Step8: The shenfun Function represents the solution
Step9: Projections
Step10: Challenge 2
Step11: Implementation matches mathematics
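In equation form, the two inner() calls in the Poisson example above assemble (with $(\cdot,\cdot)$ the $L^2$ inner product for the Legendre Dirichlet space used there): find $u_N \in V_N$ such that
$$\big(v, u_N''\big) = \big(v, f\big) \quad \forall\, v \in V_N,$$
which, since the basis functions vanish at the boundary, is equivalent after integration by parts to $-\big(v', u_N'\big) = \big(v, f\big)$.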
Step12: A diagonal stiffness matrix!
Step13: 2D - still closely matching mathematics
Step14: ?
Step15: 3D Poisson (with MPI and Fourier x 2)
Step16: Contour plot of slice with constant y
Step17: Run with MPI distribution of arrays
Step18: Note that Fourier bases are especially attractive because of features easily handled with MPI
Step19: Challenge 3
Step20: Implementation Stokes - matrices and solve
Step21: Sparsity pattern of block matrix
|
930 | <ASSISTANT_TASK:>
Python Code:
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql_advanced.ex4 import *
print("Setup Complete")
# Fill in your answer
query_to_optimize = ____
# Check your answer
q_1.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_1.hint()
#_COMMENT_IF(PROD)_
q_1.solution()
#%%RM_IF(PROD)%%
query_to_optimize = 3
q_1.check()
# Line below will give you a hint
#_COMMENT_IF(PROD)_
q_2.hint()
# View the solution (Run this code cell to receive credit!)
q_2.solution()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises
Step2: 2) Make it easier to find Mitzie!
|
931 | <ASSISTANT_TASK:>
Python Code:
from astropy.io import fits as fits
fitsimage=fits.open('filename.fits')
image=np.flipud(fitsimage[0].data)
import matplotlib.pyplot as plt
plt.imshow(image)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now the image is just a numpy array (matrix) that can be indexed like any other array. The np.flipud function ("flip up-down") was used so that when you display the array it will have the same orientation as when you look at the fits image with DS9. Once you read in the image, apply each filter to the image and display the image. To display, you will need to use the matplotlib module. For example, to display the image above, you could use
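Beyond the basic imshow call shown in the code above, one optional refinement (plain numpy/matplotlib, not part of the original text) is to clip the display stretch to percentiles of the pixel values:
import numpy as np
vmin, vmax = np.percentile(image, [5, 99])  # clip the grayscale stretch
plt.imshow(image, cmap='gray', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.show()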
|
932 | <ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import pandas_profiling as pd_profiling
import altair as alt
def read_field_type(x):
'''
To make it easier to read the dataframes with the corresponding column types.
'''
if x in ['String']:
return str
elif x in ['Integer', 'Long']:
return int
else:
return str
spend_gob_names = pd.read_csv(os.path.join('data',
'dataset_3_Revenue_and_ pending_(Central_Government)_Real_DICTIONARY.csv'),
sep=';',
encoding='iso-8859-1')
spend_gob_names
spend_gob = pd.read_csv(os.path.join('data',
'dataset_3_Revenue_and_ pending_(Central_Government)_1990-2017 (real).csv'),
sep=';',
encoding='iso-8859-1',
dtype=dict(zip(spend_gob_names['Field name'], spend_gob_names['Field type'].apply(read_field_type))))
spend_gob.columns = spend_gob.columns.str.lower()
spend_gob['real_amount'] = spend_gob['real_amount'].str.replace(',', '.').astype(float)
spend_gob.head(5)
spend_gob[spend_gob['real_amount'] < 0]
spend_gob['nivel2']
# for col in s
cobre = spend_gob.query("nivel3 == 'Cobre bruto'")
alt.Chart(cobre).mark_bar().encode(
x='periodo:O',
y='sum(real_amount):Q',
color='nivel3'
)
alt.Chart(spend_gob).mark_area().encode(
x='periodo:O',
y='sum(real_amount):Q',
color='nivel3',
column='nivel2'
)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Descriptive Statistics
Step2: A small example
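pandas_profiling is imported in the code above but never called in the snippet; a minimal descriptive-statistics report, assuming the package's standard ProfileReport interface, would be:
profile = pd_profiling.ProfileReport(spend_gob)
profile.to_file('spend_gob_profile.html')  # writes an HTML report with per-column statistics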
|
933 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import shapefile
from functools import reduce
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
sf = shapefile.Reader("Data/cb_2015_us_zcta510_500k/cb_2015_us_zcta510_500k")
def find_max_and_min(shape_records):
points = map(lambda x: x.shape.points, shape_records)
points = reduce(lambda x, y: x + y, points)
points = np.array(points)
lon_min, lon_max = points[:, 0].min() - .05, points[:, 0].max() + .05
lat_min, lat_max = points[:, 1].min() - .05, points[:, 1].max() + .05
return lon_min, lon_max, lat_min, lat_max
def sub_polys_in_poly(poly_points):
polys = []
poly = []
for index, point in enumerate(poly_points):
poly.append(point)
if len(poly) >= 2 and poly[0] == poly[len(poly)-1]:
polys.append(poly)
poly = []
return polys
def plotShapes(plt, shape_records, colors):
lon_min, lon_max, lat_min, lat_max = find_max_and_min(shape_records)
map = Basemap(projection='merc',
lat_0 = np.average([lat_min, lat_max]), lon_0 = np.average([lon_min, lon_max]),
resolution = 'c', area_thresh = 0.1,
llcrnrlon=lon_min, llcrnrlat=lat_min,
urcrnrlon=lon_max, urcrnrlat=lat_max)
map.drawcoastlines()
map.drawstates()
map.fillcontinents(color = 'coral')
map.drawmapboundary()
for index, shape_record in enumerate(shape_records):
polys = sub_polys_in_poly(shape_record.shape.points)
for poly in polys:
points = np.array(poly)
x, y = map(points[:, 0], points[:,1])
map.plot(x, y, 'o-', color=colors[index])
shape_records = {}
for shape_record in sf.shapeRecords():
zip_code = shape_record.record[0]
shape_records[zip_code] = shape_record
# def of record fields
sf.fields
# number of shape records
len(shape_records)
# objects toomsuba & cuba are shape records; each of them has a shape and a record property
toomsuba = shape_records['39364']
cuba = shape_records['36907']
ithaca = shape_records['14850']
toomsuba.record
plt.figure(figsize=(12, 12))
plotShapes(plt, [toomsuba, cuba], ['b', 'y'])
plt.show()
plt.figure(figsize=(12, 12))
plotShapes(plt, [ithaca], ['b'])
plt.show()
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
# A Simple union example
triangle0 = Polygon([(0,0), (2,0), (0,2)])
triangle1 = Polygon([(1,0), (1,2), (3,0)])
triangle0.union(triangle1)
polygon0 = Polygon(toomsuba.shape.points)
polygon1 = Polygon(cuba.shape.points)
union_poly = polygon0.union(polygon1)
polygon0
polygon1
union_poly
# get coordinates of unoined shape
union_coords = union_poly.exterior.coords.xy
union_coords = list(zip(union_coords[0], union_coords[1]))
union_coords # [(lon, lat)]
# from osgeo import ogr
# dataSource = ogr.Open("cb_2015_us_zcta510_500k/cb_2015_us_zcta510_500k.shp")
# layer = dataSource.GetLayer()
# print(layer.GetGeomType()) # 3 means polygons
# print(layer.GetFeatureCount())
# geometries = []
# for feature in layer:
# geom = feature.GetGeometryRef()
# geometries.append(geom)
# geo0 = geometries[0]
# geo1 = geometries[1]
# union_poly = geo0.Union(geo1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading shape & records
Step2: Metadata
Step3: Read zip codes
Step4: Shapely Union - Works
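cascaded_union is imported in the code above but unused; merging more than two zip-code polygons at once could look like this (a sketch reusing the record's shape_records dict; newer Shapely versions prefer unary_union):
zips_to_merge = ['39364', '36907']
polys = [Polygon(shape_records[z].shape.points) for z in zips_to_merge]
merged = cascaded_union(polys)
print(merged.area)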
Step5: OSGEO Unions - Doesn't work
|
934 | <ASSISTANT_TASK:>
Python Code:
k_nom = 1.0545
k_f_1144 = 1.04149
fuel_reactivity = (k_f_1144 - k_nom) / k_nom / 400
print(fuel_reactivity)
k_f_g_1144 = 1.02315
total_reactivity = (k_f_g_1144 - k_nom) / k_nom / 400
print(total_reactivity)
graph_reactivity = total_reactivity - fuel_reactivity
print(graph_reactivity)
from numpy import abs
print(abs(total_reactivity + 6.96e-5) / 6.96e-5 * 100)
print(abs(fuel_reactivity + 3.28e-5) / 3.28e-5 * 100)
print(abs(graph_reactivity + 3.68e-5) / 3.68e-5 * 100)
from numpy import exp
alpha_fuel_faeh = 1.18e-4
alpha_fuel_kel = 1.8 * alpha_fuel_faeh
alpha_graph_faeh = 1.0e-5
alpha_graph_kel = 1.8 * alpha_graph_faeh
rho0_fuel = 2.146
rho0_graph = 1.86
T0 = 922
rho1144_fuel = rho0_fuel * exp(-alpha_fuel_kel * (1144 - T0))
rho1144_graph = rho0_graph * exp(-alpha_graph_kel * (1144 - T0))
print(rho1144_fuel)
print(rho1144_graph)
print(100 * .00272 / 1.02316)
tot_gen_rate = 1.02331
tot_fiss_rate = 4.2e-1
tot_gen_rate / tot_fiss_rate
b1_remxs = 2.37419e-3
b1_nsf = 3.66701e-3
b1_diff = 9.85681e-1
b1_mat_buckl = (b1_nsf - b1_remxs) / b1_diff
b1_k_inf = b1_nsf / b1_remxs
print(b1_mat_buckl)
print(b1_k_inf)
remxs = 2.21079e-3
nsf = 3.35432e-3
diff = 5.31584e-1
mat_buckl = (nsf - remxs) / diff
k_inf = nsf / remxs
print(mat_buckl)
print(k_inf)
b1_73_remxs = 2.38026e-3
b1_73_nsf = 3.67837e-3
b1_73_diff = 9.85825e-1
mat_buckl_73 = (b1_73_nsf - b1_73_remxs) / b1_73_diff
k_inf_73 = b1_73_nsf / b1_73_remxs
print(mat_buckl_73)
print(k_inf_73)
40 * 20000
(381.99 + 160.60) / 4
381.99 + 160.60
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1144 K fuel and graphite
Step2: Summary
Step3: msre_homogeneous
Step4: 2/27/17
Step5: Above value perfectly matches one-group nubar
Step6: Above matches perfectly with Serpent output B1_BUCKLING. So how does this compare with the original calculation where the material buckling was actually higher and yet the reactor was still sub-critical??
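For reference, the two quantities computed from the group constants in the cells above are (writing $\nu\Sigma_f$ for the production cross section nsf, $\Sigma_{rem}$ for the removal cross section remxs, and $D$ for the diffusion coefficient):
$$B_m^2 = \frac{\nu\Sigma_f - \Sigma_{rem}}{D}, \qquad k_\infty = \frac{\nu\Sigma_f}{\Sigma_{rem}}.$$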
Step7: So to move the reactor to a state of criticality (from a sub-critical state), the medium was made less absorptive, less fissile, and more diffusive. More particularly, $k_{\infty}$ was increased and the medium was made more diffusive. Ok, that seems like it could work.
Step8: All the above results are very close to the b1 calculation results for the 57 cm reactor. So my question
|
935 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import time
import h5py
import keras
import matplotlib.pyplot as plt
import sys
from keras.layers import (Input, Dense, Lambda, Flatten, Reshape, BatchNormalization, Activation,
Dropout, Conv1D, UpSampling1D, MaxPooling1D, ZeroPadding1D, LeakyReLU)
from keras.engine.topology import Layer
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
plt.switch_backend('agg')
# Define edges of detectors (for APOGEE)
blue_chip_begin = 322
blue_chip_end = 3242
green_chip_begin = 3648
green_chip_end = 6048
red_chip_begin = 6412
red_chip_end = 8306
# function for loading data
def load_train_data_weighted(data_file,indices=None):
# grab all
if indices is None:
with h5py.File(data_file,"r") as F:
ap_spectra = F['spectrum'][:]
ap_err_spectra = F['error_spectrum'][:]
# grab a batch
else:
with h5py.File(data_file, "r") as F:
indices_bool = np.ones((len(F['spectrum']),),dtype=bool)
indices_bool[:] = False
indices_bool[indices] = True
ap_spectra = F['spectrum'][indices_bool,:]
ap_err_spectra = F['error_spectrum'][indices_bool,:]
# combine chips
ap_spectra = np.hstack((ap_spectra[:,blue_chip_begin:blue_chip_end],
ap_spectra[:,green_chip_begin:green_chip_end],
ap_spectra[:,red_chip_begin:red_chip_end]))
# set nan values to zero
ap_spectra[np.isnan(ap_spectra)]=0.
ap_err_spectra = np.hstack((ap_err_spectra[:,blue_chip_begin:blue_chip_end],
ap_err_spectra[:,green_chip_begin:green_chip_end],
ap_err_spectra[:,red_chip_begin:red_chip_end]))
return ap_spectra,ap_err_spectra
# function for reshaping spectra into appropriate format for CNN
def cnn_reshape(spectra):
return spectra.reshape(spectra.shape[0],spectra.shape[1],1)
img_cols, img_chns = 7214, 1
num_fluxes=7214
input_shape=(num_fluxes,1)
# z_dims is the dimension of the latent space
z_dims = 64
batch_size = 64
epsilon_std = 1.0
learning_rate = 0.001
decay = 0.0
padding=u'same'
kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.01)
bias_init = keras.initializers.Zeros()
# zero-augmentation layer (a trick I use to input chunks of zeros into the input spectra)
class ZeroAugmentLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(ZeroAugmentLayer, self).__init__(**kwargs)
def zero_agument(self, x_real, zero_mask):
return x_real*zero_mask
def call(self, inputs):
x_real = inputs[0]
zero_mask = inputs[1]
x_augmented = self.zero_agument(x_real, zero_mask)
return x_augmented
# a function for creating the zero-masks used during training
def create_zero_mask(spectra,min_chunks,max_chunks,chunk_size,dataset=None,ones_padded=False):
if dataset is None:
zero_mask = np.ones_like(spectra)
elif dataset=='apogee':
zero_mask = np.ones((spectra.shape[0],7214))
elif dataset=='segue':
zero_mask = np.ones((spectra.shape[0],3688))
num_spec = zero_mask.shape[0]
len_spec = zero_mask.shape[1]
    num_bins = len_spec // chunk_size
remainder = len_spec%chunk_size
spec_sizes = np.array([chunk_size for i in range(num_bins)])
spec_sizes[-1]=spec_sizes[-1]+remainder
num_bins_removed = np.random.randint(min_chunks,max_chunks+1,size=(num_spec,))
for i, mask in enumerate(zero_mask):
bin_indx_removed = np.random.choice(num_bins, num_bins_removed[i], replace=False)
for indx in bin_indx_removed:
if indx==0:
mask[indx*spec_sizes[indx]:(indx+1)*spec_sizes[indx]]=0.
else:
mask[indx*spec_sizes[indx-1]:indx*spec_sizes[indx-1]+spec_sizes[indx]]=0.
return zero_mask
def build_encoder(input_1,input_2):
# zero-augment input spectrum
x = ZeroAugmentLayer()([input_1,input_2])
# first conv block
x = Conv1D(filters=16, kernel_size=8, strides=1, kernel_initializer=kernel_init,
bias_initializer=bias_init, padding=padding)(x)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
# second conv bloack
x = Conv1D(filters=16, kernel_size=8, strides=1, kernel_initializer=kernel_init,
bias_initializer=bias_init, padding=padding)(x)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
# maxpooling layer and flatten
x = MaxPooling1D(pool_size=4, strides=4, padding='valid')(x)
x = Flatten()(x)
x = Dropout(0.2)(x)
# intermediate dense block
x = Dense(256)(x)
x = LeakyReLU(0.3)(x)
x = BatchNormalization()(x)
x = Dropout(0.3)(x)
# latent distribution output
z_mean = Dense(z_dims)(x)
z_log_var = Dense(z_dims)(x)
return Model([input_1,input_2],[z_mean,z_log_var])
# function for obtaining a latent sample given a distribution
def sampling(args, latent_dim=z_dims, epsilon_std=epsilon_std):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(z_dims,),
mean=0., stddev=epsilon_std)
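    # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I), keeping the
    # sampling step differentiable; sigma = exp(0.5 * z_log_var) when z_log_var is the log-variance.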
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
def build_decoder(inputs):
# input fully-connected block
x = Dense(256)(inputs)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
# intermediate fully-connected block
w = input_shape[0] // (2 ** 3)
x = Dense(w * 16)(x)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
# reshape for convolutional blocks
x = Reshape((w, 16))(x)
# first deconv block
x = UpSampling1D(size=4)(x)
x = Conv1D(kernel_initializer=kernel_init,bias_initializer=bias_init,padding="same",
filters=16,kernel_size=8)(x)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
# zero-padding to get x in the right dimension to create the spectra
x = ZeroPadding1D(padding=(2,1))(x)
# second deconv block
x = UpSampling1D(size=2)(x)
x = Conv1D(kernel_initializer=kernel_init,bias_initializer=bias_init,padding="same",
filters=16,kernel_size=8)(x)
x = LeakyReLU(0.1)(x)
x = BatchNormalization()(x)
# output conv layer
x = Conv1D(kernel_initializer=kernel_init,bias_initializer=bias_init,padding="same",
filters=1,kernel_size=8,activation='linear')(x)
return Model(inputs,x)
# encoder and predictor input placeholders
input_spec = Input(shape=input_shape)
input_mask = Input(shape=input_shape)
# error spectra placeholder
input_err_spec = Input(shape=input_shape)
# decoder input placeholder
input_z = Input(shape=(z_dims,))
model_name='vae_test'
start_e = 0
# if you want to continue training from a certain epoch, you can uncomment the load models lines
# and comment out the build_encoder, build_decoder lines
'''
encoder = keras.models.load_model('models/encoder_'+model_name+'_epoch_'+str(start_e)+'.h5',
custom_objects={'ZeroAugmentLayer':ZeroAugmentLayer})
decoder = keras.models.load_model('models/decoder_'+model_name+'_epoch_'+str(start_e)+'.h5',
custom_objects={'ZeroAugmentLayer':ZeroAugmentLayer})
'''
# encoder model
encoder = build_encoder(input_spec, input_mask)
# decoder layers
decoder = build_decoder(input_z)
#'''
encoder.summary()
decoder.summary()
# outputs for encoder
z_mean, z_log_var = encoder([input_spec, input_mask])
# sample from latent distribution given z_mean and z_log_var
z = Lambda(sampling, output_shape=(z_dims,))([z_mean, z_log_var])
# outputs for decoder
output_spec = decoder(z)
# loss for evaluating the regenerated spectra and the latent distribution
class VAE_LossLayer_weighted(Layer):
__name__ = u'vae_labeled_loss_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(VAE_LossLayer_weighted, self).__init__(**kwargs)
def lossfun(self, x_true, x_pred, z_avg, z_log_var, x_err):
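        # Error-weighted reconstruction (chi-squared-like) term plus the standard
        # KL divergence between q(z|x) and a unit-normal prior.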
mse = K.mean(K.square((x_true - x_pred)/x_err))
kl_loss_x = K.mean(-0.5 * K.sum(1.0 + z_log_var - K.square(z_avg) - K.exp(z_log_var), axis=-1))
return mse + kl_loss_x
def call(self, inputs):
# inputs for the layer:
x_true = inputs[0]
x_pred = inputs[1]
z_avg = inputs[2]
z_log_var = inputs[3]
x_err = inputs[4]
# calculate loss
loss = self.lossfun(x_true, x_pred, z_avg, z_log_var, x_err)
# add loss to model
self.add_loss(loss, inputs=inputs)
# returned value not really used for anything
return x_true
# dummy loss to give zeros, hence no gradients to train
# the real loss is computed as the layer shown above and therefore this dummy loss is just
# used to satisfy keras notation when compiling the model
def zero_loss(y_true, y_pred):
return K.zeros_like(y_true)
# create loss layer
vae_loss = VAE_LossLayer_weighted()([input_spec, output_spec, z_mean, z_log_var, input_err_spec])
# build trainer with spectra, zero-masks, and error spectra as inputs. output is the final loss layer
vae = Model(inputs=[input_spec, input_mask, input_err_spec], outputs=[vae_loss])
# compile trainer
vae.compile(loss=[zero_loss],
optimizer=Adam(lr=1.0e-4, beta_1=0.5))
vae.summary()
# a model that encodes and then decodes a spectrum (this is used to plot the intermediate results during training)
gen_x_to_x = Model([input_spec,input_mask], output_spec)
gen_x_to_x.compile(loss='mse',
optimizer=Adam(lr=1.0e-4, beta_1=0.5))
# a function to display the time remaining or elapsed
def time_format(t):
m, s = divmod(t, 60)
m = int(m)
s = int(s)
if m == 0:
return u'%d sec' % s
else:
return u'%d min' % (m)
# function for training on a batch
def train_on_batch(x_batch,x_err_batch):
# create zero-augmentation mask for batch
zero_mask = create_zero_mask(x_batch,0,3,1030,dataset=None,ones_padded=False)
# train on batch
loss = [vae.train_on_batch([cnn_reshape(x_batch),
cnn_reshape(zero_mask),cnn_reshape(x_err_batch)],
cnn_reshape(x_batch))]
losses = {'vae_loss': loss[0]}
return losses
def fit_model(model_name, data_file, epochs, reporter):
# get the number of spectra in the data_file
with h5py.File(data_file, "r") as F:
num_data_ap = len(F['spectrum'])
# lets use 90% of the samples for training
num_data_train_ap = int(num_data_ap*0.9)
# the remainder will be grabbed for testing the model throughout training
test_indices_range_ap = [num_data_train_ap,num_data_ap]
# loop through the number of epochs
for e in xrange(start_e,epochs):
# create a randomized array of indices to grab batches of the spectra
perm_ap = np.random.permutation(num_data_train_ap)
start_time = time.time()
# loop through the batches
losses_=[]
for b in xrange(0, num_data_train_ap, batchsize):
# determine current batch size
bsize = min(batchsize, num_data_train_ap - b)
# grab a batch of indices
indx_batch = perm_ap[b:b+bsize]
# load a batch of data
x_batch, x_err_batch= load_train_data_weighted(data_file,indices=indx_batch)
# train on batch
losses = train_on_batch(x_batch,x_err_batch)
losses_.append(losses)
# Print current status
ratio = 100.0 * (b + bsize) / num_data_train_ap
print unichr(27) + u"[2K",; sys.stdout.write(u'')
print u'\rEpoch #%d | %d / %d (%6.2f %%) ' % \
(e + 1, b + bsize, num_data_train_ap, ratio),; sys.stdout.write(u'')
for k in reporter:
if k in losses:
print u'| %s = %5.3f ' % (k, losses[k]),; sys.stdout.write(u'')
# Compute ETA
elapsed_time = time.time() - start_time
eta = elapsed_time / (b + bsize) * (num_data_train_ap - (b + bsize))
print u'| ETA: %s ' % time_format(eta),; sys.stdout.write(u'')
sys.stdout.flush()
print u''
# Print epoch status
ratio = 100.0
print unichr(27) + u"[2K",; sys.stdout.write(u'')
print u'\rEpoch #%d | %d / %d (%6.2f %%) ' % \
(e + 1, num_data_train_ap, num_data_train_ap, ratio),; sys.stdout.write(u'')
losses_all = {}
for k in losses_[0].iterkeys():
losses_all[k] = tuple(d[k] for d in losses_)
for k in reporter:
if k in losses_all:
losses_all[k]=np.sum(losses_all[k])/len(losses_)
for k in reporter:
if k in losses_all:
print u'| %s = %5.3f ' % (k, losses_all[k]),; sys.stdout.write(u'')
# save loss to evaluate progress
myfile = open(model_name+'.txt', 'a')
for k in reporter:
if k in losses:
myfile.write("%s," % losses[k])
myfile.write("\n")
myfile.close()
# Compute Time Elapsed
elapsed_time = time.time() - start_time
eta = elapsed_time
print u'| TE: %s ' % time_format(eta),; sys.stdout.write(u'')
#sys.stdout.flush()
print('\n')
# save models
encoder.save('models/encoder_'+model_name+'_epoch_'+str(e)+'.h5')
decoder.save('models/decoder_'+model_name+'_epoch_'+str(e)+'.h5')
# plot results for a test set to evaluate how the vae is able to reproduce a spectrum
test_sample_indices = np.random.choice(range(test_indices_range_ap[0],test_indices_range_ap[1]), 5, replace=False)
sample_orig,_, = load_train_data_weighted(data_file,indices=test_sample_indices)
zero_mask_test = create_zero_mask(sample_orig,0,3,1030)
test_x = gen_x_to_x.predict([cnn_reshape(sample_orig),cnn_reshape(zero_mask_test)])
sample_orig_aug = sample_orig*zero_mask_test
sample_diff = sample_orig-test_x.reshape(test_x.shape[0],test_x.shape[1])
# save test results
fig, axes = plt.subplots(20,1,figsize=(70, 20))
for i in range(len(test_sample_indices)):
# original spectrum
axes[i*4].plot(sample_orig[i],c='r')
axes[i*4].set_ylim((0.4,1.2))
# input zero-augmented spectrum
axes[1+4*i].plot(sample_orig_aug[i],c='g')
axes[1+4*i].set_ylim((0.4,1.2))
# regenerated spectrum
axes[2+4*i].plot(test_x[i],c='b')
axes[2+4*i].set_ylim((0.4,1.2))
# residual between original and regenerated spectra
axes[3+4*i].plot(sample_diff[i],c='m')
axes[3+4*i].set_ylim((-0.3,0.3))
# save results
plt.savefig('results/test_sample_ap_'+model_name+'_epoch_'+str(e)+'.jpg')
plt.close('all')
reporter=['vae_loss']
epochs=30
batchsize=64
if start_e>0:
start_e=start_e+1
data_file = '/data/stars/aspcapStar_combined_main_dr14.h5'
fit_model(model_name,data_file, epochs,reporter)
import numpy as np
import h5py
import keras
import matplotlib.pyplot as plt
import sys
from keras.layers import (Input, Lambda)
from keras.engine.topology import Layer
from keras import backend as K
%matplotlib inline
# Define edges of detectors (for APOGEE)
blue_chip_begin = 322
blue_chip_end = 3242
green_chip_begin = 3648
green_chip_end = 6048
red_chip_begin = 6412
red_chip_end = 8306
# function for loading data
def load_train_data_weighted(data_file,indices=None):
# grab all
if indices is None:
with h5py.File(data_file,"r") as F:
ap_spectra = F['spectrum'][:]
ap_err_spectra = F['error_spectrum'][:]
# grab a batch
else:
with h5py.File(data_file, "r") as F:
indices_bool = np.ones((len(F['spectrum']),),dtype=bool)
indices_bool[:] = False
indices_bool[indices] = True
ap_spectra = F['spectrum'][indices_bool,:]
ap_err_spectra = F['error_spectrum'][indices_bool,:]
# combine chips
ap_spectra = np.hstack((ap_spectra[:,blue_chip_begin:blue_chip_end],
ap_spectra[:,green_chip_begin:green_chip_end],
ap_spectra[:,red_chip_begin:red_chip_end]))
# set nan values to zero
ap_spectra[np.isnan(ap_spectra)]=0.
ap_err_spectra = np.hstack((ap_err_spectra[:,blue_chip_begin:blue_chip_end],
ap_err_spectra[:,green_chip_begin:green_chip_end],
ap_err_spectra[:,red_chip_begin:red_chip_end]))
return ap_spectra,ap_err_spectra
# zero-augmentation layer (a trick I use to input chunks of zeros into the input spectra)
class ZeroAugmentLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(ZeroAugmentLayer, self).__init__(**kwargs)
def zero_agument(self, x_real, zero_mask):
return x_real*zero_mask
def call(self, inputs):
x_real = inputs[0]
zero_mask = inputs[1]
x_augmented = self.zero_agument(x_real, zero_mask)
return x_augmented
# a function for creating the zero-masks used during training
def create_zero_mask(spectra,min_chunks,max_chunks,chunk_size,dataset=None,ones_padded=False):
if dataset is None:
zero_mask = np.ones_like(spectra)
elif dataset=='apogee':
zero_mask = np.ones((spectra.shape[0],7214))
elif dataset=='segue':
zero_mask = np.ones((spectra.shape[0],3688))
num_spec = zero_mask.shape[0]
len_spec = zero_mask.shape[1]
    num_bins = len_spec // chunk_size
remainder = len_spec%chunk_size
spec_sizes = np.array([chunk_size for i in range(num_bins)])
spec_sizes[-1]=spec_sizes[-1]+remainder
num_bins_removed = np.random.randint(min_chunks,max_chunks+1,size=(num_spec,))
for i, mask in enumerate(zero_mask):
bin_indx_removed = np.random.choice(num_bins, num_bins_removed[i], replace=False)
for indx in bin_indx_removed:
if indx==0:
mask[indx*spec_sizes[indx]:(indx+1)*spec_sizes[indx]]=0.
else:
mask[indx*spec_sizes[indx-1]:indx*spec_sizes[indx-1]+spec_sizes[indx]]=0.
return zero_mask
# function for reshaping spectra into appropriate format for CNN
def cnn_reshape(spectra):
return spectra.reshape(spectra.shape[0],spectra.shape[1],1)
losses = np.zeros((1,))
with open("vae_test.txt", "r") as f:
for i,line in enumerate(f):
currentline = np.array(line.split(",")[0],dtype=float)
if i ==0:
losses[0]=currentline.reshape((1,))
else:
losses = np.hstack((losses,currentline.reshape((1,))))
plt.plot(losses[0:16],label='vae_loss')
plt.legend()
plt.show()
# function for encoding a spectrum into the latent space
def encode_spectrum(model_name,epoch,spectra):
encoder = keras.models.load_model('models/encoder_'+model_name+'_epoch_'+str(epoch)+'.h5',
custom_objects={'ZeroAugmentLayer':ZeroAugmentLayer})
z_avg,z_log_var = encoder.predict([cnn_reshape(spectra),cnn_reshape(np.ones_like(spectra))])
return z_avg, z_log_var
data_file = '/data/stars/aspcapStar_combined_main_dr14.h5'
test_range = [0,30000]
test_sample_indices = np.random.choice(range(0,30000), 5000, replace=False)
sample_x,_, = load_train_data_weighted(data_file,indices=test_sample_indices)
model_name = 'vae_test'
epoch=16
z_avg, z_log_var = encode_spectrum(model_name,epoch,sample_x)
from tsne import bh_sne
perplex=80
t_data = z_avg
# convert data to float64 matrix. float64 is need for bh_sne
t_data = np.asarray(t_data).astype('float64')
t_data = t_data.reshape((t_data.shape[0], -1))
# perform t-SNE embedding
vis_data = bh_sne(t_data, perplexity=perplex)
# separate 2D into x and y axes information
vis_x = vis_data[:, 0]
vis_y = vis_data[:, 1]
fig = plt.figure(figsize=(10, 10))
synth_ap = plt.scatter(vis_x, vis_y, marker='o', c='r',label='APOGEE', alpha=0.4)
plt.tick_params(
axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off')
plt.tick_params(
axis='y',
which='both',
right='off',
left='off',
labelleft='off')
plt.legend(fontsize=30)
plt.tight_layout()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: load data
Step2: set some model hyper-parameters
Step3: Zero-Augmentation
Step4: build encoder
Step5: build decoder
Step6: build models
Step7: create loss function
Step8: build and compile model trainer
Step9: train model
Step10: analyze results
Step11: t-sne
|
936 | <ASSISTANT_TASK:>
Python Code:
from IPython.lib.display import YouTubeVideo
YouTubeVideo("FytuB8nFHPQ", width=400, height=300)
from __future__ import absolute_import, division, print_function
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('poster')
sns.set_style('whitegrid')
# sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
from sklearn.datasets import load_boston
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv("../data/coal_prod_cleaned.csv")
df.head()
plt.scatter(df['Average_Employees'],
df.Labor_Hours)
plt.xlabel("Number of Employees")
plt.ylabel("Total Hours Worked");
from IPython.display import YouTubeVideo
YouTubeVideo("uHPcshgTotE", width=560, height=315)
import bqplot as bq
sample_df = df.sample(100)
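# bqplot builds plots from objects: scales feed axes and marks, which are composed
# into a Figure; the selected/unselected styles apply to points picked by clicking.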
x_sc = bq.LinearScale()
y_sc = bq.LinearScale()
ax_x = bq.Axis(label='Number of Employees', scale=x_sc, grid_lines='solid')
ax_y = bq.Axis(label='Total Hours Worked', scale=y_sc, orientation='vertical', grid_lines='solid')
line = bq.Scatter(x=sample_df.Average_Employees,
y=sample_df.Labor_Hours,
scales={'x': x_sc, 'y': y_sc},
interactions={'click': 'select'},
selected_style={'opacity': 1.0, 'fill': 'DarkOrange', 'stroke': 'Red'},
unselected_style={'opacity': 0.5})
fig = bq.Figure(axes=[ax_x, ax_y], marks=[line], title='BQPlot Example')
fig
line.selected
line.selected = [23, 3]
import bqplot.pyplot as plt
import numpy as np
x = np.linspace(0, 2, 50)
y = x**2
fig = plt.figure()
scatter = plt.scatter(x, y)
plt.show()
fig.animation_duration = 5000
scatter.y = x**.5
scatter.selected_style = {'stroke':'red', 'fill': 'orange'}
plt.brush_selector();
scatter.selected
scatter.selected = [1,2,10,40]
import ipyvolume as ipv
import numpy as np
ipv.example_ylm()
N = 1000
x, y, z = np.random.random((3, N))
fig = ipv.figure()
scatter = ipv.scatter(x, y, z, marker='box')
ipv.show()
scatter.x = scatter.x + 0.1
scatter.color = "green"
scatter.size = 5
scatter.color = np.random.random((N,3))
scatter.size = 2
ex = ipv.datasets.animated_stream.fetch().data
ex.shape
ex[:, ::, ::4].shape
ipv.figure()
ipv.style.use('dark')
quiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5)
ipv.animation_control(quiver, interval=200)
ipv.show()
ipv.style.use('light')
ipv.style.use('light')
quiver.geo = "cat"
N = 1000*1000
x, y, z = np.random.random((3, N)).astype('f4')
ipv.figure()
s = ipv.scatter(x, y, z, size=0.2)
ipv.show()
ipv.save("bqplot.html", )
!open bqplot.html
colors = sns.color_palette(n_colors=df.Year.nunique())
color_dict = {key: value
for key, value in zip(sorted(df.Year.unique()), colors)}
color_dict
for year in sorted(df.Year.unique()[[0, 2, -1]]):
plt.scatter(df[df.Year == year].Labor_Hours,
df[df.Year == year].Production_short_tons,
c=color_dict[year],
s=50,
label=year,
)
plt.xlabel("Total Hours Worked")
plt.ylabel("Total Amount Produced")
plt.legend()
plt.savefig("ex1.png")
import matplotlib as mpl
plt.style.available
mpl.style.use('seaborn-colorblind')
for year in sorted(df.Year.unique()[[0, 2, -1]]):
plt.scatter(df[df.Year == year].Labor_Hours,
df[df.Year == year].Production_short_tons,
# c=color_dict[year],
s=50,
label=year,
)
plt.xlabel("Total Hours Worked")
plt.ylabel("Total Amount Produced")
plt.legend();
# plt.savefig("ex1.png")
df_dict = load_boston()
features = pd.DataFrame(data=df_dict.data, columns = df_dict.feature_names)
target = pd.DataFrame(data=df_dict.target, columns = ['MEDV'])
df = pd.concat([features, target], axis=1)
df.head()
# Target variable
fig, ax = plt.subplots(figsize=(6, 4))
sns.distplot(df.MEDV, ax=ax, rug=True, hist=False)
fig, ax = plt.subplots(figsize=(10,7))
sns.kdeplot(df.LSTAT,
df.MEDV,
ax=ax)
fig, ax = plt.subplots(figsize=(10, 10))
scatter_matrix(df[['MEDV', 'LSTAT', 'CRIM', 'RM', 'NOX', 'DIS']], alpha=0.2, diagonal='hist', ax=ax);
sns.pairplot(data=df,
vars=['MEDV', 'LSTAT', 'CRIM', 'RM', 'NOX', 'DIS'],
plot_kws={'s':20, 'alpha':0.5}
);
players = pd.read_csv("../data/raw_players.csv.gz", compression='gzip')
players.head()
weight_categories = ["vlow_weight",
"low_weight",
"mid_weight",
"high_weight",
"vhigh_weight",
]
players['weightclass'] = pd.qcut(players['weight'],
len(weight_categories),
weight_categories)
players.head()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: BQPlot
Step3: Quantile cuts
|
937 | <ASSISTANT_TASK:>
Python Code:
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2022"
from colors import ColorsCorpusReader
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch_color_describer import (
ContextualColorDescriber, create_example_dataset)
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL
utils.fix_random_seeds()
COLORS_SRC_FILENAME = os.path.join(
"data", "colors", "filteredCorpus.csv")
corpus = ColorsCorpusReader(
COLORS_SRC_FILENAME,
word_count=None,
normalize_colors=True)
examples = list(corpus.read())
# Should be 46994:
len(examples)
ex1 = next(corpus.read())
ex1.display(typ='speaker')
ex1.display(typ='listener')
ex1.display()
ex1.colors
ex1.speaker_context
ex1.contents
ex3 = examples[2]
ex3.contents
ex3.parse_turns()
ex1.parse_turns()
print("Condition type:", examples[1].condition)
examples[1].display()
print("Condition type:", examples[3].condition)
examples[3].display()
print("Condition type:", examples[2].condition)
examples[2].display()
ex1.condition
pd.Series([ex.condition for ex in examples]).value_counts()
tiny_contexts, tiny_words, tiny_vocab = create_example_dataset(
group_size=2, vec_dim=2)
tiny_vocab
tiny_words
tiny_contexts
toy_color_seqs, toy_word_seqs, toy_vocab = create_example_dataset(
group_size=50, vec_dim=2)
toy_color_seqs_train, toy_color_seqs_test, toy_word_seqs_train, toy_word_seqs_test = \
train_test_split(toy_color_seqs, toy_word_seqs)
toy_mod = ContextualColorDescriber(toy_vocab, max_iter=200)
_ = toy_mod.fit(toy_color_seqs_train, toy_word_seqs_train)
toy_preds = toy_mod.predict(toy_color_seqs_test)
toy_preds[0]
toy_correct = sum(1 for x, p in zip(toy_word_seqs_test, toy_preds) if x == p)
toy_correct / len(toy_word_seqs_test)
toy_mod.listener_accuracy(toy_color_seqs_test, toy_word_seqs_test)
bleu_score, predicted_texts = toy_mod.corpus_bleu(toy_color_seqs_test, toy_word_seqs_test)
bleu_score
toy_perp = toy_mod.perplexities(toy_color_seqs_test, toy_word_seqs_test)
toy_perp[0]
toy_proba = toy_mod.predict_proba(toy_color_seqs_test, toy_word_seqs_test)
# 4 tokens, each assigned a distribution over 5 vocab items:
print(toy_word_seqs_test[0])
toy_proba[0].shape
for timestep in toy_proba[0]:
print(dict(zip(toy_vocab, timestep)))
best_mod = utils.fit_classifier_with_hyperparameter_search(
toy_color_seqs_train,
toy_word_seqs_train,
toy_mod,
cv=2,
scoring=None,
param_grid={'hidden_dim': [10, 20]})
dev_corpus = ColorsCorpusReader(COLORS_SRC_FILENAME, word_count=2)
dev_examples = list(dev_corpus.read())
len(dev_examples)
dev_cols, dev_texts = zip(*[[ex.colors, ex.contents] for ex in dev_examples])
dev_word_seqs = [[START_SYMBOL] + text.split() + [END_SYMBOL] for text in dev_texts]
dev_cols_train, dev_cols_test, dev_word_seqs_train, dev_word_seqs_test = \
train_test_split(dev_cols, dev_word_seqs)
dev_vocab = sorted({w for toks in dev_word_seqs_train for w in toks})
dev_vocab += [UNK_SYMBOL]
dev_mod = ContextualColorDescriber(
dev_vocab,
embed_dim=10,
hidden_dim=10,
early_stopping=True)
%time _ = dev_mod.fit(dev_cols_train, dev_word_seqs_train)
dev_mod_eval = dev_mod.evaluate(dev_cols_test, dev_word_seqs_test)
dev_mod_eval['listener_accuracy']
dev_mod_eval['corpus_bleu']
import torch.nn as nn
from torch_color_describer import Encoder
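# Swap the default recurrent unit for an LSTM simply by overriding the `rnn`
# attribute; everything else is inherited from the course's Encoder base class.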
class LSTMEncoder(Encoder):
def __init__(self, color_dim, hidden_dim):
super().__init__(color_dim, hidden_dim)
self.rnn = nn.LSTM(
input_size=self.color_dim,
hidden_size=self.hidden_dim,
batch_first=True)
import torch.nn as nn
from torch_color_describer import Encoder, Decoder
class LSTMDecoder(Decoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.rnn = nn.LSTM(
input_size=self.embed_dim,
hidden_size=self.hidden_dim,
batch_first=True)
from torch_color_describer import EncoderDecoder
class LSTMContextualColorDescriber(ContextualColorDescriber):
def build_graph(self):
# Use the new Encoder:
encoder = LSTMEncoder(
color_dim=self.color_dim,
hidden_dim=self.hidden_dim)
# Use the new Decoder:
decoder = LSTMDecoder(
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
embedding=self.embedding,
hidden_dim=self.hidden_dim)
return EncoderDecoder(encoder, decoder)
lstm_mod = LSTMContextualColorDescriber(
toy_vocab,
embed_dim=10,
hidden_dim=10)
_ = lstm_mod.fit(toy_color_seqs_train, toy_word_seqs_train)
lstm_mod.listener_accuracy(toy_color_seqs_test, toy_word_seqs_test)
import torch.nn as nn
from torch_color_describer import Encoder, Decoder
class DeepEncoder(Encoder):
def __init__(self, *args, num_layers=2, **kwargs):
super().__init__(*args, **kwargs)
self.num_layers = num_layers
self.rnn = nn.GRU(
input_size=self.color_dim,
hidden_size=self.hidden_dim,
num_layers=self.num_layers,
batch_first=True)
class DeepDecoder(Decoder):
def __init__(self, *args, num_layers=2, **kwargs):
super().__init__(*args, **kwargs)
self.num_layers = num_layers
self.rnn = nn.GRU(
input_size=self.embed_dim,
hidden_size=self.hidden_dim,
num_layers=self.num_layers,
batch_first=True)
from torch_color_describer import EncoderDecoder
class DeepContextualColorDescriber(ContextualColorDescriber):
def __init__(self, *args, num_layers=2, **kwargs):
self.num_layers = num_layers
super().__init__(*args, **kwargs)
def build_graph(self):
encoder = DeepEncoder(
color_dim=self.color_dim,
hidden_dim=self.hidden_dim,
num_layers=self.num_layers) # The new piece is this argument.
decoder = DeepDecoder(
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
embedding=self.embedding,
hidden_dim=self.hidden_dim,
num_layers=self.num_layers) # The new piece is this argument.
return EncoderDecoder(encoder, decoder)
mod_deep = DeepContextualColorDescriber(
toy_vocab,
embed_dim=10,
hidden_dim=10)
_ = mod_deep.fit(toy_color_seqs_train, toy_word_seqs_train)
mod_deep.listener_accuracy(toy_color_seqs_test, toy_word_seqs_test)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contents
Step2: The Stanford English Colors in Context corpus (SCC) is included in the data distribution for this course. If you store the data in a non-standard place, you'll need to update the following
Step3: The corpus
Step4: The two keyword arguments have their default values here.
Step5: We can verify that we read in the same number of examples as reported in Monroe et al. 2017
Step6: ColorsCorpusExample instances
Step7: These objects have a lot of attributes and methods designed to help you study the corpus and use it for our machine learning tasks. Let's review some highlights.
Step8: This is the original order of patches for the speaker. The target happens to be the leftmost patch, as indicated by the black box around it.
Step9: The listener isn't shown the target, of course, so no patches are highlighted.
Step10: This is the representation order we use for our machine learning models.
Step11: In this display order, the third element is the target color and the first two are the distractors. The attributes speaker_context and listener_context return the same colors but in the order that those players saw them. For example
Step12: Utterance texts
Step13: There are cases where the speaker made a sequence of utterances for the same trial. We follow Monroe et al. 2017 in concatenating these into a single utterance. To preserve the original information, the individual turns are separated by " ### ". Example 3 is the first with this property – let's check it out
Step14: The method parse_turns will parse this into individual turns
Step15: For examples consisting of a single turn, parse_turns returns a list of length 1
Step16: Far, Split, and Close conditions
Step17: Split condition
Step18: Close condition
Step19: These conditions go from easiest to hardest when it comes to reliable communication. In the Far condition, the context is hardly relevant, whereas the nature of the distractors reliably shapes the speaker's choices in the other two conditions.
Step20: The following verifies that we have the same number of examples per condition as reported in Monroe et al. 2017
Step21: Toy problems for development work
Step22: Each member of tiny_contexts contains three vectors. This is meant to be an easy problem, so the final (target) vector always has values that unambiguously determine which utterance is produced. Thus, the model basically just needs to learn to ignore the distractors and find the association between the target vector and the corresponding sequence.
Step23: ContextualColorDescriber is a subclass of TorchModelBase, so all of the optimization parameters from that model are available here; see torch_model_base.py for full details.
Step24: Predicting sequences
Step25: We can then check that we predicted all correct sequences
Step26: For real problems, this is too stringent a requirement, since there are generally many equally good descriptions. This insight gives rise to metrics like BLEU, METEOR, ROUGE, CIDEr, and others, which seek to relax the requirement of an exact match with the test sequence. These are reasonable options to explore, but we will instead adopt a communication-based evaluation, as discussed in the next section.
Step27: BLEU scores
Step28: For discussion of BLEU scores, see the evaluation metrics notebook.
Step29: You can use predict_proba to see the full probability distributions assigned to test examples
Step30: Cross-validation
Step31: Baseline SCC model
Step32: Here we extract the raw colors and texts (as strings)
Step33: To tokenize the examples, we'll just split on whitespace, taking care to add the required boundary symbols
Step34: We'll use a random train–test split
Step35: Our vocab is determined by the train set, and we take care to include the $UNK token
Step36: And now we're ready to train a model
Step37: And finally an evaluation in terms of listener accuracy and BLEU scores. The evaluate method combines these
Step38: Modifying the core model
Step39: Step 2
Step40: Step 3
Step41: Here's an example run
Step42: Illustration
Step43: Step 2
Step44: An example/test run
|
938 | <ASSISTANT_TASK:>
Python Code:
import welly
ls ../data/*.LAS
import lasio
l = lasio.read('../data/P-129.LAS') # Line 1.
l
l.header['Well'] # Line 2.
l.header['Parameter']['EKB']
l.data
l.curves.GR # Line 3.
l['GR'] # Line 4.
import matplotlib.pyplot as plt
plt.figure(figsize=(15,3))
plt.plot(l['DEPT'], l['GR'])
plt.show()
l.df().head() # Line 5.
from welly import Well
w = Well.from_las('../data/P-129.LAS') # Line 6.
w
w.df().head()
gr = w.data['GR'] # Line 7.
gr
gr.basis
gr.to_basis(start=300, stop=1000).plot() # Line 8.
sm = gr.smooth(window_length=15, samples=False) # Line 9.
sm.plot()
print("Data shape: {}".format(w.las.data.shape))
w.las.data
w.data.keys()
keys=['CALI', 'DT', 'DTS', 'RHOB', 'SP']
w.plot(tracks=['TVD']+keys)
X, basis = w.data_as_matrix(keys=keys, start=275, stop=1850, step=0.5, return_basis=True)
w.data['CALI'].shape
X.shape
plt.figure(figsize=(15,3))
plt.plot(X.T[0])
plt.show()
w.location
import re
def transform_ll(text):
    """Parses malformed lat and lon so they load properly."""
def callback(match):
d = match.group(1).strip()
m = match.group(2).strip()
s = match.group(3).strip()
c = match.group(4).strip()
if c.lower() in ('w', 's') and d[0] != '-':
d = '-' + d
return ' '.join([d, m, s])
    pattern = re.compile(r'.+?([-0-9]+?).? ?([0-9]+?).? ?([\.0-9]+?).? +?([NESW])', re.I)
text = pattern.sub(callback, text)
return welly.utils.dms2dd([float(i) for i in text.split()])
print(transform_ll("""Lat = 45* 12' 34.237" N"""))
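# remap reassigns LAS header mnemonics on load (None drops a field), while funcs
# applies a transform function to each named field before it is stored on the Well.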
remap = {
'LATI': 'LOC', # Use LOC for the parameter LATI.
'LONG': 'UWI', # Use UWI for the parameter LONG.
    'LOC': None,  # Use nothing for the parameter LOC.
'SECT': None, # Use nothing for the parameter SECT.
'RANG': None, # Use nothing for the parameter RANG.
'TOWN': None, # Use nothing for the parameter TOWN.
}
funcs = {
'LATI': transform_ll, # Pass LATI through this function before loading.
'LONG': transform_ll, # Pass LONG through it too.
'UWI': lambda x: "No UWI, fix this!"
}
w = Well.from_las('../data/P-129.LAS', remap=remap, funcs=funcs)
w.location.latitude, w.location.longitude
w.uwi
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load the LAS file with lasio
Step2: That's it! But the object itself doesn't tell us much — it's really just a container
Step3: 2. Look at the WELL section of the header
Step4: You can go in and find the KB if you know what to look for
Step5: 3. Look at the curve data
Step6: Or we can go after a single curve object
Step7: And there's a shortcut to its data
Step8: ...so it's easy to make a plot against depth
Step9: 4. Inspect the curves as a pandas dataframe
Step10: 5. Load the LAS file with welly
Step11: welly Wells know how to display some basics
Step12: And the Well object also has lasio's access to a pandas DataFrame
Step13: 6. Look at welly's Curve object
Step14: One important thing about Curves is that each one knows its own depths — they are stored as a property called basis. (It's not actually stored, but computed on demand from the start depth, the sample interval (which must be constant for the whole curve) and the number of samples in the object.)
Step15: 7. Plot part of a curve
Step16: 8. Smooth a curve
Step17: 9. Export a set of curves as a matrix
Step18: But we might want to do some other things, such as specify which curves you want (optionally using aliases like GR1, GRC, NGC, etc for GR), resample the data, or specify a start and stop depth — welly can do all this stuff. This method is also wrapped by Project.data_as_matrix() which is nice because it ensures that all the wells are exported at the same sample interval.
Step19: So CALI had 12,718 points in it... since we downsampled to 0.5 m and removed the top and tail, we should have substantially fewer points
Step20: 10+. BONUS
Step23: Let's look at some of the header
Step25: Make sure that works!
|
939 | <ASSISTANT_TASK:>
Python Code:
import graphlab
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
from em_utilities import *
wiki = graphlab.SFrame('people_wiki.gl/').head(5000)
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
tf_idf = normalize(tf_idf)
for i in range(5):
doc = tf_idf[i]
print(np.linalg.norm(doc.todense()))
from sklearn.cluster import KMeans
np.random.seed(5)
num_clusters = 25
# Use scikit-learn's k-means to simplify workflow
kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=-1)
kmeans_model.fit(tf_idf)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
means = [centroid for centroid in centroids]
cluster_assignment
num_docs = tf_idf.shape[0]
weights = []
for i in xrange(num_clusters):
# Compute the number of data points assigned to cluster i:
num_assigned = cluster_assignment[cluster_assignment == i].shape[0] # YOUR CODE HERE
w = float(num_assigned) / num_docs
weights.append(w)
covs = []
for i in xrange(num_clusters):
member_rows = tf_idf[cluster_assignment==i]
cov = (member_rows.power(2) - 2*member_rows.dot(diag(means[i]))).sum(axis=0).A1 / member_rows.shape[0] \
+ means[i]**2
cov[cov < 1e-8] = 1e-8
covs.append(cov)
out = EM_for_high_dimension(tf_idf, means, covs, weights, cov_smoothing=1e-10)
out['loglik']
len(out['means'])
# Fill in the blanks
def visualize_EM_clusters(tf_idf, means, covs, map_index_to_word):
print('')
print('==========================================================')
num_clusters = len(means)
for c in xrange(num_clusters):
print('Cluster {0:d}: Largest mean parameters in cluster '.format(c))
print('\n{0: <12}{1: <12}{2: <12}'.format('Word', 'Mean', 'Variance'))
# The k'th element of sorted_word_ids should be the index of the word
# that has the k'th-largest value in the cluster mean. Hint: Use np.argsort().
sorted_word_ids = np.argsort(-means[c])
for i in sorted_word_ids[:5]:
print '{0: <12}{1:<10.2e}{2:10.2e}'.format(map_index_to_word['category'][i],
means[c][i],
covs[c][i])
print '\n=========================================================='
'''By EM'''
visualize_EM_clusters(tf_idf, out['means'], out['covs'], map_index_to_word)
np.random.seed(5) # See the note below to see why we set seed=5.
num_clusters = len(means)
num_docs, num_words = tf_idf.shape
random_means = []
random_covs = []
random_weights = []
for k in range(num_clusters):
# Create a numpy array of length num_words with random normally distributed values.
# Use the standard univariate normal distribution (mean 0, variance 1).
# YOUR CODE HERE
mean = np.random.normal(0, 1, num_words)
# Create a numpy array of length num_words with random values uniformly distributed between 1 and 5.
# YOUR CODE HERE
cov = np.random.uniform(1,6,num_words)
# Initially give each cluster equal weight.
# YOUR CODE HERE
weight = 1
random_means.append(mean)
random_covs.append(cov)
random_weights.append(weight)
out_random_init = EM_for_high_dimension(tf_idf, random_means, random_covs, random_weights, cov_smoothing=1e-5)
out_random_init['loglik']
# YOUR CODE HERE. Use visualize_EM_clusters, which will require you to pass in tf_idf and map_index_to_word.
visualize_EM_clusters(tf_idf, out_random_init['means'], out_random_init['covs'], map_index_to_word)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also have a Python file containing implementations for several functions that will be used during the course of this assignment.
Step2: Load Wikipedia data and extract TF-IDF features
Step3: Using a utility we provide, we will create a sparse matrix representation of the documents. This is the same utility function you used during the previous assignment on k-means with text data.
Step4: As in the previous assignment, we will normalize each document's TF-IDF vector to be a unit vector.
Step5: We can check that the length (Euclidean norm) of each row is now 1.0, as expected.
Step6: EM in high dimensions
Step7: Initializing cluster weights
Step8: Initializing covariances
Step9: Running EM
Step10: Interpret clustering results
Step11: Quiz Question. Select all the topics that have a cluster in the model created above. [multiple choice]
Step12: Quiz Question
Step13: Quiz Question
|
940 | <ASSISTANT_TASK:>
Python Code:
def fact(n):
    if n == 1 or n == 0:
        return 1
    else:
        return n * fact(n - 1)
def findValue(n, r, a):
    k = (a - 1) // fact(n)
    answer = k
    for i in range(1, n + 1):
        answer = answer * (n + r - i)
    answer = answer + 1
    return answer
N = 1
A = 2
R = 3
print(findValue(N, R, A))
<END_TASK>
| <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
941 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
D = 0.9
nusigf = 0.70
siga = 0.066
#Lx = np.pi*((nusigf-siga)/D)**(-0.5)
Lx = 15.0
N = 55;
h = Lx/(N-1)
x = np.zeros(N)
for i in range(N-1):
x[i+1] = x[i] + h
L = np.zeros((N,N))
A = np.zeros((N,N))
M = np.zeros((N,N))
for i in range(N):
L[i][i] = L[i][i] + (-2*(-D/(h**2)))
for i in range(1,N):
L[i][i-1] = L[i][i-1] + (1*(-D/h**2))
for i in range(N-1):
L[i][i+1] = L[i][i+1] + (1*(-D/h**2))
#Generate flux vector
#Tolerance, k-effective and alpha initial guesses, alpha-eigenvalue modifier
tol = 1e-10
k = 1.00
alpha = 0.0
evm = 0.01
#Alpha-eigenvalue outer iteration
for j in range(1,100):
kprev = k
A = np.zeros((N,N))
phi0 = np.ones((N,1))
phi0[0] = 0
phi0[N-1] = 0
k = 1.0
for i in range(N):
A[i][i] = A[i][i] + siga + alpha
M = L + A
M[0][0] = 1
M[0][1] = 0
M[N-1][N-1] = 1
M[N-1][N-2] = 0
#k-effective inner iteration
for i in range(100):
kold = k
psi = np.linalg.solve(M,nusigf*phi0)
k = sum(nusigf*psi)/sum(nusigf*phi0)
phi0 = (1/k)*psi
phi0[0] = 0
phi0[N-1] = 0
residual = np.abs(k-kold)
if residual <= tol:
break
#Modify alpha-eigenvalue after first iteration, or linearly extrapolate new guess such that k equals 1
#Reminder: does not calculate both k and alpha
if j == 2:
alpha_prev = alpha
alpha = alpha + evm
elif j > 2:
#print "alpha-alpha_prev = ", alpha-alpha_prev
#print "alpha = ", alpha
#print "alpha_prev = ", alpha_prev
#print "j = ", j
if abs(alpha - alpha_prev) < tol:
break
alpha_new = alpha_prev + (1-kprev)/(k-kprev)*(alpha-alpha_prev)
alpha_prev = alpha
alpha = alpha_new
else:
continue
#if abs(k-1) < tol:
# print "alpha = ", alpha
# print "k-effective = ", k
# break
#print("abs(k-1) = %.20f" % abs(k-1))
print("alpha = %.15f" % alpha)
print('k-effective = %.15f' % k)
#plt.plot(x,phi0)
#plt.xlabel('Slab (cm)')
#plt.ylabel('Neutron Flux')
#plt.grid()
#print " alpha = ", (k-1)/k * sum(nusigf*phi0)/sum(phi0)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Material Properties
Step2: Slab Geometry Width and Discretization
Step3: Generation of Leakage Matrix
Step4: Algorithm
|
942 | <ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
raw.plot_psd(area_mode='range', tmax=10.0, show=False, average=True)
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False, average=True)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False, average=True)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1, fir_design='firwin')
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False, average=True)
ax.set_title('Four left-temporal magnetometers')
plt.legend(ax.lines[::3], ['Without SSP', 'With SSP', 'SSP + Notch'])
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Plot the raw PSD
Step3: Plot a cleaned PSD
Step4: Alternative functions for PSDs
|
943 | <ASSISTANT_TASK:>
Python Code:
import time
def time_usage(func):
def wrapper(*args, **kwargs):
beg_ts = time.time ()
func(*args, **kwargs)
end_ts = time.time ()
print("elapsed time: %f" % (end_ts - beg_ts))
return wrapper
@time_usage
def test():
for i in range(0, 100000):
pass
test()
import time
def new_timer(id=''):
beg_ts = time.time ()
def finish():
end_ts = time.time ()
print("%s elapsed time: %f" % (id, end_ts - beg_ts))
return end_ts - beg_ts
return finish
mytimer = new_timer('TIMERID')
for i in range(0, 100000):
pass
mytimer()
import timeit
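# timeit.timeit executes the statement `number` times and returns the total
# elapsed time in seconds (not a per-run average).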
timeit.timeit('"-".join(str(n) for n in range(100))', number=10000)
timeit.timeit('"-".join([str(n) for n in range(100)])', number=10000)
timeit.timeit('"-".join(map(str, range(100)))', number=10000)
import sys
print(sys.version)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To always time execution of a function, just add the decorator to its definition...
Step2: ...and whenever the function is run it will print the time spent in it
Step3: Timing code segments
Step4: The way it works is as follows
Step5: Timing snippets using timeit
Step6: Licence and version
|
944 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
an_array = np.array([3, 33, 333]) # Create a rank 1 array
print(type(an_array)) # The type of an ndarray is: "<class 'numpy.ndarray'>"
import numpy as np
my_array = np.array([2, 22, 456])
print(type(my_array))
# test the shape of the array we just created, it should have just one dimension (Rank 1)
print(an_array.shape)
print(my_array.shape)
# because this is a 1-rank array, we need only one index to accesss each element
print(an_array[0], an_array[1], an_array[2])
an_array[0] =888 # ndarrays are mutable, here we change an element of the array
print(an_array)
another = np.array([[11,12,13],[21,22,23]]) # Create a rank 2 array
print(another) # print the array
print("The shape is 2 rows, 3 columns: ", another.shape) # rows x columns
print("Accessing elements [0,0], [0,1], and [1,0] of the ndarray: ", another[0, 0], ", ",another[0, 1],", ", another[1, 0])
import numpy as np
# create a 2x2 array of zeros
ex1 = np.zeros((2,2))
print(ex1)
# create a 2x2 array filled with 9.0
ex2 = np.full((2,2), 9.0)
print(ex2)
# create a 2x2 matrix with the diagonal 1s and the others 0
ex3 = np.eye(2,2)
print(ex3)
# create an array of ones
ex4 = np.ones((1,2))
print(ex4)
# notice that the above ndarray (ex4) is actually rank 2, it is a 1x2 array
print(ex4.shape)
# which means we need to use two indexes to access an element
print()
print(ex4[0,1])
# create an array of random floats between 0 and 1
ex5 = np.random.random((2,2))
print(ex5)
import numpy as np
# Rank 2 array of shape (3, 4)
an_array = np.array([[11,12,13,14], [21,22,23,24], [31,32,33,34]])
print(an_array)
a_slice = an_array[:2, 1:3]
print(a_slice)
print("Before:", an_array[0, 1]) #inspect the element at 0, 1
a_slice[0, 0] = 1000 # a_slice[0, 0] is the same piece of data as an_array[0, 1]
print("After:", an_array[0, 1])
# Create a Rank 2 array of shape (3, 4)
an_array = np.array([[11,12,13,14], [21,22,23,24], [31,32,33,34]])
print(an_array)
# Using both integer indexing & slicing generates an array of lower rank
row_rank1 = an_array[1, :] # Rank 1 view
print(row_rank1, row_rank1.shape) # notice only a single []
# Slicing alone: generates an array of the same rank as the an_array
row_rank2 = an_array[1:2, :] # Rank 2 view
print(row_rank2, row_rank2.shape) # Notice the [[ ]]
#We can do the same thing for columns of an array:
print()
col_rank1 = an_array[:, 1]
col_rank2 = an_array[:, 1:2]
print(col_rank1, col_rank1.shape) # Rank 1
print()
print(col_rank2, col_rank2.shape) # Rank 2
# Create a new array
an_array = np.array([[11,12,13], [21,22,23], [31,32,33], [41,42,43]])
print('Original Array:')
print(an_array)
# Create an array of indices
col_indices = np.array([0, 1, 2, 0])
print('\nCol indices picked : ', col_indices)
row_indices = np.arange(4)
print('\nRows indices picked : ', row_indices)
# Examine the pairings of row_indices and col_indices. These are the elements we'll change next.
for row,col in zip(row_indices,col_indices):
print(row, ", ",col)
# Select one element from each row
print('Values in the array at those indices: ',an_array[row_indices, col_indices])
# Change one element from each row using the indices selected
an_array[row_indices, col_indices] += 100000
print('\nChanged Array:')
print(an_array)
# Test
my_array = np.array([[10,21,31],[10,20,30],[13,43,54]])
print(my_array[:2,])
# create a 3x2 array
an_array = np.array([[11,12], [21, 22], [31, 32]])
print(an_array)
# create a filter which will be boolean values for whether each element meets this condition
filter = (an_array > 15)
filter
# we can now select just those elements which meet that criteria
print(an_array[filter])
# For short, we could have just used the approach below without the need for the separate filter array.
an_array[(an_array % 2 == 0)]
an_array[an_array % 2 == 0] +=100
print(an_array)
ex1 = np.array([11, 12]) # Python assigns the data type
print(ex1.dtype)
ex2 = np.array([11.0, 12.0]) # Python assigns the data type
print(ex2.dtype)
ex3 = np.array([11, 21], dtype=np.int64) #You can also tell Python the data type
print(ex3.dtype)
# you can use this to force floats into integers (using floor function)
ex4 = np.array([11.1,12.7], dtype=np.int64)
print(ex4.dtype)
print()
print(ex4)
# you can use this to force integers into floats if you anticipate
# the values may change to floats later
ex5 = np.array([11, 21], dtype=np.float64)
print(ex5.dtype)
print()
print(ex5)
x = np.array([[1,20],[30,2]], dtype=int)
y = np.array([[4,8],[6,5]], dtype=np.float64)
print(x)
print()
print(y)
# add
print(x + y) # The plus sign works
print()
print(np.add(x, y)) # so does the numpy function "add"
# subtract
print(x - y)
print()
print(np.subtract(x, y))
# multiply
print(x * y)
print()
print(np.multiply(x, y))
# divide
print(x / y)
print()
print(np.divide(x, y))
# square root
print(np.sqrt(x))
# exponent (e ** x)
print(np.exp(x))
# setup a random 2 x 4 matrix
arr = 10 * np.random.randn(2,4)
print(arr)
# compute the mean for all elements
print(arr.mean())
# compute the means by row
print(arr.mean(axis = 1))
# compute the means by column
print(arr.mean(axis = 0))
# sum all the elements
print(arr.sum())
# compute the medians
print(np.median(arr, axis = 1))
# create a 10 element array of randoms
unsorted = np.random.randn(10)
print(unsorted)
# create copy and sort
sorted = np.array(unsorted)
sorted.sort()
print(sorted)
print()
print(unsorted)
# inplace sorting
unsorted.sort()
print(unsorted)
array = np.array([1,2,1,4,2,1,4,2])
print(np.unique(array))
s1 = np.array(['desk','chair','bulb'])
s2 = np.array(['lamp','bulb','chair'])
print(s1, s2)
print( np.intersect1d(s1, s2) )
print( np.union1d(s1, s2) )
print( np.setdiff1d(s1, s2) )# elements in s1 that are not in s2
print( np.in1d(s1, s2) )#which element of s1 is also in s2
import numpy as np
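# Broadcasting: NumPy stretches the smaller operand across the larger one whenever
# the trailing dimensions match or equal 1, so no explicit tiling is needed.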
start = np.zeros((4,3))
print(start)
# create a rank 1 ndarray with 3 values
add_rows = np.array([1, 0, 2])
print(add_rows)
y = start + add_rows # add to each row of 'start' using broadcasting
print(y)
# create an ndarray which is 4 x 1 to broadcast across columns
add_cols = np.array([[0,1,2,3]])
add_cols = add_cols.T
print(add_cols)
# add to each column of 'start' using broadcasting
y = start + add_cols
print(y)
# this will just broadcast in both dimensions
add_scalar = np.array([1])
print(start+add_scalar)
# create our 3x4 matrix
arrA = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
print(arrA)
# create our 4x1 array
arrB = [0,1,0,2]
print(arrB)
# add the two together using broadcasting
print(arrA + arrB)
from numpy import arange
from timeit import Timer
size = 1000000
timeits = 1000
# create the ndarray with values 0,1,2...,size-1
nd_array = arange(size)
print( type(nd_array) )
# timer expects the operation as a parameter,
# here we pass nd_array.sum()
timer_numpy = Timer("nd_array.sum()", "from __main__ import nd_array")
print("Time taken by numpy ndarray: %f seconds" %
(timer_numpy.timeit(timeits)/timeits))
# create the list with values 0,1,2...,size-1
a_list = list(range(size))
print (type(a_list) )
# timer expects the operation as a parameter, here we pass sum(a_list)
timer_list = Timer("sum(a_list)", "from __main__ import a_list")
print("Time taken by list: %f seconds" %
(timer_list.timeit(timeits)/timeits))
x = np.array([ 23.23, 24.24] )
np.save('an_array', x)
np.load('an_array.npy')
np.savetxt('array.txt', X=x, delimiter=',')
!cat array.txt
np.loadtxt('array.txt', delimiter=',')
# determine the dot product of two matrices
x2d = np.array([[1,1],[1,1]])
y2d = np.array([[2,2],[2,2]])
print(x2d.dot(y2d))
print()
print(np.dot(x2d, y2d))
# determine the inner product of two vectors
a1d = np.array([9 , 9 ])
b1d = np.array([10, 10])
print(a1d.dot(b1d))
print()
print(np.dot(a1d, b1d))
# dot produce on an array and vector
print(x2d.dot(a1d))
print()
print(np.dot(x2d, a1d))
# sum elements in the array
ex1 = np.array([[11,12],[21,22]])
print(np.sum(ex1)) # add all members
print(np.sum(ex1, axis=0)) # columnwise sum
print(np.sum(ex1, axis=1)) # rowwise sum
# random array
x = np.random.randn(8)
x
# another random array
y = np.random.randn(8)
y
# returns element wise maximum between two arrays
np.maximum(x, y)
# grab values from 0 through 19 in an array
arr = np.arange(20)
print(arr)
# reshape to be a 4 x 5 matrix
arr.reshape(4,5)
# transpose
ex1 = np.array([[11,12],[21,22]])
ex1.T
x_1 = np.array([1,2,3,4,5])
y_1 = np.array([11,22,33,44,55])
filter = np.array([True, False, True, False, True])
out = np.where(filter, x_1, y_1)
print(out)
mat = np.random.rand(5,5)
mat
np.where( mat > 0.5, 1000, -1)
arr_bools = np.array([ True, False, True, True, False ])
arr_bools.any()
arr_bools.all()
Y = np.random.normal(size = (1,5))[0]
print(Y)
Z = np.random.randint(low=2,high=50,size=4)
print(Z)
np.random.permutation(Z) #return a new ordering of elements in Z
np.random.uniform(size=4) #uniform distribution
np.random.normal(size=4) #normal distribution
K = np.random.randint(low=2,high=50,size=(2,2))
print(K)
print()
M = np.random.randint(low=2,high=50,size=(2,2))
print(M)
np.vstack((K,M))
np.hstack((K,M))
np.concatenate([K, M], axis = 0)
np.concatenate([K, M.T], axis = 1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2:
Step3:
Step4: Use array slicing to get a subarray consisting of the first 2 rows x 2 columns.
Step5: When you modify a slice, you actually modify the underlying array.
Step6: <p style="font-family
Step7: <p style="font-family
Step8: <p style="font-family
Step9: Notice that the filter is a same size ndarray as an_array which is filled with True for each element whose corresponding element in an_array which is greater than 15 and False for those elements whose value is less than 15.
Step10: What is particularly useful is that we can actually change elements in the array applying a similar logical filter. Let's add 100 to all the even values.
Step11: <p style="font-family
Step12: <p style="font-family
Step13: <p style="font-family
Step14: <p style="font-family
Step15: <p style="font-family
Step16: <p style="font-family
Step17: <p style="font-family
Step18: Example from the slides
Step19: <p style="font-family
Step20: <p style="font-family
Step21: <p style="font-family
Step22: <p style="font-family
Step23: <p style="font-family
Step24: <p style="font-family
Step25: <p style="font-family
Step26: <p style="font-family
Step27: <p style="font-family
Step28: <p style="font-family
Step29: <p style="font-family
Step30: <p style="font-family
|
945 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except ImportError:
pass
pd.options.display.max_rows = 10
df = pd.DataFrame({'key':['A','B','C','A','B','C','A','B','C'],
'data': [0, 5, 10, 5, 10, 15, 10, 15, 20]})
df
df.groupby('key').aggregate('sum') # np.sum
df.groupby('key').sum()
df.groupby(lambda x: x % 2).mean()
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
# %load snippets/04b - Advanced groupby operations8.py
# %load snippets/04b - Advanced groupby operations9.py
# %load snippets/04b - Advanced groupby operations10.py
# %load snippets/04b - Advanced groupby operations11.py
# %load snippets/04b - Advanced groupby operations12.py
# %load snippets/04b - Advanced groupby operations13.py
# %load snippets/04b - Advanced groupby operations15.py
df
df.groupby('key').transform('mean')
def normalize(group):
return (group - group.mean()) / group.std()
df.groupby('key').transform(normalize)
df.groupby('key').transform('sum')
# %load snippets/04b - Advanced groupby operations21.py
# %load snippets/04b - Advanced groupby operations22.py
# %load snippets/04b - Advanced groupby operations23.py
# %load snippets/04b - Advanced groupby operations24.py
s = 'Bradwurst'
s.startswith('B')
s = pd.Series(['Bradwurst', 'Kartoffelsalat', 'Sauerkraut'])
s.str.startswith('B')
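# Illustrative additions (not part of the original snippet): a couple more vectorized
# string methods available through the same .str accessor.
s.str.lower()
s.str.contains('wurst')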
# %load snippets/04b - Advanced groupby operations29.py
# %load snippets/04b - Advanced groupby operations30.py
# %load snippets/04b - Advanced groupby operations31.py
# %load snippets/04b - Advanced groupby operations32.py
titles.title.value_counts().head()
# %load snippets/04b - Advanced groupby operations34.py
# %load snippets/04b - Advanced groupby operations35.py
# %load snippets/04b - Advanced groupby operations36.py
# %load snippets/04b - Advanced groupby operations37.py
# %load snippets/04b - Advanced groupby operations38.py
# %load snippets/04b - Advanced groupby operations39.py
# %load snippets/04b - Advanced groupby operations40.py
# %load snippets/04b - Advanced groupby operations41.py
# %load snippets/04b - Advanced groupby operations42.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recap
Step2: Using the filtering and reductions operations we have seen in the previous notebooks, we could do something like
Step3: Pandas does not only let you group by a column name. In df.groupby(grouper), the grouper can be many things
Step4: And now applying this on some real data
Step5:
Step6:
Step7:
Step8:
Step9:
Step10:
Step11:
Step12: Transforms
Step13:
Step14:
Step15: Intermezzo
Step16: In pandas, those methods (together with some additional methods) are also available for string Series through the .str accessor
Step17: For an overview of all string methods, see
Step18:
Step19: Value counts
Step20:
Step21:
Step22:
Step23:
Step24:
Step25:
Step26:
Step27:
|
946 | <ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
from sympy import init_printing
from sympy import S
from sympy import sin, cos, tanh, exp, pi, sqrt
from boutdata.mms import x, y, z, t
from boutdata.mms import DDX
import os, sys
# If we add to sys.path, then it must be an absolute path
common_dir = os.path.abspath('./../../../../common')
# Sys path is a list of system paths
sys.path.append(common_dir)
from CELMAPy.MES import get_metric, make_plot, BOUT_print
init_printing()
folder = '../properZ/'
metric = get_metric()
# Initialization
the_vars = {}
# We need Lx
from boututils.options import BOUTOptions
myOpts = BOUTOptions(folder)
Lx = eval(myOpts.geom['Lx'])
# Gaussian with sinus and parabola
# The skew sinus
# In cartesian coordinates we would like a sinus with with a wave-vector in the direction
# 45 degrees with respect to the first quadrant. This can be achieved with a wave vector
# k = [1/sqrt(2), 1/sqrt(2)]
# sin((1/sqrt(2))*(x + y))
# We would like 2 nodes, so we may write
# sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))
# Rewriting this to cylindrical coordinates, gives
# sin((1/sqrt(2))*(x*(cos(z)+sin(z)))*(2*pi/(2*Lx)))
# The gaussian
# In cartesian coordinates we would like
# f = exp(-(1/(2*w^2))*((x-x0)^2 + (y-y0)^2))
# In cylindrical coordinates, this translates to
# f = exp(-(1/(2*w^2))*(x^2 + y^2 + x0^2 + y0^2 - 2*(x*x0+y*y0) ))
# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta)*cos(theta0)+sin(theta)*sin(theta0)) ))
# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta - theta0)) ))
# A parabola
# In cartesian coordinates, we have
# ((x-x0)/Lx)^2
# Choosing this function to have a zero value at the edge yields in cylindrical coordinates
# ((x*cos(z)+Lx)/(2*Lx))^2
w = 0.8*Lx
rho0 = 0.3*Lx
theta0 = 5*pi/4
the_vars['f'] = sin((1/sqrt(2))*(x*(cos(z)+sin(z)))*(2*pi/(2*Lx)))*\
exp(-(1/(2*w**2))*(x**2 + rho0**2 - 2*x*rho0*(cos(z - theta0)) ))*\
((x*cos(z)+Lx)/(2*Lx))**2
the_vars['S'] = DDX(DDX(the_vars['f'], metric=metric), metric=metric)
make_plot(folder=folder, the_vars=the_vars, plot2d=True, include_aux=False)
BOUT_print(the_vars, rational=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize
Step2: Define the variables
Step3: Define the function to take the derivative of
Step4: Calculating the solution
Step5: Plot
Step6: Print the variables in BOUT++ format
|
947 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from sklearn import __version__ as sklearn_version
print('Sklearn version:', sklearn_version)
from sklearn import datasets
digits = datasets.load_digits()
print(digits.DESCR)
print(digits.data)
print(digits.target)
#Randomize and separate train & test
from sklearn.utils import shuffle
X, y = shuffle(digits.data, digits.target, random_state=0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
print('Accuracy tree: ', accuracy_score(y_test, clf.predict(X_test)))
def acc(i):
clf = RandomForestClassifier(max_features=i)
clf.fit(X_train, y_train)
return accuracy_score(y_test, clf.predict(X_test))
index = []
accuracy = []
for i in range(2,30):
index += [i]
accuracy += [acc(i)]
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(index,accuracy)
import numpy as np
from time import time
from scipy.stats import randint
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
# Define estimator. No parameters
clf = RandomForestClassifier(n_estimators=20)
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": randint(1, 11),
"min_samples_split": randint(2, 11),
"min_samples_leaf": randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
print(random_search.cv_results_)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
report(random_search.cv_results_)
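# Illustrative addition (not in the original notebook): RandomizedSearchCV also exposes the
# best configuration directly, which can be evaluated on the held-out test split.
print(random_search.best_params_)
print('Best CV score: {:.3f}'.format(random_search.best_score_))
print('Test accuracy: {:.3f}'.format(accuracy_score(y_test, random_search.best_estimator_.predict(X_test))))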
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
print('Accuracy tree: ', accuracy_score(y_test, clf.predict(X_test)))
# Configure model
from sklearn import svm
clf_svc = svm.SVC(gamma=0.001, C=100.)
clf_svc.fit(X_train, y_train)
print(accuracy_score(y_test, clf_svc.predict(X_test)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: RandomForestClassifier
Step3: Hyperparameter optimization
Step4: Use of RandomizedSearchCV
|
948 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
data = np.array([4, 2, 5, 6, 7, 5, 4, 3, 5, 7])
bin_size = 3
bin_data_max = data[:(data.size // bin_size) * bin_size].reshape(-1, bin_size).max(axis=1)
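# Added check (not in the original snippet): with the sample data above the three complete
# bins are [4, 2, 5], [6, 7, 5] and [4, 3, 5], so the expected output is [5 7 5].
print(bin_data_max)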
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
949 | <ASSISTANT_TASK:>
Python Code:
#general imports
import matplotlib.pyplot as plt
import pygslib
import numpy as np
import pandas as pd
#make the plots inline
%matplotlib inline
#get the data in gslib format into a pandas Dataframe
cluster= pygslib.gslib.read_gslib_file('../data/cluster.dat')
ydata = pygslib.gslib.read_gslib_file('../data/ydata.dat')
ydata.head()
cluster.head()
#view data in a 2D projection
plt.scatter(ydata['Xlocation'],ydata['Ylocation'], c=ydata['Secondary'],
alpha=1, s=15, marker =',', linewidths= (0,))
plt.scatter(cluster['Xlocation'],cluster['Ylocation'], c=cluster['Secondary'])
plt.colorbar()
plt.grid(True)
plt.show()
npoints = len(cluster['Secondary'])
ydata['Declustering Weight']=1
#using declustering weight
parameters_qpplt = {
'qqorpp' : 0, # Q-Q plot (qqorpp=0); P-P plot (qqorpp=1)
'npts' : npoints, # number of points to use on the Q-Q or P-P plot (should not exceed the smallest number of data in data1 / data2
'va1' : cluster['Secondary'], # array('d') with bounds (nd)
'va2' : ydata['Secondary'], # array('d') with bounds (nd)
'wt1' : cluster['Declustering Weight'],# array('d') with bounds (nd)
'wt2' : ydata['Declustering Weight']} # array('d') with bounds (nd)
vr1a,vr2a,error = pygslib.gslib.__plot.qpplt(**parameters_qpplt)
print ('error ? ', error != 0)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot (vr1a, vr2a, 'o')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel ('cluster|secundary')
ax.set_ylabel ('ydata|secundary')
ax.set_title ('QQ PLot')
plt.grid(True)
fig.show
npoints = len(cluster['Secondary'])
ydata['Declustering Weight']=1
#using declustering weight
parameters_qpplt = {
'qqorpp' : 0, # Q-Q plot (qqorpp=0); P-P plot (qqorpp=1)
'npts' : npoints, # number of points to use on the Q-Q or P-P plot (should not exceed the smallest number of data in data1 / data2
'va1' : cluster['Primary'], # array('d') with bounds (nd)
'va2' : ydata['Secondary'], # array('d') with bounds (nd)
'wt1' : cluster['Declustering Weight'],# array('d') with bounds (nd)
'wt2' : ydata['Declustering Weight']} # array('d') with bounds (nd)
vr1a,vr2a,error = pygslib.gslib.__plot.qpplt(**parameters_qpplt)
print ('error ? ', error != 0)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot (vr1a, vr2a, 'o')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel ('cluster|primary')
ax.set_ylabel ('ydata|secundary')
ax.set_title ('QQ PLot')
plt.grid(True)
fig.show
print (pygslib.gslib.__bicalib.__doc__)
parameters_bicalib = {
'vval' : ydata['Secondary'], # secondary data
'u' : cluster['Primary'], # calibration scatterplot (primary data)
'v' : cluster['Secondary'], # calibration scatterplot (secondary data)
'wt' : cluster['Declustering Weight'], # calibration scatterplot (weight data)
'cutu' : [0.5,1.,2.5,5.,10.], # thresholds on primary
'cutv' : [0.5,1.,2.5,5.,10.]} # thresholds on secondary
ssqu,avgu,umin,umax,ssqv,avgv,vmin,vmax, \
pdfrep,fract,yx,em,vm,nm,b,lcdf,error = pygslib.gslib.__bicalib.bicalib(**parameters_bicalib)
print ('error ? ', error != 0)
[str(i) for i in [1,2,3]]
U_cutoff= np.arange(len(parameters_bicalib['cutu'])) +1
index = [str(i) for i in parameters_bicalib['cutu']]
index.append('max')
columns = [str(i) for i in parameters_bicalib['cutv']]
columns.append('max')
uv_class= pd.DataFrame(pdfrep, index= index, columns = columns)
U_le_cutoff = pd.DataFrame ({'cutoff': parameters_bicalib['cutu'], 'total':nm[:,1],'mean': em[:,1], 'var' : vm[:,1]})
U_ge_cutoff = pd.DataFrame ({'cutoff': parameters_bicalib['cutu'], 'total':nm[:,0],'mean': em[:,0], 'var' : vm[:,0]})
print (' MARKOV-BAYES CALIBRATION REPORT ')
print (' ----------------------------------------')
print ('Primary variable: average =', avgu)
print (' variance =', ssqu)
print (' minimum =', umin)
print (' maximum =', umax)
print ('Secondary variable: average =', avgv)
print (' variance =', ssqv)
print (' minimum =', vmin)
print (' maximum =', vmax)
print (' Cutoffs on Primary Variable ')
for i in U_cutoff:
print (' U cutoff', i, ' cutoff = ', parameters_bicalib['cutu'][i-1], ' cdf = ' , fract[i-1])
print ('')
print ('Number within each bivariate (u,v) class: ??? max is differen ??? check this')
print (uv_class)
print ('')
print (' The cumulative frequency (local prior cdf) table:')
print (yx)
print ('')
print ('cutoff,total#,mean,Variance')
print (' for U(x) <= cutoff ')
print (U_le_cutoff)
print (' for U(x) > cutoff')
print (U_ge_cutoff)
print ('')
print (pd.DataFrame({'B(i) values:':b}))
names=['P_Thr_'+x for x in map (str , parameters_bicalib['cutu'])]
Ptr=pd.DataFrame(lcdf, columns= names)
print (Ptr.head())
print (Ptr.tail())
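# Optional addition (not in the original script): Ptr is a plain pandas DataFrame, so the
# local prior cdf table can be exported for later use, e.g. to a CSV file (filename is an example).
Ptr.to_csv('markov_bayes_lcdf.csv', index=False)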
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data ready for work
Step2: Testing bicalib
Step3: Comparing results with the report file (bicalib.rep)
Step4: Note
Step5: expected results
|
950 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import chap01soln
import thinkstats2
import thinkplot
import nsfg
import numpy as np
import pandas as pd
resp = chap01soln.ReadFemResp()
fem = nsfg.ReadFemPreg()
pmf = thinkstats2.Pmf(resp.numkdhh, label='unbiased')
first = fem[fem.birthord == 1]
other = fem[fem.birthord != 1]
first_pmf = thinkstats2.Pmf(first.prglngth)
other_pmf = thinkstats2.Pmf(other.prglngth)
width = 0.45
thinkplot.PrePlot(2, cols=2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left',width=width)
thinkplot.Config(xlabel='weeks',
ylabel='probability',
axis=[27,46,0,0.6])
thinkplot.PrePlot(2)
thinkplot.SubPlot(2)
thinkplot.Pmfs([first_pmf,other_pmf])
thinkplot.Show(xlabel='weeks', axis=[27,46,0,0.6])
thinkplot.Pmf(pmf)
def BiasPmf(pmf, label=''):
"""Returns the Pmf with oversampling proportional to value.
If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
"""
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
b_pmf = BiasPmf(pmf, label='biased')
thinkplot.Pmfs([pmf, b_pmf])
def PmfMean(pmf):
mean = 0
for x, p in pmf.Items():
mean += p*x
return mean
def PmfVar(pmf):
var = 0
mean = PmfMean(pmf)
for x, p in pmf.Items():
var += p*pow((x-mean),2)
return var
print PmfMean(pmf), pmf.Mean()
print PmfVar(pmf), pmf.Var()
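# Added comparison (not in the original notebook): the mean of the biased Pmf should be
# noticeably larger than the unbiased mean, which is the point of the length-biasing exercise.
print PmfMean(b_pmf), b_pmf.Mean()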
array = np.random.randn(4, 2)  # example data added so the DataFrame below is well-defined
df = pd.DataFrame(array, columns=list('AB'), index=list('abcd'))
df.loc['a':'c']
live = fem[fem.outcome == 1]
map = nsfg.MakePregMap(live)
diffs = []
for x, v in map.iteritems():
if len(v) > 1:
rows = fem.iloc[v]
other_avg_prglngth = rows[rows.birthord > 1].prglngth.mean()
diff = rows[rows.birthord == 1].prglngth.iloc[0] - other_avg_prglngth
diffs.append(abs(diff*7*24))
prg_pmf = thinkstats2.Pmf(diffs)
thinkplot.Pmf(prg_pmf)
thinkplot.Show(xlabel='Hours', cuts=range(200)*10)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a PMF of <tt>numkdhh</tt>, the number of children under 18 in the respondent's household.
Step2: Display the PMF.
Step4: Define <tt>BiasPmf</tt>.
Step5: Make the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents.
Step6: Compute the means of the two Pmfs.
|
951 | <ASSISTANT_TASK:>
Python Code:
#importar los paquetes que se van a usar
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import datetime
from datetime import datetime
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
#algunas opciones para Python
pd.set_option('display.notebook_repr_html', True)
pd.set_option('display.max_columns', 6)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 78)
pd.set_option('precision', 3)
def call_payoff(ST, K):
return max(0, ST-K)
call_payoff(25, 30)
def call_payoffs(STmin, STmax, K, step=1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(call_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
call_payoffs(10,25,15)
def plot_call_payoffs(STmin, STmax, K, step=1):
payoffs = call_payoffs(STmin, STmax, K, step)
plt.ylim(payoffs.Payoff.min() - 10, payoffs.Payoff.max() + 10)
plt.ylabel("Payoff")
plt.xlabel("Precio de maduración")
plt.title('Payoff call, Precio strike={0}'.format(K))
plt.xlim(STmin, STmax)
plt.plot(payoffs.index, payoffs.Payoff.values);
plot_call_payoffs(10, 25, 15)
def put_payoff(ST, K):
return max(0, K-ST)
put_payoff(25, 30)
def put_payoffs(STmin, STmax, K, step=1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(put_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
put_payoffs(10,25,15)
def plot_put_payoffs(STmin, STmax, K, step=1):
payoffs = put_payoffs(STmin, STmax, K, step)
plt.ylim(payoffs.Payoff.min() - 10, payoffs.Payoff.max() + 10)
plt.ylabel("Payoff")
plt.xlabel("Precio de maduración")
plt.title('Payoff put, Precio strike={0}'.format(K))
plt.xlim(STmin, STmax)
plt.plot(payoffs.index, payoffs.Payoff.values);
plot_put_payoffs(10, 25, 15)
def call_pnl_buyer(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(call_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnL': payoffs-ct}, index=maturities)
df.index.name = 'Precio de maduración'
return df
call_pnl_buyer(12, 15, 10, 35)
def call_pnl_seller(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(call_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnL': ct-payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
call_pnl_seller(12, 15, 10, 35)
def call_pnl_combined(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(call_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnLcomprador': payoffs-ct, 'PnLvendedor': ct-payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
call_pnl_combined(12, 15, 10, 35)
def put_pnl_buyer(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(put_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnL': payoffs-ct}, index=maturities)
df.index.name = 'Precio de maduración'
return df
put_pnl_buyer(2, 15, 10, 30)
def put_pnl_seller(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(put_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnL': ct-payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
put_pnl_seller(2, 15, 10, 30)
def put_pnl_combined(ct, K, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs = np.vectorize(put_payoff)(maturities, K)
df = pd.DataFrame({'Strike': K, 'Payoff': payoffs, 'Prima': ct, 'PnLcomprador': payoffs-ct, 'PnLvendedor': ct-payoffs}, index=maturities)
df.index.name = 'Precio de maduración'
return df
put_pnl_combined(2, 15, 10, 30)
def plot_pnl(pnl_df, okind, who):
plt.ylim(pnl_df.Payoff.min() - 10, pnl_df.Payoff.max() + 10)
plt.ylabel("Ganancia/pérdida")
plt.xlabel("Precio de maduración")
plt.title('Ganancia y pérdida de una opción {0} para el {1}, Prima={2}, Strike={3}'.format(okind, who, pnl_df.Prima.iloc[0],
pnl_df.Strike.iloc[0]))
plt.ylim(pnl_df.PnL.min()-3, pnl_df.PnL.max() + 3)
plt.xlim(pnl_df.index[0], pnl_df.index[len(pnl_df.index)-1])
plt.plot(pnl_df.index, pnl_df.PnL)
plt.axhline(0, color='g');
plot_pnl(call_pnl_buyer(12, 15, 10, 35), "call", "comprador")
plot_pnl(call_pnl_seller(12, 15, 10, 35), "call", "vendedor")
plot_pnl(put_pnl_buyer(2, 15, 10, 30), "put", "comprador")
plot_pnl(call_pnl_buyer(3, 30, 25, 40), "call", "comprador")
plot_pnl(call_pnl_seller(1, 35, 25, 40), "call", "vendedor")
def bull_call(ct_short, K_short, ct_long, K_long, STmin, STmax, step = 1):
maturities = np.arange(STmin, STmax+step, step)
payoffs_cl = np.vectorize(call_payoff)(maturities, K_long)
payoffs_cs = np.vectorize(call_payoff)(maturities, K_short)
df = pd.DataFrame({'Strike': K_long, 'Payoff': payoffs_cs, 'Prima': ct_long-ct_short, 'PnL': payoffs_cl-ct_long+(ct_short-payoffs_cs)}, index=maturities)
df.index.name = 'Precio de maduración'
return df
plot_pnl(bull_call(1, 35, 3, 30, 25, 40), "call", "comprador")
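# Added check (not in the original notebook): the bull call spread built above costs a net
# premium of 3 - 1 = 2, so its P&L should be bounded between -2 (below the 30 strike)
# and 5 - 2 = 3 (above the 35 strike).
bc = bull_call(1, 35, 3, 30, 25, 40)
print(bc.PnL.min(), bc.PnL.max())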
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Payoff plots for European call and put options
Step2: 2. Strategy plots
Step3: Sells a three-month call option with a $\$35$ strike price for $\$1$
Step4: The payoff from this bull spread strategy is $\$5$ if the stock price is above $\$35$ and zero if it is below $\$30$. If the stock price is between $\$30$ and $\$35$, the payoff is the amount by which the stock price exceeds $\$30$. The cost of the strategy is $\$2$, so the maximum profit is $\$5 - \$2 = \$3$.
|
952 | <ASSISTANT_TASK:>
Python Code:
print(2 ** 6)
print(-3)
print(2 * 6)
print(6 / 2)
print(14 % 3)
print(2 + 6)
print(6 - 2)
# Operación especial que realiza la división y solo muestra el valor entero (sin decimales)
print(14 // 3)
# Primero se ejecuta la elevación del número 2 seis veces (64) y luego se le suma 7
print(2**6+7)
# Primero se ejecuta la multiplicación y luego la resta
print(8-6*9)
# Para clarificar las cosas se recomienda el uso de paréntesis para separar las operaciones según su orden de ejecución.
# También es necesario usarlos cuando dos operaciones tengan el mismo valor de ejecución, para obtener el resultado esperado.
# Ejemplos en el uso de los paréntesis:
print(9+1-2+3*6)
print((9+1-3+2)*6)
print(((9+1)-(3+2))*6)
# Los tipos de datos pueden combinarse según nos convenga sin que suponga un problema
# tipo int
sueldo_bruto = 35000
#tipo float
retenciones = 17.329
neto = sueldo_bruto-(sueldo_bruto*0.17329)
# Para concatenar variables de tipo string con variables de otro tipo, hay que convertir la variable NO string a string
# Es decir, realizar un casting de variables
print("Tu sueldo neto es de " + str(neto))
# FUNCIONES Y CASTEO DE VARIABLES
# Valor absoluto
abs ( 2 - 7 )
# Valor máximo
max (1 , 5 , 8 , 7)
# Valor mínimo
min( -1 , 1 , 0 )
# Redondeo de un numero float
round( 18.6 )
# Casteo de un float a un int
int (18.6 )
# Casteo de un int a un float
float ( 1 )
# Convierte un entero a una cadena de caracteres
str ( 256568 )
# Se pueden concatenar funciones mediante el uso de comas
abs ( 2 - 7 ), max (1 , 5 , 8 , 7), min( -1 , 1 , 0 ), round( 18.6 ), int (18.6 ), float ( 1 ), str ( 256568 )
# la comparación devuelve un valor lógico
a = True
b = False
print(a == b)
# Uso de AND, OR, NOT
# con AND debe cumplir las dos condiciones para devolver un TRUE, sino será FALSE
5 * 4 <= 100 and 3 > 9
# con OR debe cumplir al menos una condiciones para devolver un TRUE, sino será FALSE
5 * 4 > 100 or 3 < 9
# con NOT no debe cumplir ninguna condición para devolver un TRUE, sino será FALSE
not 5 * 4 > 10
# Siendo TRUE = 1 el resultado será 5
(True + 4) * True
# Siendo FALSE = 0 y TRUE = 1 el resultado será 2
(False + 2) * True
larga = """Los Reyes Católicos fue la denominación que recibieron los esposos
Fernando II de Aragón e Isabel I de Castilla, soberanos de la Corona de
Castilla (1474-1504) y de la Corona de Aragón (1479-1516).
Los Reyes accedieron al trono de Castilla tras la Guerra de Sucesión
Castellana (1475-1479) contra los partidarios de la princesa Juana la
Beltraneja, hija del rey Enrique IV de Castilla. En 1479 Fernando heredó el
trono de Aragón al morir su padre, el rey Juan II de Aragón. Isabel y Fernando
reinaron juntos hasta la muerte de ella en 1504. Entonces Fernando quedó
únicamente como rey de Aragón, pasando Castilla a su hija Juana, apodada
"la Loca", y a su marido Felipe de Austria, apodado "el Hermoso", Archiduque
de Austria, duque de Borgoña y conde de Flandes. Sin embargo Fernando no
renunció a controlar Castilla y, tras morir Felipe en 1506 y ser declarada
Juana incapaz, consiguió ser nombrado regente del reino hasta su muerte en 1516"""
print(larga)
# El uso de dobles comillas en cadenas de caracteres debe respetar un formato concreto, puesto que " es un caracter especial.
# La sintaxis debe ser como en el ejemplo, teniendo en cuenta que \ es otro caracter especial:
# el caracter que sigue a \ se interpreta de forma literal (secuencia de escape).
comillas = "Las \"comillas\" son caracteres especiales"
comillas
especial = "Hay otros caracteres especiales como el salto de línea.\nSirve para representar el fín de línea en ficheros."
print(especial)
texto = "Cambio: %d %s son %.5f %s"
print(texto % (1, "dolar" , 0.89465444 , "euros"))
print(texto % (1, "euro", 1.11775, "dólares"))
radio = 4.323343
pi = 3.141592
area = pi * radio ** 2
print ('El área de un círculo de radio %.6f es %.6f' % (radio, area))
# El formato de DATETIME es con notación americana (año, mes, dia, hora, minutos, segundos)
from datetime import datetime, date, time
dt = datetime(2011, 10, 5, 16,50,0)
print(dt.day, dt.month, dt.year)
texto = "La fecha es: %d/%d/%d"
texto2 = "Y son las: %d:%d:%d"
print(texto % (dt.day, dt.month, dt.year))
print(texto2 % (dt.hour, dt.minute, dt.second))
# Un ejemplo extrayendo la fecha actual del sistema y presentandola en formato STRING
from datetime import datetime, date, time
fecha = datetime.now()
fecha.strftime("La fecha de hoy es: "'%d/%m/%Y %H:%M:%S')
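# Added example (not in the original notebook): the inverse operation, parsing a date
# string back into a datetime object with strptime.
fecha_parseada = datetime.strptime("05/10/2011 16:50:00", "%d/%m/%Y %H:%M:%S")
print(fecha_parseada)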
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables in Python
Step2:
Step3: The value True corresponds to the integer 1, while the value False corresponds to the integer 0. It is therefore possible to perform arithmetic operations with boolean values.
Step5:
Step6:
Step7:
|
953 | <ASSISTANT_TASK:>
Python Code:
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
smoothing_steps = 7
# Read evoked data
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
# Read inverse solution
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname_inv)
# Apply inverse solution, set pick_ori='vector' to obtain a
# :class:`mne.VectorSourceEstimate` object
snr = 3.0
lambda2 = 1.0 / snr ** 2
stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector')
# Use peak getter to move visualization to the time point of the peak magnitude
_, peak_time = stc.magnitude().get_peak(hemi='lh')
brain = stc.plot(
initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
smoothing_steps=smoothing_steps)
# You can save a brain movie with:
# brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16, framerate=10,
# interpolation='linear', time_viewer=True)
stc_max, directions = stc.project('pca', src=inv['src'])
# These directions must by design be close to the normals because this
# inverse was computed with loose=0.2
print('Absolute cosine similarity between source normals and directions: '
f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}')
brain_max = stc_max.plot(
initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
time_label='Max power', smoothing_steps=smoothing_steps)
brain_normal = stc.project('normal', inv['src'])[0].plot(
initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
time_label='Normal', smoothing_steps=smoothing_steps)
fname_inv_fixed = (
data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-fixed-inv.fif')
inv_fixed = read_inverse_operator(fname_inv_fixed)
stc_fixed = apply_inverse(
evoked, inv_fixed, lambda2, 'dSPM', pick_ori='vector')
brain_fixed = stc_fixed.plot(
initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
smoothing_steps=smoothing_steps)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the source estimate
Step2: Plot the activation in the direction of maximal power for this data
Step3: The normal is very similar
Step4: You can also do this with a fixed-orientation inverse. It looks a lot like the loose-orientation result above.
|
954 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
#load the files
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
data = pd.concat([train, test])
#size of training dataset
train_samples = train.shape[0]
#print some of them
data.head()
# remove the Id feature, because is not useful for price predictions
data.drop(['Id'],1, inplace=True);
data.info()
print("Size training: {}".format(train.shape[0]))
print("Size testing: {}".format(test.shape[0]))
datanum = data.select_dtypes([np.number])
datanum.columns[datanum.isnull().any()].tolist()
#number of row without NaN
print(datanum.shape[0] - datanum.dropna().shape[0])
#list of columns with NaN
datanum.columns[datanum.isnull().any()].tolist()
#Filling with the mean
datanum_no_nan = datanum.fillna(datanum.dropna().mean())
#check
datanum_no_nan.columns[datanum_no_nan.isnull().any()].tolist()
import matplotlib.pyplot as plt
%matplotlib inline
# All numeric features except the 'SalePrice'
datanum_no_nan.drop(['SalePrice'], axis=1).head(30).plot(legend=False);
# SalePrice
datanum_no_nan['SalePrice'].head(30).plot(legend=False);
# Showing SalePrice distribution
data.SalePrice.hist(bins=50)
# Transforming to non-skewed SalePrice
data.SalePrice = data.SalePrice.apply(np.log)
data.SalePrice.hist(bins=50)
#Squeeze the data using standard scaler: z = (x - mean)/ std
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
columns = datanum_no_nan.columns.drop('SalePrice')
print("Features: {}".format(columns))
#make a copy
data_norm = datanum_no_nan
data_norm[columns] = scaler.fit_transform(datanum_no_nan[columns])
print("Train shape: {}".format(data_norm.shape))
data_norm.drop(['SalePrice'], axis=1).head(30).plot(legend=False);
# Correlation features
data_norm.corr()['SalePrice'].sort_values(ascending=False).head(10)
high_corr_feat_names = data_norm.corr()['SalePrice'].sort_values(ascending=False).head(10).axes[0].tolist()
high_corr_feat_names.remove('SalePrice')
data_norm_high_corr = data_norm[high_corr_feat_names]
#heatmap between the most correlated features
import seaborn as sns
fig = plt.figure(figsize=(7, 5))
sns.heatmap(data_norm_high_corr.corr());
#plotting distributions of numeric features
data_norm_high_corr.hist(bins=50, figsize=(22,16));
#Relationships between correlated features
for feature in high_corr_feat_names:
data.plot.scatter(feature, 'SalePrice');
from sklearn.model_selection import KFold
y = np.array(data['SalePrice'])
X = np.array(data_norm_high_corr)
#split by idx
idx = train_samples
X_train, X_test = X[:idx], X[idx:]
y_train, y_test = y[:idx], y[idx:]
print("Shape X train: {}".format(X_train.shape))
print("Shape y train: {}".format(y_train.shape))
print("Shape X test: {}".format(X_test.shape))
print("Shape y test: {}".format(y_test.shape))
kf = KFold(n_splits=3, random_state=9, shuffle=True)
print(kf)
#plotting PCA
from sklearn.decomposition import PCA
def getX_PCA(X):
pca = PCA(n_components=1)
return pca.fit(X).transform(X)
def plotPCA(X, y):
pca = PCA(n_components=1)
X_r = pca.fit(X).transform(X)
plt.plot(X_r, y, 'x')
from sklearn.covariance import EllipticEnvelope
# fit the model
ee = EllipticEnvelope(contamination=0.1,
assume_centered=True,
random_state=9)
ee.fit(X_train)
pred = ee.predict(X_train)
X_train_orig = X_train
y_train_orig = y_train
X_bad = X_train[pred != 1]
y_bad = y_train[pred != 1]
X_train = X_train[pred == 1]
y_train = y_train[pred == 1]
print("Number samples: {}".format(X_train.shape[0]))
#after removing anomalies
plt.scatter(getX_PCA(X_train), y_train)
plt.scatter(getX_PCA(X_bad), y_bad)
# Get polynomial features
from sklearn.preprocessing import PolynomialFeatures
poly = preprocessing.PolynomialFeatures(degree=2)
X_train_orig = poly.fit_transform(X_train_orig)
X_train = poly.fit_transform(X_train)
X_test = poly.fit_transform(X_test)
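# Added sanity check (not in the original notebook): degree-2 PolynomialFeatures expands the
# 9 selected features into 1 bias + 9 linear + 45 quadratic terms = 55 columns.
print(X_train.shape, X_test.shape)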
# Linear regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lr = LinearRegression()
#
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
lr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, lr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, lr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f} Score: {:.2f}"
.format(batch, t_error, v_error, lr.score(X_v, y_v)))
batch += 1
#Scores
print("Training score: {:.4f}".format(lr.score(X_train_orig, y_train_orig)))
#RMSLE
rmsle = mean_squared_error(y_train_orig, lr.predict(X_train_orig))**0.5
print("RMSLE: {:.4f}".format(rmsle))
# Plotting the results
plt.scatter(lr.predict(X_train_orig), y_train_orig)
# Gradient boosting
from sklearn import ensemble
params = {'n_estimators': 100, 'max_depth': X_train.shape[1], 'min_samples_split': 5,
'learning_rate': 0.1, 'loss': 'ls', 'random_state':9, 'warm_start':True}
gbr = ensemble.GradientBoostingRegressor(**params)
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
gbr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, gbr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, gbr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f} Score: {:.2f}"
.format(batch, t_error, v_error, gbr.score(X_v, y_v)))
batch += 1
#Scores
print("Training score: {:.4f}".format(gbr.score(X_train_orig, y_train_orig)))
#RMSLE
rmsle = mean_squared_error(y_train_orig, gbr.predict(X_train_orig))**0.5
print("RMSLE: {:.4f}".format(rmsle))
# Plotting the results
plt.scatter(gbr.predict(X_train_orig), y_train_orig)
# AdaBoost
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
abr = AdaBoostRegressor(DecisionTreeRegressor(max_depth=X_train.shape[1]),
n_estimators=100, random_state=9)
batch = 0
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
abr.fit(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, abr.predict(X_t))**0.5
v_error = mean_squared_error(y_v, abr.predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f} Score: {:.2f}"
.format(batch, t_error, v_error, abr.score(X_v, y_v)))
batch += 1
#Scores
print("Training score: {:.4f}".format(abr.score(X_train_orig, y_train_orig)))
#RMSLE
rmsle = mean_squared_error(y_train_orig, abr.predict(X_train_orig))**0.5
print("RMSLE: {:.4f}".format(rmsle))
# Plotting the results
plt.scatter(abr.predict(X_train_orig), y_train_orig)
from sklearn.linear_model import LinearRegression
slr = LinearRegression()
sclr = preprocessing.StandardScaler()
def features_level1(X):
X0 = lr.predict(X)
X1 = gbr.predict(X)
X2 = abr.predict(X)
Xt = np.array([X0, X1, X2]).T
return sclr.fit_transform(Xt)
def stack_training(X, y):
slr.fit(features_level1(X), y)
def stack_predict(X):
return slr.predict(features_level1(X))
def stack_score(X, y):
return slr.score(features_level1(X), y)
#
batch = 0
kf = KFold(n_splits=3, random_state=9)
for train_idx, val_idx in kf.split(X_train, y_train):
X_t, X_v = X_train[train_idx], X_train[val_idx]
y_t, y_v = y_train[train_idx], y_train[val_idx]
#training
stack_training(X_t, y_t)
#calculate costs
t_error = mean_squared_error(y_t, stack_predict(X_t))**0.5
v_error = mean_squared_error(y_v, stack_predict(X_v))**0.5
print("{}) Training error: {:.2f} Validation error: {:.2f} Score: {:.2f}"
.format(batch, t_error, v_error, stack_score(X_v, y_v)))
batch += 1
rmsle = mean_squared_error(y_train_orig, stack_predict(X_train_orig))**0.5
print("RMSLE: {:.4f}".format(rmsle))
# Plotting the results
plt.scatter(stack_predict(X_train_orig), y_train_orig)
def avg_predict(X):
return (lr.predict(X) + gbr.predict(X) + abr.predict(X))/3
predictions = avg_predict(X_train_orig)
RMSLE = mean_squared_error(y_train_orig, predictions)**0.5
print("RMSLE: {:.3f}".format(RMSLE))
# Plotting the results
plt.scatter(avg_predict(X_train_orig), y_train_orig)
from sklearn.metrics import mean_squared_error
import random
RMSLE_lr = mean_squared_error(y_train, lr.predict(X_train))**0.5
RMSLE_gbr = mean_squared_error(y_train, gbr.predict(X_train))**0.5
RMSLE_abr = mean_squared_error(y_train, abr.predict(X_train))**0.5
RMSLE_avg = mean_squared_error(y_train, stack_predict(X_train))**0.5
RMSLE_stack = mean_squared_error(y_train, avg_predict(X_train))**0.5
print("RMSLE lr: {:.3f}".format(RMSLE_lr))
print("RMSLE gbr: {:.3f}".format(RMSLE_gbr))
print("RMSLE abr: {:.3f}".format(RMSLE_abr))
print("RMSLE average: {:.3f}".format(RMSLE_avg))
print("RMSLE stacked: {:.3f}".format(RMSLE_stack))
import os
predict = avg_predict(X_test)
#predict = stack_predict(X_test)
#predict = lr.predict(X_test)
#predictions are logs, return to the value
predict = np.exp(predict)
file = "Id,SalePrice" + os.linesep
startId = 1461
for i in range(len(X_test)):
file += "{},{}".format(startId, (int)(predict[i])) + os.linesep
startId += 1
#print(file)
# Save to file
with open('attempt.txt', 'w') as f:
f.write(file)
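# Alternative (added, not in the original notebook): the same submission can be written with
# pandas, which avoids the manual string building above; the output filename is an example.
submission = pd.DataFrame({'Id': np.arange(1461, 1461 + len(X_test)), 'SalePrice': predict.astype(int)})
submission.to_csv('attempt_pandas.csv', index=False)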
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First problem
Step2: Find whether there are null values
Step3: NaN values will be filled with the mean of the feature they belong to. This is a reasonable way to replace NaN values without distorting the feature's distribution, although it could bias the predictions. We'll accept that trade-off.
Step4: Normalizing
Step5: The 'SalePrice' distribution is skewed. We can stabilize it by applying a logarithmic transform, because we know that all the values are positive.
Step6: Selecting good features...
Step7: Showing relationships
Step8: Splitting dataset in train and test
Step9: Anomaly Detection
Step10: Polynomial Features
Step11: Models
Step12: Stacked Model
Step13: Averaged Model
Step14: Evaluation
Step15: Get Predictions
|
955 | <ASSISTANT_TASK:>
Python Code:
import h2o
# Start an H2O Cluster on your local machine
h2o.init()
# This will not actually do anything since it's a fake IP address
# h2o.init(ip="123.45.67.89", port=54321)
csv_url = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wisc/wisc-diag-breast-cancer-shuffled.csv"
data = h2o.import_file(csv_url)
data.shape
data.head()
data.columns
columns = ["id", "diagnosis", "area_mean"]
data[columns].head()
data['diagnosis']
data['diagnosis'].unique()
data['diagnosis'].nlevels()
data['diagnosis'].levels()
data.isna()
data['diagnosis'].isna()
data['diagnosis'].isna().sum()
data.isna().sum()
# TO DO: Insert a bar chart or something showing the proportion of M to B in the response.
data['diagnosis'].table()
n = data.shape[0] # Total number of training samples
data['diagnosis'].table()['Count']/n
y = 'diagnosis'
x = data.columns
del x[0:1]
x
train, test = data.split_frame(ratios=[0.75], seed=1)
train.shape
test.shape
# Import H2O GBM:
from h2o.estimators.gbm import H2OGradientBoostingEstimator
model = H2OGradientBoostingEstimator(distribution='bernoulli',
ntrees=100,
max_depth=4,
learn_rate=0.1)
model.train(x=x, y=y, training_frame=train, validation_frame=test)
print(model)
perf = model.model_performance(test)
perf.auc()
cvmodel = H2OGradientBoostingEstimator(distribution='bernoulli',
ntrees=100,
max_depth=4,
learn_rate=0.1,
nfolds=5)
cvmodel.train(x=x, y=y, training_frame=data)
ntrees_opt = [5,50,100]
max_depth_opt = [2,3,5]
learn_rate_opt = [0.1,0.2]
hyper_params = {'ntrees': ntrees_opt,
'max_depth': max_depth_opt,
'learn_rate': learn_rate_opt}
from h2o.grid.grid_search import H2OGridSearch
gs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params = hyper_params)
gs.train(x=x, y=y, training_frame=train, validation_frame=test)
print(gs)
# print out the auc for all of the models
for g in gs:
print(g.model_id + " auc: " + str(g.auc()))
#TO DO: Compare grid search models
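# One possible way to address the TO DO above (added; assumes the standard H2O grid API):
# sort the grid models by validation AUC and inspect the best one.
sorted_grid = gs.get_grid(sort_by='auc', decreasing=True)
print(sorted_grid)
best_model = sorted_grid.models[0]
print(best_model.auc(valid=True))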
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you already have an H2O cluster running that you'd like to connect to (for example, in a multi-node Hadoop environment), then you can specify the IP and port of that cluster as follows
Step2: Download Data
Step3: Explore Data
Step4: Now let's take a look at the top of the frame
Step5: The first two columns contain an ID and the response. The "diagnosis" column is the response. Let's take a look at the column names. The data contains derived features from the medical images of the tumors.
Step6: To select a subset of the columns to look at, typical Pandas indexing applies
Step7: Now let's select a single column, for example -- the response column, and look at the data more closely
Step8: It looks like a binary response, but let's validate that assumption
Step9: We can query the categorical "levels" as well ('B' and 'M' stand for "Benign" and "Malignant" diagnosis)
Step10: Since "diagnosis" column is the response we would like to predict, we may want to check if there are any missing values, so let's look for NAs. To figure out which, if any, values are missing, we can use the isna method on the diagnosis column. The columns in an H2O Frame are also H2O Frames themselves, so all the methods that apply to a Frame also apply to a single column.
Step11: The isna method doesn't directly answer the question, "Does the diagnosis column contain any NAs?", rather it returns a 0 if that cell is not missing (Is NA? FALSE == 0) and a 1 if it is missing (Is NA? TRUE == 1). So if there are no missing values, then summing over the whole column should produce a summand equal to 0.0. Let's take a look
Step12: Great, no missing labels.
Step13: The next thing I may wonder about in a binary classification problem is the distribution of the response in the training data. Is one of the two outcomes under-represented in the training set? Many real datasets have what's called an "imbalance" problem, where one of the classes has far fewer training examples than the other class. Let's take a look at the distribution, both visually and numerically.
Step14: Ok, the data is not exactly evenly distributed between the two classes -- there are almost twice as many Benign samples as there are Malignant samples. However, this level of imbalance shouldn't be much of an issue for the machine learning algos. (We will revisit this later in the modeling section below).
Step15: Machine Learning in H2O
Step16: Split H2O Frame into a train and test set
Step17: Train and Test a GBM model
Step18: We first create a model object of class, "H2OGradientBoostingEstimator". This does not actually do any training, it just sets the model up for training by specifying model parameters.
Step19: The model object, like all H2O estimator objects, has a train method, which will actually perform model training. At this step we specify the training and (optionally) a validation set, along with the response and predictor variables.
Step20: Inspect Model
Step21: Model Performance on a Test Set
Step22: Cross-validated Performance
Step23: Grid Search
Step24: Define an "H2OGridSearch" object by specifying the algorithm (GBM) and the hyper parameters
Step25: An "H2OGridSearch" object also has a train method, which is used to train all the models in the grid.
Step26: Compare Models
|
956 | <ASSISTANT_TASK:>
Python Code:
# Imports added for completeness; face_cascade (a cv2.CascadeClassifier) and labelled_paths
# are assumed to be defined in an earlier cell of the original notebook.
import cv2
import numpy as np
import pandas as pd
from glob import glob
from PIL import Image
faceshape = (80,80)
ws = []
hs = []
data = []
for (path, label) in labelled_paths:
im = cv2.imread(path, 0)
faces = face_cascade.detectMultiScale(im, 1.3, 5)
if len(faces) == 0:  # detectMultiScale returns an empty tuple when no face is found
#print 'missed a face'
continue
else:
(x,y,w,h) = faces.tolist()[0]
cropped = im[y:y+h, x:x+w]
reshaped = cv2.resize(cropped, faceshape, interpolation=cv2.INTER_LINEAR)
data.append(reshaped.flatten())
#labels.append(label)
#ws.append(w)
#hs.append(h)
#(np.mean(ws), np.std(ws), np.mean(hs), np.std(hs))
labels0 = np.zeros(len(data))
df0 = pd.DataFrame(data)
# test!
im = Image.fromarray(reshaped)
#im.show()
reshaped.flatten()
data = []
for path in glob('gedfaces/*'):
imc = cv2.imread(path, 1)
im = cv2.cvtColor(imc, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(im, 1.3, 5)
if len(faces) == 0:
print 'missed a face'
continue
else:
(x,y,w,h) = faces.tolist()[0]
cropped = im[y:y+h, x:x+w]
reshaped = cv2.resize(cropped, faceshape, interpolation=cv2.INTER_LINEAR)
data.append(reshaped.flatten())
#labels.append(label)
labels1 = np.ones(len(data))
df1 = pd.DataFrame(data)
data = []
for path in glob('other_faces/*'):
imc = cv2.imread(path, 1)
im = cv2.cvtColor(imc, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(im, 1.3, 5)
if len(faces) == 0:
print 'missed a face'
continue
else:
(x,y,w,h) = faces.tolist()[0]
cropped = im[y:y+h, x:x+w]
reshaped = cv2.resize(cropped, faceshape, interpolation=cv2.INTER_LINEAR)
data.append(reshaped.flatten())
#labels.append(label)
labels2 = np.zeros(len(data))
df2 = pd.DataFrame(data)
labels = np.concatenate((labels0, labels1, labels2))
df = df0.append(df1).append(df2)
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
# shuffle data!
indices = np.random.permutation(labels.shape[0])
labels_sh, df_sh = labels[indices], df.iloc[indices]
df_sh = df_sh.reset_index(drop=True)
%%time
c_range = np.logspace(-3, 3, 13)
train_scores, valid_scores = validation_curve(model, df_sh, labels_sh, "C", c_range, scoring='f1', cv=3, n_jobs=4)
vd = pd.DataFrame({'sc_train': train_scores.mean(1), 'sc_val': valid_scores.mean(1)}, index=c_range)
vd.plot(kind='line')
%%time
n_train, train_scores, valid_scores = learning_curve(model, df_sh, labels_sh, train_sizes=np.linspace(0.3,1,10), scoring='f1', cv=3, n_jobs=4)
ld = pd.DataFrame({'sc_train': train_scores.mean(1), 'sc_val': valid_scores.mean(1)}, index=n_train)
ld.plot(kind='line')
model.fit(df_sh, labels_sh)
import pickle
pickle.dump(model, open( "model_lgrg.p", "wb" ))
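# Added round-trip check (not in the original notebook): reload the pickled model and
# confirm it still scores the shuffled training data.
model_loaded = pickle.load(open("model_lgrg.p", "rb"))
print model_loaded.score(df_sh, labels_sh)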
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now load my face into the thingie!
Step2: Join the data.
Step3: Training logistic regression
Step4: Fit and save final model
|
957 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
plt.style.use('fivethirtyeight')
# plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Helvetica'
plt.rcParams['font.monospace'] = 'Consolas'
plt.rcParams['font.size'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 16
plt.rcParams['figure.titlesize'] = 20
plt.rcParams['lines.linewidth'] = 2
%matplotlib inline
# for auto-reloading external modules
%load_ext autoreload
%autoreload 2
# Set up the file directory and names
DIR = '../input/'
X_TRAIN = DIR + 'train-images-idx3-ubyte.pkl'
Y_TRAIN = DIR + 'train-labels-idx1-ubyte.pkl'
X_TEST = DIR + 't10k-images-idx3-ubyte.pkl'
Y_TEST = DIR + 't10k-labels-idx1-ubyte.pkl'
print('Loading pickle files')
X_train = pickle.load( open( X_TRAIN, "rb" ) )
y_train = pickle.load( open( Y_TRAIN, "rb" ) )
X_test = pickle.load( open( X_TEST, "rb" ) )
y_test = pickle.load( open( Y_TEST, "rb" ) )
n_train = X_train.shape[0]
n_test = X_test.shape[0]
print('Train images shape {}, labels shape {}'.format(X_train.shape, y_train.shape))
print('Test images shape {}, labels shape {}'.format(X_test.shape, y_test.shape))
# Check a few training values at random as a sanity check
def show_label_images(X, y):
'''Shows random images in a grid'''
num = 9
images = np.random.randint(0, X.shape[0], num)
print('Showing training image indexes {}'.format(images))
fig, axes = plt.subplots(3,3, figsize=(6,6))
for idx, val in enumerate(images):
r, c = divmod(idx, 3)
axes[r][c].imshow(X[images[idx]])
axes[r][c].annotate('Label: {}'.format(y[val]), xy=(1, 1))
axes[r][c].xaxis.set_visible(False)
axes[r][c].yaxis.set_visible(False)
show_label_images(X_train, y_train)
# Now do the same for the test dataset
show_label_images(X_test, y_test)
# # Training label distribution
y_train_df = pd.DataFrame(y_train, columns=['class'])
y_train_df.plot.hist(legend=False)
hist_df = pd.DataFrame(y_train_df['class'].value_counts(normalize=True))
hist_df.index.name = 'class'
hist_df.columns = ['train']
# Test label distribution
y_test_df = pd.DataFrame(y_test, columns=['class'])
y_test_df.plot.hist(legend=False, bins=10)
test_counts = y_test_df['class'].value_counts(normalize=True)
hist_df['test'] = test_counts
hist_df['diff'] = np.abs(hist_df['train'] - hist_df['test'])
hist_df.sort_values('diff', ascending=False)['diff'].plot.bar()
# Final quick check of datatypes
assert X_train.dtype == np.uint8
assert y_train.dtype == np.uint8
assert X_test.dtype == np.uint8
assert y_test.dtype == np.uint8
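# Added note (not part of the original notebook): for most models the uint8 pixel values
# would next be scaled to floats in [0, 1]; shown here only as a hedged example.
X_train_scaled = X_train.astype(np.float32) / 255.0
X_test_scaled = X_test.astype(np.float32) / 255.0
print(X_train_scaled.min(), X_train_scaled.max())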
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading pickle files
Step2: Sample training images with labels
Step3: Sample test images with labels
Step4: The class distribution is pretty evenly split between the classes. 1 is the most popular class with 11.24% of instances, and at the other end 5 is the least frequent class, with 9.04% of instances
Step5: The distribution looks very similar between training and test datasets.
Step6: The largest difference is about 0.0040 (0.40 percentage points), in the digit 2 class.
|
958 | <ASSISTANT_TASK:>
Python Code:
%pip --quiet install objax
import math
import random
import numpy as np
import tensorflow_datasets as tfds
import objax
from objax.zoo.wide_resnet import WideResNet
base_learning_rate = 0.1 # Learning rate
lr_decay_epochs = 30 # How often to decay learning rate
lr_decay_factor = 0.2 # By how much to decay learning rate
weight_decay = 0.0005 # Weight decay
batch_size = 128 # Batch size
num_train_epochs = 100 # Number of training epochs
wrn_width = 2 # Width of WideResNet
wrn_depth = 28 # Depth of WideResNet
# Augmentation function for input data
def augment(x): # x is NCHW
"""Random flip and random shift augmentation of image batch."""
if random.random() < .5:
x = x[:, :, :, ::-1]  # Flip the batch images horizontally (mirror along the width axis)
# Pixel-shift all images in the batch by up to 4 pixels in any direction.
x_pad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'reflect')
rx, ry = np.random.randint(0, 9), np.random.randint(0, 9)  # offsets 0..8 give shifts of -4..+4 pixels
x = x_pad[:, :, rx:rx + 32, ry:ry + 32]
return x
# Data
data = tfds.as_numpy(tfds.load(name='cifar10', batch_size=-1))
x_train = data['train']['image'].transpose(0, 3, 1, 2) / 255.0
y_train = data['train']['label']
x_test = data['test']['image'].transpose(0, 3, 1, 2) / 255.0
y_test = data['test']['label']
del data
# Model
model = WideResNet(nin=3, nclass=10, depth=wrn_depth, width=wrn_width)
weight_decay_vars = [v for k, v in model.vars().items() if k.endswith('.w')]
# Optimizer
opt = objax.optimizer.Momentum(model.vars(), nesterov=True)
# Prediction operation
predict_op = objax.nn.Sequential([objax.ForceArgs(model, training=False), objax.functional.softmax])
predict_op = objax.Jit(predict_op)
# Loss and training op
@objax.Function.with_vars(model.vars())
def loss_fn(x, label):
logit = model(x, training=True)
xe_loss = objax.functional.loss.cross_entropy_logits_sparse(logit, label).mean()
wd_loss = sum((v ** 2).sum() for v in weight_decay_vars)
return xe_loss + weight_decay * wd_loss
loss_gv = objax.GradValues(loss_fn, model.vars())
@objax.Function.with_vars(model.vars() + loss_gv.vars() + opt.vars())
def train_op(x, y, learning_rate):
grads, loss = loss_gv(x, y)
opt(learning_rate, grads)
return loss
train_op = objax.Jit(train_op)
print(model.vars())
def lr_schedule(epoch):
return base_learning_rate * math.pow(lr_decay_factor, epoch // lr_decay_epochs)
num_train_examples = x_train.shape[0]
num_test_examples = x_test.shape[0]
for epoch in range(num_train_epochs):
# Training
example_indices = np.arange(num_train_examples)
np.random.shuffle(example_indices)
for idx in range(0, num_train_examples, batch_size):
x = x_train[example_indices[idx:idx + batch_size]]
y = y_train[example_indices[idx:idx + batch_size]]
loss = train_op(augment(x), y, lr_schedule(epoch))[0]
# Eval
accuracy = 0
for idx in range(0, num_test_examples, batch_size):
x = x_test[idx:idx + batch_size]
y = y_test[idx:idx + batch_size]
p = predict_op(x)
accuracy += (np.argmax(p, axis=1) == y).sum()
accuracy /= num_test_examples
print(f'Epoch {epoch+1:3} -- train loss {loss:.3f} test accuracy {accuracy*100:.1f}', flush=True)
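# Added example (not in the original notebook): after training, the jitted predict_op can be
# used directly for inference on a few test images.
sample_pred = predict_op(x_test[:8])
print('Predicted:', np.argmax(sample_pred, axis=1))
print('Actual: ', y_test[:8])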
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step3: Setup dataset and model
Step4: Model parameters
Step5: Training loop
|
959 | <ASSISTANT_TASK:>
Python Code:
import graphlab as gl
train = gl.SFrame.read_csv("../data/train.csv")
test = gl.SFrame.read_csv("../data/test.csv")
desc = gl.SFrame.read_csv("../data/product_descriptions.csv")
# merge train with description
train = train.join(desc, on = 'product_uid', how = 'left')
# merge test with description
test = test.join(desc, on = 'product_uid', how = 'left')
first_doc = train[0]
first_doc
middle_doc = train[37033]
middle_doc
last_doc = train[-1]
last_doc
train['search_term_word_count'] = gl.text_analytics.count_words(train['search_term'])
ranked3doc = train[train['relevance'] == 3]
print ranked3doc.head()
len(ranked3doc)
words_search = gl.text_analytics.tokenize(ranked3doc['search_term'], to_lower = True)
words_description = gl.text_analytics.tokenize(ranked3doc['product_description'], to_lower = True)
words_title = gl.text_analytics.tokenize(ranked3doc['product_title'], to_lower = True)
wordsdiff_desc = []
wordsdiff_title = []
puid = []
search_term = []
ws_count = []
ws_count_used_desc = []
ws_count_used_title = []
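# For each relevance-3 row, record which search-term tokens are missing from the product description and title, and count how many tokens were actually matched.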
for item in xrange(len(ranked3doc)):
ws = words_search[item]
pd = words_description[item]
pt = words_title[item]
diff = set(ws) - set(pd)
if diff is None:
diff = 0
wordsdiff_desc.append(diff)
diff2 = set(ws) - set(pt)
if diff2 is None:
diff2 = 0
wordsdiff_title.append(diff2)
puid.append(ranked3doc[item]['product_uid'])
search_term.append(ranked3doc[item]['search_term'])
ws_count.append(len(ws))
ws_count_used_desc.append(len(ws) - len(diff))
ws_count_used_title.append(len(ws) - len(diff2))
differences = gl.SFrame({"puid" : puid,
"search term": search_term,
"diff desc" : wordsdiff_desc,
"diff title" : wordsdiff_title,
"ws count" : ws_count,
"ws count used desc" : ws_count_used_desc,
"ws count used title" : ws_count_used_title})
differences.sort(['ws count used desc', 'ws count used title'])
print "No terms used in description : " + str(len(differences[differences['ws count used desc'] == 0]))
print "No terms used in title : " + str(len(differences[differences['ws count used title'] == 0]))
print "No terms used in description and title : " + str(len(differences[(differences['ws count used desc'] == 0) &
(differences['ws count used title'] == 0)]))
import matplotlib.pyplot as plt
%matplotlib inline
train_search_tfidf = gl.text_analytics.tf_idf(train['search_term_word_count'])
train['search_tfidf'] = train_search_tfidf
train['product_desc_word_count'] = gl.text_analytics.count_words(train['product_description'])
train_desc_tfidf = gl.text_analytics.tf_idf(train['product_desc_word_count'])
train['desc_tfidf'] = train_desc_tfidf
train['product_title_word_count'] = gl.text_analytics.count_words(train['product_title'])
train_title_tfidf = gl.text_analytics.tf_idf(train['product_title_word_count'])
train['title_tfidf'] = train_title_tfidf
train['distance'] = train.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['desc_tfidf']))
train['distance2'] = train.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['title_tfidf']))
model1 = gl.linear_regression.create(train, target = 'relevance', features = ['distance', 'distance2'], validation_set = None)
#let's take a look at the weights before we plot
model1.get("coefficients")
test['search_term_word_count'] = gl.text_analytics.count_words(test['search_term'])
test_search_tfidf = gl.text_analytics.tf_idf(test['search_term_word_count'])
test['search_tfidf'] = test_search_tfidf
test['product_desc_word_count'] = gl.text_analytics.count_words(test['product_description'])
test_desc_tfidf = gl.text_analytics.tf_idf(test['product_desc_word_count'])
test['desc_tfidf'] = test_desc_tfidf
test['product_title_word_count'] = gl.text_analytics.count_words(test['product_title'])
test_title_tfidf = gl.text_analytics.tf_idf(test['product_title_word_count'])
test['title_tfidf'] = test_title_tfidf
test['distance'] = test.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['desc_tfidf']))
test['distance2'] = test.apply(lambda x: gl.distances.cosine(x['search_tfidf'],x['title_tfidf']))
'''
predictions_test = model1.predict(test)
test_errors = predictions_test - test['relevance']
RSS_test = sum(test_errors * test_errors)
print RSS_test
'''
# NOTE: 'output' was never defined in the original notebook; the submission cells below need the
# model's predictions on the test set, so it is assumed to hold model1.predict(test).
output = model1.predict(test)
output
submission = gl.SFrame(test['id'])
submission.add_column(output)
submission.rename({'X1': 'id', 'X2':'relevance'})
submission['relevance'] = submission.apply(lambda x: 3.0 if x['relevance'] > 3.0 else x['relevance'])
submission['relevance'] = submission.apply(lambda x: 1.0 if x['relevance'] < 1.0 else x['relevance'])
submission['relevance'] = submission.apply(lambda x: str(x['relevance']))
submission.export_csv('../data/submission.csv', quote_level = 3)
#gl.canvas.set_target('ipynb')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data from CSV files
Step2: Data merging
Step3: Let's explore some data
Step4: The search term 'angle bracket' is not contained in the body; 'angle' would be after stemming, but 'bracket' is not.
Step5: Only 'wood' from the search term is present.
Step6: 'sheer' and 'courtain' are present, and that is all.
Step7: TF-IDF with linear regression
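For reference, the same idea (TF-IDF vectors, a per-row cosine-distance feature, then a linear regression) can be sketched with scikit-learn instead of GraphLab Create. This is only an illustrative alternative with toy rows and assumed column names, not the notebook's own implementation:
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.metrics.pairwise import cosine_distances
import pandas as pd
toy = pd.DataFrame({'search_term': ['angle bracket', 'sheer courtain'], 'product_description': ['metal angle bracket for shelving', 'sheer curtain panel'], 'relevance': [3.0, 2.33]})
vec = TfidfVectorizer().fit(toy['search_term'] + ' ' + toy['product_description'])
q = vec.transform(toy['search_term'])
d = vec.transform(toy['product_description'])
toy['distance'] = cosine_distances(q, d).diagonal()  # cosine distance between each query and its own description
model = LinearRegression().fit(toy[['distance']], toy['relevance'])
print(model.coef_, model.intercept_)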
|
960 | <ASSISTANT_TASK:>
Python Code:
# Read data
in_xls = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Swedish_Ca_Data\Missing_Data_25_Swedish_Sites.xlsx')
smhi_df = pd.read_excel(in_xls, sheetname='salar_data')
resa_df = pd.read_excel(in_xls, sheetname='from_resa_10-02-2017')
# Get just the cols of interest
smhi_df = smhi_df[['stn_code', 'stn_name', 'date', 'depth_m', 'ca_mekv/l', 'mg_mekv/l']]
resa_df = resa_df[['stn_code', 'stn_name', 'date', 'depth1', 'depth2', 'ca_uekv/l', 'mg_uekv/l']]
smhi_df.head(10)
resa_df.head(10)
# Join datasets
df = pd.merge(resa_df, smhi_df, how='inner',
left_on=['stn_code', 'date'],
right_on=['stn_code', 'date'])
df.head()
print len(smhi_df)
print len(df)
# Convert Mg to ueq/l and round to nearest whole number
df['mg_old_ueq/l'] = (df['mg_uekv/l'] + 0.5).astype(int)
df['mg_new_ueq/l'] = (df['mg_mekv/l']*1000 + 0.5).astype(int)
print 'All Mg values match:', (df['mg_old_ueq/l'] == df['mg_new_ueq/l']).all()
df.head()
# Connect to db
r2_func_path = r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Upload_Template\useful_resa2_code.py'
resa2 = imp.load_source('useful_resa2_code', r2_func_path)
engine, conn = resa2.connect_to_resa2()
# Get all water samples for these sites
stns = tuple(df['stn_code'].unique())
sql = ('SELECT station_id, sample_date, water_sample_id '
'FROM resa2.water_samples '
'WHERE station_id IN %s' % str(stns))
samp_df = pd.read_sql_query(sql, engine)
# Join
df = pd.merge(df, samp_df, how='left',
left_on=['date', 'stn_code'],
right_on=['sample_date', 'station_id'])
df.head()
# Extract columns of interest
df = df[['water_sample_id', 'ca_mekv/l']]
# Rename columns
df.columns = ['sample_id', 'value']
# Add new columns
df['method_id'] = 10551
df['approved'] = 'YES'
df['entered_by'] = 'JES'
df['entered_date'] = pd.datetime(2017, 2, 10)
# Reorder columns
df = df[['sample_id', 'method_id', 'value', 'approved', 'entered_by', 'entered_date']]
df.head(10)
# Write to database
df.to_sql(name='WATER_CHEMISTRY_VALUES2',
schema='RESA2',
con=engine,
index=False,
if_exists='append')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Join data to identify common water samples
Step2: Next, check how much of the data downloaded by Salar is already in the database.
Step3: So, the recent data downloaded by Salar includes 2412 records for these 25 stations, of which 1910 are already in RESA2. Let's check if the values for Mg are comparable.
Step4: Excellent - the values in the RESA2 database for Mg match the values in the data Salar has recently downloaded.
Step5: Next, get the sample IDs associated with these 25 sites and join them to the data above.
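A minimal sketch of the comparability check described in Steps 3 and 4, with toy numbers: the SMHI values arrive in meq/l, so multiplying by 1000 converts them to the ueq/l units stored in RESA2 before comparing.
import pandas as pd
df_toy = pd.DataFrame({'mg_uekv/l': [123, 87], 'mg_mekv/l': [0.123, 0.087]})  # toy values, not the real samples
mg_old = (df_toy['mg_uekv/l'] + 0.5).astype(int)  # values already in RESA2, in ueq/l
mg_new = (df_toy['mg_mekv/l'] * 1000 + 0.5).astype(int)  # new values converted from meq/l to ueq/l (1 meq/l = 1000 ueq/l)
print('All Mg values match:', (mg_old == mg_new).all())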
|
961 | <ASSISTANT_TASK:>
Python Code:
from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
# yes (nod the head)
for i in range(0,3):
poppy.head_y.goto_position(15,0.5,wait=True)
poppy.head_y.goto_position(-15,0.5,wait=True)
print i
poppy.head_y.goto_position(-15,0.1,wait=True)
import time
position_start = poppy.head_y.present_position
poppy.head_y.goal_position = 10
time.sleep(1)
poppy.head_y.goal_position = -15
time.sleep(1)
poppy.head_y.goal_position = position_start
# agree
# disagree
# don't know
poppy.reset_simulation()  # reset the V-REP scene (assumes the creature object is 'poppy'; the original cell referenced an undefined 'messager')
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's find a way to communicate.
Step2: Great, so let's agree that
Step3: Didn't work? No problem, start over and try these lines to restart
Step4: Still buggy? Try these ones
Step5: Finished? Stop the simulation here
|
962 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from landlab import RasterModelGrid
from landlab.components import TaylorNonLinearDiffuser
# define parameters
L = 50.0 # distance from base to ridgeline, m
dx = 2.0 # node spacing, m
D = 0.01 # diffusion-like coefficient, m2/y
U = 0.0001 # uplift rate, m/y
H = 100.0 # initial soil thickness, m
num_steps = 20000 # number of time steps
# time step size (calculate using Courant condition for linear diffusion)
dt = 0.1 * dx * dx / D
# prediction
predicted_crest_height = 0.5 * (U / D) * L * L
print("Crest height should be " + str(predicted_crest_height))
# create grid
grid = RasterModelGrid((3, 51), xy_spacing=dx)
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# create elevation field
elev = grid.add_zeros("topographic__elevation", at="node") # this is eta
# instantiate component
tnld = TaylorNonLinearDiffuser(grid, linear_diffusivity=D, nterms=1)
# run the model in a time loop with uplift applied
for i in range(num_steps):
elev[grid.core_nodes] += U * dt
tnld.run_one_step(dt)
midrow = np.arange(51, 102, dtype=int)
plt.plot(grid.x_of_node[midrow], elev[midrow])
plt.xlabel("Distance (m)")
plt.ylabel("Elevation (m)")
print(np.amax(elev))
U = 0.0005 # uplift rate, m/yr
Sc = 0.6 # critical slope gradient, m/m
num_steps = 2000 # number of time steps
# create grid
grid = RasterModelGrid((3, 51), xy_spacing=dx)
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# create fields
elev = grid.add_zeros("topographic__elevation", at="node") # this is eta
# instantiate component
tnld = TaylorNonLinearDiffuser(
grid, linear_diffusivity=D, slope_crit=Sc, dynamic_dt=True, nterms=2
)
# run the model in a time loop with uplift applied
for i in range(num_steps):
elev[grid.core_nodes] += U * dt
tnld.run_one_step(dt)
plt.plot(grid.x_of_node[midrow], elev[midrow])
plt.xlabel("Distance (m)")
plt.ylabel("Elevation (m)")
active_link_midpts = (
grid.x_of_node[grid.node_at_link_tail[grid.active_links]] + 0.5 * dx
)
plt.plot(active_link_midpts, grid.at_link["soil__flux"][grid.active_links])
plt.grid(True)
plt.xlabel("Distance (m)")
plt.ylabel("Soil flux (m2/yr)")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Example 2
Step3: The resulting hill is taller (due to the higher uplift rate) and no longer has uniform convexity.
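One way to see the loss of uniform convexity is to plot the second derivative of the cross-section, which is roughly constant for the linear run but varies along the nonlinear one. A minimal sketch reusing the elev, midrow and grid objects already defined above:
profile = elev[midrow]
xs = grid.x_of_node[midrow]
curvature = np.gradient(np.gradient(profile, xs), xs)  # second derivative of the elevation profile
plt.plot(xs[1:-1], curvature[1:-1])  # drop the boundary nodes
plt.xlabel('Distance (m)')
plt.ylabel('Curvature (1/m)')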
|
963 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
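# Illustrative only: how the setter cells below are meant to be completed once real metadata is known.
# The name, email, and value here are placeholders, not actual document content.
# DOC.set_author("Jane Doe", "jane.doe@example.org")
# DOC.set_value("Brief overview text describing the atmosphere component")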
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
964 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
sys.path.append("/home/darlan/cvs_files/pyphysim")
# xxxxxxxxxx Import Statements xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from pyphysim.simulations.core import SimulationRunner, SimulationParameters, SimulationResults, Result
from pyphysim.comm import modulators, channels
from pyphysim.util.conversion import dB2Linear
from pyphysim.util import misc
# from pyphysim.ia import ia
import numpy as np
from pprint import pprint
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx Parameters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#params = SimulationParameters.load_from_config_file('ia_config_file.txt')
K = 3
Nr = 3
Nt = 3
Ns = 2
M = 4
modulator = "PSK"
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx Results base name xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
base_name = 'results_{M}-{modulator}_{Nr}x{Nt}_({Ns})_MaxIter_[5_(5)_120]'.format(M=M, modulator=modulator, Nr=Nr, Nt=Nt, Ns=Ns)
base_name_no_iter = 'results_{M}-{modulator}_{Nr}x{Nt}_({Ns})'.format(M=M, modulator=modulator, Nr=Nr, Nt=Nt, Ns=Ns) # Used only for the closed form algorithm, which is not iterative
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
alt_min_results = SimulationResults.load_from_file(
'ia_alt_min_{0}.pickle'.format(base_name))
closed_form_results = SimulationResults.load_from_file(
'ia_closed_form_{0}.pickle'.format(base_name_no_iter))
# closed_form_first_results = SimulationResults.load_from_file(
# 'ia_closed_form_first_init_{0}.pickle'.format(base_name))
max_sinrn_results = SimulationResults.load_from_file(
"ia_max_sinr_{0}_['random'].pickle".format(base_name))
# min_leakage_results = SimulationResults.load_from_file(
# 'ia_min_leakage_{0}.pickle'.format(base_name))
mmse_results = SimulationResults.load_from_file(
"ia_mmse_{0}_['random'].pickle".format(base_name))
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Helper function to get the number of repetitions for a given set of transmit parameters
def get_num_runned_reps(sim_results_object, fixed_params=dict()):
all_runned_reps = np.array(sim_results_object.runned_reps)
indexes = sim_results_object.params.get_pack_indexes(fixed_params)
return all_runned_reps[indexes]
# Helper function to get the number of IA runned iterations for a given set of transmit parameters
def get_num_mean_ia_iterations(sim_results_object, fixed_params=dict()):
return sim_results_object.get_result_values_list('ia_runned_iterations', fixed_params)
SNR_alt_min = np.array(alt_min_results.params['SNR'])
SNR_closed_form = np.array(closed_form_results.params['SNR'])
SNR_max_SINR = np.array(max_sinrn_results.params['SNR'])
# SNR_min_leakage = np.array(min_leakage_results.params['SNR'])
SNR_mmse = np.array(mmse_results.params['SNR'])
def plot_ber(max_iterations, ax=None):
ber_alt_min = alt_min_results.get_result_values_list(
'ber',
fixed_params={'max_iterations': max_iterations})
ber_CF_alt_min = alt_min_results.get_result_values_confidence_intervals(
'ber',
P=95,
fixed_params={'max_iterations': max_iterations})
ber_errors_alt_min = np.abs([i[1] - i[0] for i in ber_CF_alt_min])
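# The width of the 95% confidence interval is used below as the error-bar size for each BER point.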
ber_closed_form = closed_form_results.get_result_values_list(
'ber',
fixed_params={'max_iterations': max_iterations})
ber_CF_closed_form = closed_form_results.get_result_values_confidence_intervals(
'ber',
P=95,
fixed_params={'max_iterations': max_iterations})
ber_errors_closed_form = np.abs([i[1] - i[0] for i in ber_CF_closed_form])
# ber_closed_form_first = closed_form_first_results.get_result_values_list('ber')
# ber_CF_closed_form_first = closed_form_first_results.get_result_values_confidence_intervals('ber', P=95)
# ber_errors_closed_form_first = np.abs([i[1] - i[0] for i in ber_CF_closed_form_first])
ber_max_sinr = max_sinrn_results.get_result_values_list(
'ber',
fixed_params={'max_iterations': max_iterations})
ber_CF_max_sinr = max_sinrn_results.get_result_values_confidence_intervals(
'ber',
P=95,
fixed_params={'max_iterations': max_iterations})
ber_errors_max_sinr = np.abs([i[1] - i[0] for i in ber_CF_max_sinr])
# ber_min_leakage = min_leakage_results.get_result_values_list('ber')
# ber_CF_min_leakage = min_leakage_results.get_result_values_confidence_intervals('ber', P=95)
# ber_errors_min_leakage = np.abs([i[1] - i[0] for i in ber_CF_min_leakage])
ber_mmse = mmse_results.get_result_values_list(
'ber',
fixed_params={'max_iterations': max_iterations})
ber_CF_mmse = mmse_results.get_result_values_confidence_intervals(
'ber',
P=95,
fixed_params={'max_iterations': max_iterations})
ber_errors_mmse = np.abs([i[1] - i[0] for i in ber_CF_mmse])
if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.errorbar(SNR_alt_min, ber_alt_min, ber_errors_alt_min, fmt='-r*', elinewidth=2.0, label='Alt. Min.')
ax.errorbar(SNR_closed_form, ber_closed_form, ber_errors_closed_form, fmt='-b*', elinewidth=2.0, label='Closed Form')
ax.errorbar(SNR_max_SINR, ber_max_sinr, ber_errors_max_sinr, fmt='-g*', elinewidth=2.0, label='Max SINR')
# ax.errorbar(SNR, ber_min_leakage, ber_errors_min_leakage, fmt='-k*', elinewidth=2.0, label='Min Leakage.')
ax.errorbar(SNR_mmse, ber_mmse, ber_errors_mmse, fmt='-m*', elinewidth=2.0, label='MMSE.')
ax.set_xlabel('SNR')
ax.set_ylabel('BER')
title = 'BER for Different Algorithms ({max_iterations} Max Iterations)\nK={K}, Nr={Nr}, Nt={Nt}, Ns={Ns}, {M}-{modulator}'.replace("{max_iterations}", str(max_iterations))
ax.set_title(title.format(**alt_min_results.params.parameters))
ax.set_yscale('log')
leg = ax.legend(fancybox=True, shadow=True, loc='lower left', bbox_to_anchor=(0.01, 0.01), ncol=4)
ax.grid(True, which='both', axis='both')
# Lets plot the mean number of ia iterations
ax2 = ax.twinx()
mean_alt_min_ia_terations = get_num_mean_ia_iterations(alt_min_results, {'max_iterations': max_iterations})
mean_max_sinrn_ia_terations = get_num_mean_ia_iterations(max_sinrn_results, {'max_iterations': max_iterations})
mean_mmse_ia_terations = get_num_mean_ia_iterations(mmse_results, {'max_iterations': max_iterations})
ax2.plot(SNR_alt_min, mean_alt_min_ia_terations, '--r*')
ax2.plot(SNR_max_SINR, mean_max_sinrn_ia_terations, '--g*')
ax2.plot(SNR_mmse, mean_mmse_ia_terations, '--m*')
# Horizontal line with the max allowed ia iterations
ax2.hlines(max_iterations, SNR_alt_min[0], SNR_alt_min[-1], linestyles='dashed')
ax2.set_ylim(0, max_iterations*1.1)
ax2.set_ylabel('IA Mean Iterations')
# Set the X axis limits
ax.set_xlim(SNR_alt_min[0], SNR_alt_min[-1])
# Set the Y axis limits
ax.set_ylim(1e-6, 1)
fig, ax = subplots(2,2,figsize=(20,15))
plot_ber(5, ax[0,0])
plot_ber(10, ax[0,1])
plot_ber(15, ax[1,0])
plot_ber(20, ax[1,1])
fig, ax = subplots(2,2,figsize=(20,15))
plot_ber(25, ax[0,0])
plot_ber(30, ax[0,1])
plot_ber(35, ax[1,0])
plot_ber(40, ax[1,1])
fig, ax = subplots(2,2,figsize=(20,15))
plot_ber(45, ax[0,0])
plot_ber(50, ax[0,1])
plot_ber(55, ax[1,0])
plot_ber(60, ax[1,1])
def plot_capacity(max_iterations, ax=None):
# xxxxx Plot Sum Capacity (all) xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
sum_capacity_alt_min = alt_min_results.get_result_values_list(
'sum_capacity',
fixed_params={'max_iterations': max_iterations})
sum_capacity_CF_alt_min = alt_min_results.get_result_values_confidence_intervals(
'sum_capacity',
P=95,
fixed_params={'max_iterations': max_iterations})
sum_capacity_errors_alt_min = np.abs([i[1] - i[0] for i in sum_capacity_CF_alt_min])
sum_capacity_closed_form = closed_form_results.get_result_values_list(
'sum_capacity',
fixed_params={'max_iterations': max_iterations})
sum_capacity_CF_closed_form = closed_form_results.get_result_values_confidence_intervals(
'sum_capacity',
P=95,
fixed_params={'max_iterations': max_iterations})
sum_capacity_errors_closed_form = np.abs([i[1] - i[0] for i in sum_capacity_CF_closed_form])
sum_capacity_max_sinr = max_sinrn_results.get_result_values_list(
'sum_capacity',
fixed_params={'max_iterations': max_iterations})
sum_capacity_CF_max_sinr = max_sinrn_results.get_result_values_confidence_intervals(
'sum_capacity',
P=95,
fixed_params={'max_iterations': max_iterations})
sum_capacity_errors_max_sinr = np.abs([i[1] - i[0] for i in sum_capacity_CF_max_sinr])
# sum_capacity_min_leakage = min_leakage_results.get_result_values_list('sum_capacity')
# sum_capacity_CF_min_leakage = min_leakage_results.get_result_values_confidence_intervals('sum_capacity', P=95)
# sum_capacity_errors_min_leakage = np.abs([i[1] - i[0] for i in sum_capacity_CF_min_leakage])
sum_capacity_mmse = mmse_results.get_result_values_list(
'sum_capacity',
fixed_params={'max_iterations': max_iterations})
sum_capacity_CF_mmse = mmse_results.get_result_values_confidence_intervals(
'sum_capacity',
P=95,
fixed_params={'max_iterations': max_iterations})
sum_capacity_errors_mmse = np.abs([i[1] - i[0] for i in sum_capacity_CF_mmse])
if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=1)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ax.errorbar(SNR_alt_min, sum_capacity_alt_min, sum_capacity_errors_alt_min, fmt='-r*', elinewidth=2.0, label='Alt. Min.')
ax.errorbar(SNR_closed_form, sum_capacity_closed_form, sum_capacity_errors_closed_form, fmt='-b*', elinewidth=2.0, label='Closed Form')
ax.errorbar(SNR_max_SINR, sum_capacity_max_sinr, sum_capacity_errors_max_sinr, fmt='-g*', elinewidth=2.0, label='Max SINR')
# ax.errorbar(SNR, sum_capacity_min_leakage, sum_capacity_errors_min_leakage, fmt='-k*', elinewidth=2.0, label='Min Leakage.')
ax.errorbar(SNR_mmse, sum_capacity_mmse, sum_capacity_errors_mmse, fmt='-m*', elinewidth=2.0, label='MMSE.')
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ax.set_xlabel('SNR')
ax.set_ylabel('Sum Capacity')
title = 'Sum Capacity for Different Algorithms ({max_iterations} Max Iterations)\nK={K}, Nr={Nr}, Nt={Nt}, Ns={Ns}, {M}-{modulator}'.replace("{max_iterations}", str(max_iterations))
ax.set_title(title.format(**alt_min_results.params.parameters))
#leg = ax.legend(fancybox=True, shadow=True, loc=2)
leg = ax.legend(fancybox=True, shadow=True, loc='lower right', bbox_to_anchor=(0.99, 0.01), ncol=4)
ax.grid(True, which='both', axis='both')
# Lets plot the mean number of ia iterations
ax2 = ax.twinx()
mean_alt_min_ia_terations = get_num_mean_ia_iterations(alt_min_results, {'max_iterations': max_iterations})
mean_max_sinrn_ia_terations = get_num_mean_ia_iterations(max_sinrn_results, {'max_iterations': max_iterations})
mean_mmse_ia_terations = get_num_mean_ia_iterations(mmse_results, {'max_iterations': max_iterations})
ax2.plot(SNR_alt_min, mean_alt_min_ia_terations, '--r*')
ax2.plot(SNR_max_SINR, mean_max_sinrn_ia_terations, '--g*')
ax2.plot(SNR_mmse, mean_mmse_ia_terations, '--m*')
# Horizontal line with the max allowed ia iterations
ax2.hlines(max_iterations, SNR_alt_min[0], SNR_alt_min[-1], linestyles='dashed')
ax2.set_ylim(0, max_iterations*1.1)
ax2.set_ylabel('IA Mean Iterations')
# Set the X axis limits
ax.set_xlim(SNR_alt_min[0], SNR_alt_min[-1])
# Set the Y axis limits
#ax.set_ylim(1e-6, 1)
fig, ax = subplots(2,2,figsize=(20,15))
plot_capacity(5, ax[0,0])
plot_capacity(10, ax[0,1])
plot_capacity(15, ax[1,0])
plot_capacity(20, ax[1,1])
fig, ax = subplots(2,2,figsize=(20,15))
plot_capacity(25, ax[0,0])
plot_capacity(30, ax[0,1])
plot_capacity(35, ax[1,0])
plot_capacity(40, ax[1,1])
fig, ax = subplots(2,2,figsize=(20,15))
plot_capacity(45, ax[0,0])
plot_capacity(50, ax[0,1])
plot_capacity(55, ax[1,0])
plot_capacity(60, ax[1,1])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we import some modules we use and add the PyPhysim to the python path.
Step2: Now we set the transmit parameters and load the simulation results from the file corresponding to those transmit parameters.
Step3: Let's define helper methods to get mean number of IA iterations from a simulation results object.
Step4: Get the SNR values from the simulation parameters object.
Step5: Define a function that we can call to plot the BER.
Step6: Plot the BER
Step7: Plot the Capacity
|
965 | <ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
#%config InlineBackend.figure_format = 'pdf'
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import tensorflow as tf
# font options
font = {
#'family' : 'normal',
#'weight' : 'bold',
'size' : 18
}
plt.rc('font', **font)
plt.rc('lines', linewidth=2)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import tensorflow as tf
import numpy as np
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.global_variables_initializer()
# Launch the graph.
sess = tf.Session()
sess.run(init)
# Fit the line.
for step in range(201):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(W), sess.run(b))
# Learns best fit is W: [0.1], b: [0.3]
# https://www.tensorflow.org/tutorials/mnist/beginners/
from tensorflow.examples.tutorials.mnist import input_data
# one_hot = one-of-K coding of the labels
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# A placeholder requires an instantiation for running the computation.
# None = the dimension can be of any length
x = tf.placeholder(tf.float32, [None, 784])
# A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations.
# tf.zeros(..) will be the initial values.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# b is added to each row?
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Loss function
# True labels
y_ = tf.placeholder(tf.float32, [None, 10])
# sum the second dimension
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1] ))
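# Note: applying softmax and then log explicitly like this can be numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits on the raw logits is the more robust formulation.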
# optimizer
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# initialization operation (not run it yet)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# 1000 training steps
for i in range(1000):
# batch_xs is a numpy array of size 100 x 784
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# check the predictions
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_:mnist.test.labels}))
b = np.array([1.0, 2, 4])
x = np.array([4.0, 5, 6])
tf_b = tf.constant(b)
tf_x = tf.placeholder(tf.float64)
tf_dot = tf.reduce_sum(tf_x*tf_b)
tf_elem = tf_x*tf_b
tf_outer = tf.matmul(tf.reshape(tf_x, (-1, 1)), tf.reshape(tf_b, (1, -1)) )
tfg_dot = tf.gradients(tf_dot, [tf_x])[0]
tfg_elem = tf.gradients(tf_elem, [tf_x])[0]
tfg_outer = tf.gradients(tf_outer, [tf_x])[0]
with tf.Session() as sess:
dot = sess.run(tf_dot, feed_dict={tf_x: x})
elem = sess.run(tf_elem, feed_dict={tf_x: x})
outer = sess.run(tf_outer, feed_dict={tf_x: x})
g_dot = sess.run(tfg_dot, feed_dict={tf_x: x})
g_elem = sess.run(tfg_elem, feed_dict={tf_x: x})
g_outer = sess.run(tfg_outer, feed_dict={tf_x: x})
print('dot: {0}'.format(dot))
print('elem: {0}'.format(elem))
print('outer: \n{0}'.format(outer))
print()
print('g_dot: {0}'.format(g_dot))
print('g_elem: {0}'.format(g_elem))
print('g_outer: {0}'.format(g_outer))
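# tf.gradients sums the gradient contributions over all elements of the output, which is why
# g_outer above has the same shape as x. To recover per-element (Jacobian-style) gradients,
# the outer product is flattened and each element is differentiated separately below.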
tf_reshape_outer = tf.reshape(tf_outer, (len(x)**2, -1))
tf_L = tf.unstack(tf_reshape_outer)
tfgs = [tf.gradients(elem, [tf_x])[0] for elem in tf_L]
tfgs
with tf.Session() as sess:
print(sess.run(tfgs, feed_dict={tf_x: x}))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fit a line to a toy 2D problem.
Step2: MNIST
Step3: Multilayer convolutional network
Step4: The intuition is correct. In mathematics, $\frac{\partial x b^\top}{\partial x_i}$ is a matrix. But, TensorFlow sums all the elements in this matrix and treats the result as the derivative with respect to $x_i$. It then stacks all the derivatives with respect to $x_1, \ldots, x_d$. This is to say that $\frac{\partial }{\partial x}$ will always be of the size of $x$ regardless of the size of the numerator.
|
966 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import math
import matplotlib.pyplot as plt
from matplotlib import pylab
from scipy.interpolate import interp1d
from scipy.misc import derivative
import thinkstats2
import thinkplot
from thinkstats2 import Cdf
import survival
import marriage
%time df = pd.read_hdf('FemMarriageData.hdf', 'FemMarriageData')
df.shape
df.cycle.value_counts().sort_index()
def format_date_range(array):
a, b = array.astype(int)
return '%d--%d' % (a, b)
def SummarizeCycle(cycle, df):
ages = df.age.min(), df.age.max()
ages= np.array(ages)
intvws = df.cmintvw.min(), df.cmintvw.max()
intvws = np.array(intvws) / 12 + 1900
births = df.cmbirth.min(), df.cmbirth.max()
births = np.array(births) / 12 + 1900
intvw_dates = format_date_range(intvws)
birth_dates = format_date_range(births)
print(cycle, ' & ', intvw_dates, '&', len(df), '&', birth_dates, r'\\')
for cycle, group in df.groupby('cycle'):
SummarizeCycle(cycle, group)
marriage.DigitizeResp(df)
grouped = df.groupby('birth_index')
for name, group in iter(grouped):
age_range = '%d--%d' % (int(group.age.min()), int(group.age_index.max()))
print(name, '&', len(group), '&', age_range,
'&', len(group[group.evrmarry]), '&', sum(group.missing), r'\\')
def ComputeCutoffs(df):
grouped = df.groupby('birth_index')
cutoffs = {}
for name, group in sorted(grouped):
cutoffs[name] = int(group.age.max())
return cutoffs
cutoffs = ComputeCutoffs(df)
cutoffs
cohort = grouped.get_group(80)
missing = (cohort.evrmarry & cohort.agemarry.isnull())
cohort = cohort[~missing]
complete = cohort[cohort.evrmarry].agemarry_index
ongoing = cohort[~cohort.evrmarry].age_index
hf = survival.EstimateHazardFunction(complete, ongoing, verbose=True)
cohort = grouped.get_group(70)
missing = (cohort.evrmarry & cohort.agemarry.isnull())
cohort = cohort[~missing]
complete = cohort[cohort.evrmarry].agemarry_index
ongoing = cohort[~cohort.evrmarry].age_index
hf = survival.EstimateHazardFunction(complete, ongoing, verbose=True)
cohort = grouped.get_group(30)
sf = survival.MakeSurvivalFromSeq(cohort.agemarry_index.fillna(np.inf))
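# fillna(np.inf): respondents who never married get an infinite "age at marriage",
# so they never register as events within the observed range (effectively censored).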
ts, ss = sf.Render()
print(ss)
thinkplot.Plot(ts, ss)
thinkplot.Config(xlim=[12, 42])
hf = sf.MakeHazardFunction()
ts, lams = hf.Render()
print(lams)
thinkplot.Plot(ts, lams)
thinkplot.Config(xlim=[12, 42])
options = dict(formats=['pdf', 'png'], clf=False)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(sf, label='survival')
thinkplot.Config(xlim=[13, 41], ylim=[0, 1.05])
plt.ylabel('Survival Function')
thinkplot.SubPlot(2)
thinkplot.Plot(hf, label='hazard')
thinkplot.Config(xlabel='age(years)', ylabel='Hazard function', xlim=[13, 41])
plt.ylabel('Hazard Function')
plt.xlabel('Age (years)')
thinkplot.Save(root='figs/marriage1', **options)
thinkplot.Plot(sf, label='30s')
thinkplot.Config(xlim=[13, 41], ylim=[0, 1.05])
plt.xlabel('Age (years)', fontsize=14)
plt.ylabel('Survival function', fontsize=14)
thinkplot.Save(root='figs/marriage2', **options)
thinkplot.Plot(hf, label='30s')
thinkplot.Config(xlim=[13, 41])
plt.xlabel('Age (years)', fontsize=14)
plt.ylabel('Hazard function', fontsize=14)
thinkplot.Save(root='figs/marriage3', **options)
pt = df.pivot_table(index='birth_index', columns='age_index', values='age', aggfunc=len, fill_value=0)
pt
df.pivot_table(index='birth_index', columns='agemarry_index', values='age', aggfunc=len, fill_value=0)
df['complete'] = df.evrmarry
df['complete_var'] = df.agemarry_index
df['ongoing_var'] = df.age_index
df['complete_missing'] = df.complete & df.complete_var.isnull()
df['ongoing_missing'] = ~df.complete & df.ongoing_var.isnull()
# for some marriages, we don't have the date of marriage
for cycle, group in df.groupby('cycle'):
print(cycle, sum(group.complete_missing), sum(group.ongoing_missing))
resps = [group for cycle, group in df.groupby('cycle')]
iters = 101
%time sf_map = marriage.EstimateSurvivalByCohort(resps, iters=iters, cutoffs=cutoffs)
del sf_map[30]
try:
del sf_map[100]
except KeyError:
pass
for sf in sf_map[90]:
print(sf.ss)
print(sf.Prob(34))
break
for sf in sf_map[80]:
print(sf.ss)
print(sf.Prob(34))
break
def PlotSurvivalFunctions(root, sf_map, sf_map_pred=None, **options):
if sf_map_pred:
marriage.PlotSurvivalFunctions(sf_map_pred, predict_flag=True)
marriage.PlotSurvivalFunctions(sf_map)
thinkplot.config(xlabel='Age (years)',
ylabel='Fraction never married',
xlim=[13, 50],
ylim=[0, 1.05],
loc='upper right',
frameon=False,
**options)
plt.tight_layout()
thinkplot.save(root=root, formats=['pdf', 'png'])
def set_palette(*args, **kwds):
"""Set the matplotlib color cycler.
args, kwds: same as for sns.color_palette
Also takes a boolean kwd, `reverse`, to indicate
whether the order of the palette should be reversed.
returns: list of colors
"""
reverse = kwds.pop('reverse', False)
palette = sns.color_palette(*args, **kwds)
palette = list(palette)
if reverse:
palette.reverse()
cycler = plt.cycler(color=palette)
plt.gca().set_prop_cycle(cycler)
return palette
def draw_age_lines(ages):
for age in ages:
plt.axvline(age, color='gray', linestyle='dotted', alpha=0.3)
palette = set_palette('hls', 6)
draw_age_lines(ages)
options_w = dict(title='Women in the U.S. by decade of birth')
PlotSurvivalFunctions('figs/marriage4', sf_map, None, **options_w)
def MakeTable(sf_map, ages):
t = []
for name, sf_seq in sorted(sf_map.items()):
ts, ss = marriage.MakeSurvivalCI(sf_seq, [50])
ss = ss[0]
vals = [np.interp(age, ts, ss, right=np.nan) for age in ages]
t.append((name, vals))
return t
def MakePercentageTable(sf_map, ages):
"""Prints percentage unmarried for each cohort at each age."""
t = MakeTable(sf_map, ages)
for name, sf_seq in sorted(sf_map.items()):
ts, ss = marriage.MakeSurvivalCI(sf_seq, [50])
ss = ss[0]
vals = [np.interp(age, ts, ss, right=np.nan) for age in ages]
print(name, '&', ' & '.join('%0.0f' % (val*100) for val in vals), r'\\')
ages = [24, 34, 44]
MakePercentageTable(sf_map, ages=ages)
%time sf_map_pred = marriage.EstimateSurvivalByCohort(resps, iters=iters, cutoffs=cutoffs, predict_flag=True)
del sf_map_pred[30]
try:
del sf_map[100]
except KeyError:
pass
for cohort, seq in sf_map_pred.items():
if cohort > 90:
break
medians = [sf.MakeCdf().Value(0.5) for sf in seq]
print(cohort, np.median(medians))
palette = set_palette('hls', 6)
draw_age_lines(ages)
PlotSurvivalFunctions('figs/marriage5', sf_map, sf_map_pred, **options_w)
MakePercentageTable(sf_map_pred, ages)
def PlotFractions(sf_map, ages, label_flag=False, **options):
t = MakeTable(sf_map, ages)
cohorts, cols = zip(*t)
rows = zip(*cols)
thinkplot.PrePlot(3)
t = list(zip(ages, rows))
for age, row in reversed(t):
label = 'at age %d' % age if label_flag else ''
thinkplot.Plot(cohorts, row, label=label, **options)
PlotFractions(sf_map_pred, ages, color='gray', linestyle='dashed', linewidth=2)
PlotFractions(sf_map, ages, label_flag=True, alpha=1)
fontsize=12
thinkplot.Text(36, 0.26, '24', fontsize=fontsize)
thinkplot.Text(37, 0.13, '9', fontsize=fontsize)
thinkplot.Text(37, 0.07, '7', fontsize=fontsize)
thinkplot.Text(90, 0.85, '80', fontsize=fontsize)
thinkplot.Text(90, 0.56, '51', fontsize=fontsize)
thinkplot.Text(89.5, 0.47, '42', fontsize=fontsize)
thinkplot.Text(80, 0.42, '35', fontsize=fontsize)
thinkplot.Text(70, 0.18, '18', fontsize=fontsize)
thinkplot.Config(xlim=[34, 97], ylim=[0, 1], legend=True, loc='upper left',
xlabel='cohort (decade)', ylabel='Fraction unmarried',
title='Women in the U.S.')
thinkplot.Save(root='figs/marriage6', **options)
%time df2 = pd.read_hdf('MaleMarriageData.hdf', 'MaleMarriageData')
df2.shape
for cycle, group in df2.groupby('cycle'):
SummarizeCycle(cycle, group)
sum(df2.missing)
marriage.DigitizeResp(df2)
grouped = df2.groupby('birth_index')
for name, group in iter(grouped):
age_range = '%d--%d' % (int(group.age.min()), int(group.age_index.max()))
print(name, '&', len(group), '&', age_range,
'&', len(group[group.evrmarry]), '&', sum(group.missing), r'\\')
cutoffs2 = ComputeCutoffs(df2)
cutoffs2
resps2 = [group for cycle, group in df2.groupby('cycle')]
%time sf_map_male = marriage.EstimateSurvivalByCohort(resps2, iters=iters, cutoffs=cutoffs2)
del sf_map_male[100]
palette = set_palette('hls', 6)
draw_age_lines(ages)
options_m = dict(title='Men in the U.S. by decade of birth')
PlotSurvivalFunctions('figs/marriage7', sf_map_male, None, **options_m)
%time sf_map_male_pred = marriage.EstimateSurvivalByCohort(resps2, iters=iters, cutoffs=cutoffs2, predict_flag=True)
del sf_map_male_pred[100]
for cohort, seq in sf_map_male_pred.items():
if cohort > 90:
break
medians = [sf.MakeCdf().Value(0.5) for sf in seq]
print(cohort, np.median(medians))
palette = set_palette('hls', 6)
draw_age_lines(ages)
PlotSurvivalFunctions('figs/marriage8', sf_map_male, sf_map_male_pred, **options_m)
MakePercentageTable(sf_map_male, ages)
MakePercentageTable(sf_map_male_pred, ages)
PlotFractions(sf_map_male_pred, ages, color='gray', linestyle='dashed', linewidth=2)
PlotFractions(sf_map_male, ages, label_flag=True, alpha=1)
fontsize=12
thinkplot.Text(46, 0.69, '68', fontsize=fontsize)
thinkplot.Text(46, 0.30, '26', fontsize=fontsize)
thinkplot.Text(46, 0.20, '18', fontsize=fontsize)
thinkplot.Text(70, 0.18, '19', fontsize=fontsize)
thinkplot.Text(80, 0.43, '43', fontsize=fontsize)
thinkplot.Text(90, 0.89, '86', fontsize=fontsize)
thinkplot.Text(90, 0.56, '52', fontsize=fontsize)
thinkplot.Text(90, 0.40, '38', fontsize=fontsize)
thinkplot.Config(xlim=[34, 97], ylim=[0, 1], legend=True, loc='upper left',
xlabel='cohort (decade)', ylabel='Fraction unmarried',
title='Men in the U.S.')
thinkplot.Save(root='figs/marriage9', **options)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a table showing the number of respondents in each cycle
Step2: Check for missing values in agemarry
Step3: Estimate the hazard function for the 80s cohort (curious to see what's going on during the "marriage strike")
Step4: Run the same analysis for the 70s cohort (to extract $\lambda(33)$).
Step5: Use the 30s cohort to demonstrate the simple way to do survival analysis, by computing the survival function directly.
Step6: Then use the SurvivalFunction to compute the HazardFunction
Step7: Make the first figure, showing sf and hf for the 30s cohort
Step8: Make some pivot tables, just to see where the data are
Step9: The following pivot table is not as helpful as it could be, since it doesn't show the number at risk.
Step10: Estimate the survival curve for each cohort
Step11: Check a sample
Step13: Make the figure showing estimated survival curves
Step15: Make a table of marriage rates for each cohort at each age
Step16: Generate projections
Step17: And make the figure showing projections
Step18: Make the table again with the projections filled in.
Step19: Marriage patterns for men
|
967 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/titanic.csv'
titanic = pd.read_csv(url, index_col='PassengerId')
titanic.head()
feature_cols = ['Pclass', 'Parch']
X = titanic[feature_cols]
Y = titanic.Survived
import numpy as np
# Insert code here
random_sample = np.random.rand(Y.shape[0])
X_train, X_test = X[random_sample<0.7], X[random_sample>=0.7]
Y_train, Y_test = Y[random_sample<0.7], Y[random_sample>=0.7]
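# Note: this split is only approximately 70/30 - the mask is random, so the exact sizes vary between runs.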
print(Y_train.shape, Y_test.shape)
import numpy as np
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
print(Y_train.shape, Y_test.shape)
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=1e9)
mod1=logreg.fit(X_train, Y_train)
mod1.coef_
titanic['survive_pred'] = logreg.predict(X)
titanic.head()
survive_pred = logreg.predict(X_test)
(Y_test == survive_pred).mean()
from sklearn.metrics import confusion_matrix
confusion_matrix(Y_test, survive_pred)
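# In scikit-learn's confusion matrix, rows are the true classes and columns are the predicted classes.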
survive_pred_prob=logreg.predict_proba(X_test)[:,1]
predict2=np.where(survive_pred_prob >= 0.7, 1, 0)
confusion_matrix(Y_test, predict2)
(Y_test == predict2).mean()
survive_pred_prob=logreg.predict_proba(X_test)[:,1]
predict2=np.where(survive_pred_prob >= 0.3, 1, 0)
confusion_matrix(Y_test, predict2)
(Y_test == predict2).mean()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create X and y
Step2: Exercise 5.1
Step3: Exercise 5.2
Step4: Exercise 5.3
Step5: Exercise 5.4
Step6: Exercise 5.5
|
968 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
coverage = pd.read_csv("../dataset/jacoco_production_coverage_spring_petclinic.csv")
coverage.head()
coverage['lines'] = coverage.LINE_MISSED + coverage.LINE_COVERED
coverage['covered'] = coverage.LINE_COVERED / coverage.lines
coverage.head()
coverage['fqn'] = coverage.PACKAGE + "." + coverage.CLASS
coverage_per_class = coverage.set_index('fqn')[['lines', 'covered']]
coverage_per_class.head()
git_log = pd.read_csv(
"../dataset/git_log_numstat_spring_petclinic.log",
sep="\t",
names=['additions', 'deletions', 'path'])
git_log.head()
git_log['fqn'] = git_log.path.str.extract(
"/java/(.*)\.java",
expand=True)[0]
git_log['fqn'] = git_log.fqn.str.replace("/", ".")
git_log['fqn'][0]
changes_per_file = git_log.groupby('fqn').path.count()
changes_per_file.name = "changes"
changes_per_file.head()
analysis = coverage_per_class.join(changes_per_file)
analysis.head()
analysis['tech'] = analysis.index.str.split(".").str[-2]
analysis.head()
tech_insights = analysis.groupby('tech').agg({
"lines" : "sum",
"covered": "mean",
"changes" : "sum"
})
tech_insights
%matplotlib inline
from ausi import portfolio
portfolio.plot_diagram(tech_insights, "changes", "covered", "lines");
analysis['domain'] = "Other"
domains = ["Owner", "Pet", "Visit", "Vet", "Specialty", "Clinic"]
for domain in domains:
analysis.loc[analysis.index.str.contains(domain), 'domain'] = domain
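# Note: if a class name matches several keywords, it ends up assigned to the last matching domain in the list.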
analysis.head()
domain_insights = analysis.groupby('domain').agg({
"lines" : "sum",
"covered": "mean",
"changes" : "sum"
})
domain_insights = domain_insights.rename(columns=
{"lines": "Größe", "covered" : "Nutzungsgrad", "changes" : "Investition"})
domain_insights
portfolio.plot_diagram(domain_insights, "Investition", "Nutzungsgrad", "Größe");
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calculating key metrics for size and usage level
Step2: Preparing the link to technical debt
Step3: Change frequency of the software
Step4: Preparing the mapping to utilization data
Step5: Assembling the data needed for deeper analyses
Step6: Creating the management view
Step7: Identifying technical components
Step8: Aggregating by technical components
Step9: Evaluation matrix from a technical perspective
Step10: Identifying domain components
Step11: Aggregating by domain components
Step12: Evaluation matrix from a domain perspective
|
969 | <ASSISTANT_TASK:>
Python Code:
# boilerplate includes
import sys
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
import datetime
import scipy.interpolate
# import re
from IPython.display import display, HTML
%matplotlib notebook
plt.style.use('seaborn-notebook')
pd.set_option('display.max_columns', None)
# PARAMETERS (might be overridden by a calling script)
# if not calling from another script (batch), SUBNOTEBOOK_FLAG might not be defined
try:
SUBNOTEBOOK_FLAG
except NameError:
SUBNOTEBOOK_FLAG = False
# Not calling as a sub-script? define params here
if not SUBNOTEBOOK_FLAG:
# SET PARAMETER VARIABLES HERE UNLESS CALLING USING %run FROM ANOTHER NOTEBOOK
DATADIR = '../data/temperatures/ISD'
OUTDIR = '../data/temperatures'
FTPHOST = 'ftp.ncdc.noaa.gov'
FETCH_STATIONS_LIST_FILE = True
TEMP_COL = 'AT' # The label of the hourly temperature column we make/output
# Resampling and interpolation parameters
# spline order used for converting to on-the-hour and filling small gaps
BASE_INTERPOLATION_K = 1 # 1 for linear interpolation
# give special treatment to data gaps longer than...
POTENTIALLY_PROBLEMATIC_GAP_SIZE = pd.Timedelta('03:00:00')
# Time range to use for computing normals (30 year, just like NOAA uses)
NORM_IN_START_DATE = '1986-07-01'
NORM_IN_END_DATE = '2016-07-01'
# Time range or normals to output to use when running 'medfoes on normal temperature' (2 years, avoiding leapyears)
NORM_OUT_START_DATE = '2014-01-01'
NORM_OUT_END_DATE = '2015-12-31 23:59:59'
print("Cleaning temperature data for ",STATION_CALLSIGN)
# Potentially turn interactive figure display off
if SUPPRESS_FIGURE_DISPLAY:
plt.ioff()
# Load the data
fn = "{}_AT.h5".format(STATION_CALLSIGN)
ot = pd.read_hdf(os.path.join(DATADIR,fn), 'table')
t = ot.copy(deep=True) # not needed, just safety
# just showing the duplicates
tmp = t[t.index.duplicated(keep=False)].sort_index()
print(len(tmp), 'duplicates')
#display(tmp) # decomment to see the list of duplicates
# actually remove duplicates, just keeping the first
# @TCC could somehow try to identify the most reliable or take mean or such
t = t[~t.index.duplicated(keep='first')].sort_index()
# fairly permissive settings
rolling_sigma_window = 24*5 # None or 0 to just use median instead of median/sigma
rolling_median_window = 5
thresh = 1.5 # deviation from median/sigma to trigger removal
multipass = True # cycle until no points removed, or False for not
tin = t
cum_num = 0
while multipass:
if rolling_sigma_window:
sigma = t['AT'].rolling(window=rolling_sigma_window, center=True).std()
else:
sigma = 1
diff = (t['AT']-t['AT'].rolling(window=rolling_median_window, center=True).median())/sigma
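# diff measures the deviation from a short centred rolling median, scaled by the local rolling std
# (when enabled); points whose scaled deviation exceeds `thresh` are flagged as outliers below.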
outlier_mask = diff.abs() > thresh
num = np.count_nonzero(outlier_mask)
cum_num += num
print("removing {} points".format(num))
if num == 0:
break
# plotting each step
# ax = t.plot(linestyle='-', marker='*')
# if np.count_nonzero(outlier_mask) > 0:
# t[outlier_mask].plot(ax=ax, linestyle='none', marker='o', color='red')
# diff.abs().plot(ax=ax)
# if np.count_nonzero(outlier_mask) > 0:
# diff.abs()[outlier_mask].plot(ax=ax, linestyle='none', marker='o', color='yellow')
t = t[~outlier_mask]
# plot showing what is being removed
if cum_num > 0:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax = tin[~tin.index.isin(t.index)].plot(ax=ax, linestyle='none', marker='o', color='r', zorder=8)
ax = tin.plot(ax=ax, linestyle='-', linewidth=1, marker=None, color='red')
ax = t.plot(ax=ax, linestyle='-', marker='.', color='blue')
ax.set_ylabel('air temperature [$\degree$ C]')
ax.legend(['outlier', 'original', 'cleaned'])
ax.set_title(STATION_CALLSIGN)
# saving figure
# saving
fn = '{}_outlier.png'.format(STATION_CALLSIGN)
fig.savefig(os.path.join(OUTDIR,fn))
#mpld3.save_html(fig, '{}_outler.html'.format(STATION_CALLSIGN))
# Actually apply the outlier removal
ot = t
def remove_spurious_temps(ot, query_op, date1, date2=None, plot=True, inplace=False):
if date2 is None:
date2 = date1
ax = ot.loc[date1:date2].plot(ax=None, linestyle='-', marker='o') # plot
out_t = ot.drop(ot.loc[date1:date2].query('AT {}'.format(query_op)).index, inplace=inplace)
if inplace:
out_t = ot
out_t.loc[date1:date2].plot(ax=ax, linestyle='-', marker='*') # plot'
ax.set_title("Remove AT {}, range=[{}:{}]".format(query_op, date1, date2))
return out_t
STATION_CALLSIGN
if STATION_CALLSIGN == 'KSNA': # KSNA (Orange County)
# 2016-08-14 to 2016-08-15 overnight has some >0 values when they should be more like 19-20
remove_spurious_temps(ot, '< 0', '2016-08-14', '2016-08-15', inplace=True)
if STATION_CALLSIGN == 'KSFO':
remove_spurious_temps(ot, '< 0', '1976-07-16', '1976-07-17', inplace=True)
if STATION_CALLSIGN == 'KRIV':
remove_spurious_temps(ot, '< 0', '1995-11-15', '1995-11-15', inplace=True)
# flag the gaps in the original data that are possibly too long for the simple interpolation we did above
gaps_filename = os.path.join(OUTDIR, "{}_AT_gaps.tsv".format(STATION_CALLSIGN))
gaps = ot.index.to_series().diff()[1:]
idx = np.flatnonzero(gaps > POTENTIALLY_PROBLEMATIC_GAP_SIZE)
prob_gaps = gaps[idx]
# save to file for future reference
with open(gaps_filename,'w') as fh:
# output the gaps, biggest to smallest, to review
print('#', STATION_CALLSIGN, ot.index[0].isoformat(), ot.index[-1].isoformat(), sep='\t', file=fh)
print('# Potentially problematic gaps:', len(prob_gaps), file=fh)
tmp = prob_gaps.sort_values(ascending=False)
for i in range(len(tmp)):
rng = [tmp.index[i]-tmp.iloc[i], tmp.index[i]]
print(rng[0], rng[1], rng[1]-rng[0], sep='\t', file=fh)
if not SUPPRESS_FIGURE_DISPLAY:
# go ahead and just print it here too
with open(gaps_filename) as fh:
for l in fh:
print(l, end='')
else:
print('# Potentially problematic gaps:', len(prob_gaps))
# Interpolate to get on-the-hour values
newidx = pd.date_range(start=ot.index[0].round('d')+pd.Timedelta('0h'),
end=ot.index[-1].round('d')-pd.Timedelta('1s'),
freq='1h', tz='UTC')
if True:
# Simple linear interpolation
at_interp_func = scipy.interpolate.interp1d(ot.index.astype('int64').values,
ot['AT'].values,
kind='linear',
fill_value=np.nan, #(0,1)
bounds_error=False)
else:
# Should be better method, but has some screwy thing using updated data
at_interp_func = scipy.interpolate.InterpolatedUnivariateSpline(
ot.index.astype('int64').values,
ot['AT'].values,
k=BASE_INTERPOLATION_K,
ext='const')
nt = pd.DataFrame({'AT':at_interp_func(newidx.astype('int64').values)},
index=newidx)
# Fill those gaps using day-to-day (at same hour) interpolation
gap_pad = pd.Timedelta('-10m') # contract the gaps a bit so we don't remove good/decent edge values
t = nt.copy(deep=True) # operate on a copy so we can compare with nt
# fill the gap ranges with nan (replacing the default interpolation)
for i in range(len(prob_gaps)):
rng = [prob_gaps.index[i]-prob_gaps.iloc[i], prob_gaps.index[i]]
t[rng[0]-gap_pad:rng[1]+gap_pad] = np.nan
# reshape so each row is a whole day's (24) data points
rows = int(t.shape[0]/24)
foo = pd.DataFrame(t.iloc[:rows*24].values.reshape((rows,24)))
# simple linear interpolation
foo.interpolate(method='linear', limit=24*60, limit_direction='both', inplace=True)
# # Alternative interpolation using running means
# # @TCC not great for very large gaps
# RUNNING_MEAN_WINDOW_SIZE = 3
# while True:
# # interpolate each column (temp at hour x on each day)
# # filling nans with values from a windowed running mean
# foo.fillna(foo.rolling(window=RUNNING_MEAN_WINDOW_SIZE, min_periods=1, center=True).mean(), inplace=True)
# if not foo.isnull().values.any():
# break
# reshape back
t = pd.DataFrame({'AT':foo.stack(dropna=False).values}, index=t.index[:rows*24])
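# Reshaping to (days x 24 hours), interpolating column-wise, and stacking back fills each missing
# hour from the same hour on neighbouring days, preserving the daily temperature cycle across long gaps.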
# You can specify a specific range by setting r1 and r2, or None for full range
#r1, r2 = '1952-05-07', '1952-05-23'
r1, r2 = None, None
if r1 is None:
r1 = t.index[0]
if r2 is None:
r2 = t.index[-1]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(ot.loc[r1:r2].index, ot.loc[r1:r2]['AT'], linestyle='none', marker='.', label='raw')
#ax.scatter(ot.loc[r1:r2].index, ot.loc[r1:r2]['AT'], marker='.', label='raw')
ax.plot(nt.loc[r1:r2].index, nt.loc[r1:r2]['AT'], linestyle='-', marker=None, lw=1, label='interpolated')
# ax.plot(t.loc[r1:r2].index, t.loc[r1:r2]['AT'], '-*', lw=1, label='filled')
# @TCC maybe make a single dataframe with the parts I don't want deleted or masked out
for i in range(len(prob_gaps)):
if i == 0: # only label first segment
label = 'filled'
else:
label = ''
rng = [tmp.index[i]-tmp.iloc[i], tmp.index[i]]
ax.plot(t.loc[rng[0]:rng[1]].index, t.loc[rng[0]:rng[1]]['AT'], '.-', lw=1, color='r', label=label)
# # mark the big gaps with vertical lines
# for i in range(len(prob_gaps)):
# ax.axvline(prob_gaps.index[i]-prob_gaps.iloc[i],
# c='k', ls=':', lw=0.5)
# ax.axvline(prob_gaps.index[i],
# c='k', ls=':', lw=0.5)
ax.set_xlim((r1,r2))
ax.set_xlabel('DateTime')
ax.set_ylabel('Temperature [$\degree$C]')
ax.set_title(STATION_CALLSIGN)
ax.legend()
# saving
fig.savefig(os.path.join(OUTDIR, '{}_cleaning.png'.format(STATION_CALLSIGN)))
#mpld3.save_html(fig, '{}_cleaning.html'.format(STATION_CALLSIGN))
outfn = os.path.join(OUTDIR, "{}_AT_cleaned".format(STATION_CALLSIGN))
print("Saving cleaned temp data to:", outfn)
t.to_hdf(outfn+'.h5', 'table', mode='w',
data_colums=True, complevel=5, complib='bzip2',
dropna=False)
# # Time range to use for computing normals (30 year, just like NOAA uses)
# NORM_IN_START_DATE = '1986-07-01'
# NORM_IN_END_DATE = '2016-07-01'
# # Time range or normals to output to use when running 'medfoes on normal temperature' (2 years, avoiding leapyears)
# NORM_OUT_START_DATE = '2014-01-01'
# NORM_OUT_END_DATE = '2015-12-31 23:59:59'
# %run "Temperature functions.ipynb" # for compute_year_over_year_norm function
# tempnorm = compute_year_over_year_norm(ot,
# NORM_OUT_START_DATE, NORM_OUT_END_DATE,
# NORM_IN_START_DATE, NORM_IN_END_DATE,
# freq='hourly',
# interp_method='linear',
# norm_method='mean')
# # Save as csv for medfoes input
# outfn = os.path.join(OUTDIR, "{}_AT_cleaned_normalsX2.csv".format(STATION_CALLSIGN))
# print("Saving temp normals data to:",outfn)
# tempnorm.to_csv(outfn, index_label='datetime')
# tempnorm.plot()
# Turn interactive display back on, if we turned it off
if SUPPRESS_FIGURE_DISPLAY:
plt.ion()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constants / Parameters
Step2: Interpolation and cleanup
Step3: Deduplication
Step4: Outlier removal
Step5: "by-hand" fixes for particular datasets, hopefully minimal
Step6: Identify bigger gaps which will get filled day-over-day interpolation
Step7: Interpolate to produce on-the-hour values
Step8: Fill the bigger gaps
Step9: Check that it looks OK...
Step10: Save final cleaned temperatures
Step11: Compute the normals
|
970 | <ASSISTANT_TASK:>
Python Code:
from Bio.Seq import Seq
my_seq = Seq("GATCG")
for index, letter in enumerate(my_seq):
print("%i %s" % (index, letter))
print(len(my_seq))
print(my_seq[0]) #first letter
print(my_seq[2]) #third letter
print(my_seq[-1]) #last letter
print("AAAA".count("AA"))
print(Seq("AAAA").count("AA"))
my_seq = Seq('GATCGATGGGCCTATATAGGATCGAAAATCGC')
print(len(my_seq))
print(my_seq.count("G"))
print(100 * float(my_seq.count("G") + my_seq.count("C")) / len(my_seq))
from Bio.SeqUtils import GC
my_seq = Seq('GATCGATGGGCCTATATAGGATCGAAAATCGC')
GC(my_seq)
my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
my_seq[4:12]
print(my_seq[0::3])
print(my_seq[1::3])
print(my_seq[2::3])
my_seq[::-1]
str(my_seq)
print(my_seq)
fasta_format_string = ">Name\n%s\n" % my_seq
print(fasta_format_string)
protein_seq = Seq("EVRNAK")
dna_seq = Seq("ACGT")
protein_seq + dna_seq
from Bio.Seq import Seq
list_of_seqs = [Seq("ACGT"), Seq("AACC"), Seq("GGTT")]
concatenated = Seq("")
for s in list_of_seqs:
concatenated += s
concatenated
from Bio.Seq import Seq
contigs = [Seq("ATG"), Seq("ATCCCG"), Seq("TTGCA")]
spacer = Seq("N"*10)
spacer.join(contigs)
dna_seq = Seq("acgtACGT")
print(dna_seq)
print(dna_seq.upper())
print(dna_seq.lower())
print("GTAC" in dna_seq)
print("GTAC" in dna_seq.upper())
from Bio.Seq import Seq
my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
print(my_seq)
print(my_seq.complement())
print(my_seq.reverse_complement())
my_seq[::-1]
from Bio.Seq import Seq
protein_seq = Seq("EVRNAK")
protein_seq.complement()
coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
print(coding_dna)
template_dna = coding_dna.reverse_complement()
print(template_dna)
messenger_rna = coding_dna.transcribe()
messenger_rna
template_dna.reverse_complement().transcribe()
messenger_rna.back_transcribe()
messenger_rna.translate()
coding_dna.translate()
coding_dna.translate(table="Vertebrate Mitochondrial")
coding_dna.translate(table=2)
coding_dna.translate()
coding_dna.translate(to_stop=True)
coding_dna.translate(table=2)
coding_dna.translate(table=2, to_stop=True)
coding_dna.translate(table=2, stop_symbol="@")
gene = Seq("GTGAAAAAGATGCAATCTATCGTACTCGCACTTTCCCTGGTTCTGGTCGCTCCCATGGCA" +
"GCACAGGCTGCGGAAATTACGTTAGTCCCGTCAGTAAAATTACAGATAGGCGATCGTGAT" +
"AATCGTGGCTATTACTGGGATGGAGGTCACTGGCGCGACCACGGCTGGTGGAAACAACAT" +
"TATGAATGGCGAGGCAATCGCTGGCACCTACACGGACCGCCGCCACCGCCGCGCCACCAT" +
"AAGAAAGCTCCTCATGATCATCACGGCGGTCATGGTCCAGGCAAACATCACCGCTAA")
gene.translate(table="Bacterial")
gene.translate(table="Bacterial", to_stop=True)
gene.translate(table="Bacterial", cds=True)
from Bio.Data import CodonTable
standard_table = CodonTable.unambiguous_dna_by_name["Standard"]
mito_table = CodonTable.unambiguous_dna_by_name["Vertebrate Mitochondrial"]
standard_table = CodonTable.unambiguous_dna_by_id[1]
mito_table = CodonTable.unambiguous_dna_by_id[2]
print(standard_table)
print(mito_table)
print(mito_table.stop_codons)
print(mito_table.start_codons)
print(mito_table.forward_table["ACG"])
from Bio.Seq import Seq
seq1 = Seq("ACGT")
seq2 = Seq("ACGT")
print(seq1 == seq2)
print(seq1 == seq1)
my_seq = Seq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA")
my_seq[5] = "G"
mutable_seq = my_seq.tomutable()
mutable_seq
from Bio.Seq import MutableSeq
mutable_seq = MutableSeq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA")
mutable_seq[5] = "C"
print(mutable_seq)
mutable_seq.remove("T")
print(mutable_seq)
mutable_seq.reverse()
print(mutable_seq)
new_seq = mutable_seq.toseq()
new_seq
from Bio.Seq import UnknownSeq
unk = UnknownSeq(20)
unk
print(unk)
print(len(unk))
unk_dna = UnknownSeq(20, character= "N")
unk_dna
print(unk_dna)
unk_dna
unk_dna.complement()
unk_dna.reverse_complement()
unk_dna.transcribe()
unk_protein = unk_dna.translate()
unk_protein
print(unk_protein)
print(len(unk_protein))
from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate
my_string = "GCTGTTATGGGTCGTTGGAAGGGTGGTCGTGCTGCTGGTTAG"
print(reverse_complement(my_string))
print(transcribe(my_string))
print(back_transcribe(my_string))
print(translate(my_string))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can access elements of the sequence in the same way as for strings (but remember, Python counts from zero!)
Step2: The Seq object has a .count() method, just like a string.
Step3: For some biological uses, you may actually want an overlapping count
Step4: While you could use the above snippet of code to calculate a GC\%, note that the Bio.SeqUtils module has several GC functions already built. For example
Step5: Note that using the Bio.SeqUtils.GC() function should automatically cope with mixed case sequences and the ambiguous nucleotide S which means G or C.
Step6: Note that ‘Seq‘ objects follow the usual indexing conventions for Python strings, with the first element
Step7: Another stride trick you might have seen with a Python string is the use of a -1 stride to reverse the string. You can do this with a Seq object too
Step8: Turning Seq objects into strings
Step9: Since calling str() on a Seq object returns the full sequence as a string,
Step10: You can also use the Seq object directly with a %s placeholder when using the Python string formatting or interpolation operator (%)
Step11: This line of code constructs a simple FASTA format record (without worrying about line wrapping). Section 4.6 describes a neat way to get a FASTA formatted string from a SeqRecord object, while the more
Step12: Deliberately mixing DNA and protein like this is likely a mistake though...
Step13: Like Python strings, Biopython Seq also has a .join method
Step14: Changing case
Step15: These are useful for doing case insensitive matching
Step16: Nucleotide sequences and (reverse) complements
Step17: As mentioned earlier, an easy way to just reverse a Seq object (or a
Step18: If you do accidentally end up trying to do something weird like taking the (reverse)complement of a
Step19: Here the letter “E” is not a valid IUPAC ambiguity code for nucleotides, so was not complemented.
Step20: These should match the figure above - remember by convention nucleotide sequences are normally read from the 5' to 3' direction, while in the figure the template strand is shown reversed.
Step21: As you can see, all this does is switch T $\rightarrow$ U.
Step22: The Seq object also includes a back-transcription method for going from the mRNA to the coding strand of the DNA. Again, this is a simple U $\rightarrow$ T substitution
Step23: Translation
Step24: You can also translate directly from the coding strand DNA sequence
Step25: You should notice in the above protein sequences that in addition to the end stop character, there is an internal stop as well. This was a deliberate choice of example, as it gives an excuse to talk about some optional arguments, including different translation tables (Genetic Codes).
Step26: You can also specify the table using the NCBI table number which is shorter, and often included in the feature annotation of GenBank files
Step27: Now, you may want to translate the nucleotides up to the first in frame stop codon,
Step28: Notice that when you use the to_stop argument, the stop codon itself
Step29: Now, suppose you have a complete coding sequence CDS, which is to say a
Step30: In the bacterial genetic code GTG is a valid start codon,
Step31: In addition to telling Biopython to translate an alternative start codon as
Step32: Alternatively, these tables are labeled with ID numbers 1 and 2, respectively
Step33: You can compare the actual tables visually by printing them
Step34: You may find these following properties useful -- for example if you are trying
Step35: Comparing Seq objects
Step36: So, what does Biopython do? Well, the equality test is the default for Python
Step37: As an extension to this, while you can use a Python dictionary with
Step38: Observe what happens if you try to edit the sequence
Step39: However, you can convert it into a mutable sequence (a MutableSeq object) and do pretty much anything you want with it
Step40: Alternatively, you can create a MutableSeq object directly from a string
Step41: Either way will give you a sequence object which can be changed
Step42: Do note that unlike the Seq object, the MutableSeq object's methods like reverse_complement() and reverse() act in-situ!
Step43: You can also get a string from a MutableSeq object just like from a Seq object.
Step44: For DNA or RNA sequences, unknown nucleotides are commonly denoted by the letter “N”, while for
Step45: You can use all the usual Seq object methods too, note these give back
Step46: You may be able to find a use for the UnknownSeq object in your own
|
971 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from stingray import Lightcurve, Crossspectrum, sampledata
from stingray.simulator import simulator, models
var = sampledata.sample_data()
# Beware: set tstart here, or nothing will work!
sim = simulator.Simulator(N=1024, mean=0.5, dt=0.125, rms=0.4, tstart=var.tstart)
delay = 10
s_ir = sim.simple_ir(start=delay, width=1)
lc = sim.simulate(var.counts, s_ir)
plt.plot(lc.time, lc.counts)
plt.plot(var.time, var.counts)
cross = Crossspectrum(var, lc)
cross = cross.rebin(0.0050)
lag = cross.time_lag()
plt.figure()
# Plot lag-frequency spectrum.
plt.plot(cross.freq, lag, 'r')
# Find cutoff points
v_cutoff = 1.0/(2*delay)
h_cutoff = lag[int((v_cutoff-0.0050)*1/0.0050)]
plt.axvline(v_cutoff, color='g',linestyle='--')
plt.axhline(h_cutoff, color='g', linestyle='-.')
# Define axis
plt.axis([0,0.2,-20,20])
plt.xlabel('Frequency (Hz)')
plt.ylabel('Lag')
plt.title('Lag-frequency Spectrum')
plt.show()
delays = [10,20]
h1 = sim.simple_ir(start=delays[0], width=1)
h2 = sim.simple_ir(start=delays[1], width=1)
sim.simulate_channel('3.5-4.5', var, h1)
sim.simulate_channel('4.5-5.5', var, h2)
cross = [Crossspectrum(var, lc).rebin(0.005) for lc in sim.get_channels(['3.5-4.5', '4.5-5.5'])]
lags = [c.time_lag() for c in cross]
v_cuts = [1.0/(2*d) for d in delays]
h_cuts = [lag[int((v_cut - 0.005) * 1 / 0.005)] for lag, v_cut in zip(lags, v_cuts)]
plt.figure()
plots = []
colors = ['r','g']
energies = ['3.5-4.5 keV', '4.5-5.5 keV']
# Plot lag-frequency spectrum
for i in range(0,len(lags)):
plots += plt.plot(cross[i].freq, lags[i], colors[i], label=energies[i])
plt.axvline(v_cuts[i],color=colors[i],linestyle='--')
plt.axhline(h_cuts[i], color=colors[i], linestyle='-.')
# Define axes and add labels
plt.axis([0,0.2,-20,20])
plt.legend()
plt.xlabel('Frequencies (Hz)')
plt.ylabel('Lags')
plt.title('Energy Dependent Frequency-lag Spectrum')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import relevant stingray libraries.
Step2: Initializing
Step3: For ease of analysis, define a simple delta impulse response with width 1. Here, start parameter refers to the lag delay, which we will soon see.
Step4: Finally, simulate a filtered light curve. Here, filtered means that the initial lag delay portion is cut.
Step5: Analysis
Step6: Rebin the crosss-spectrum for ease of visualization.
Step7: Calculate time lag.
Step8: Plot lag.
Step9: According to Uttley et al (2014), the lag-frequency spectrum shows a constant delay until the frequency (1/2*time_delay) which is represented by the green vertical line in the above figure. After this point, the phase wraps and the lag becomes negative.
Step10: Now, we create two energy channels to simulate light curves for these two impulse responses.
Step11: Compute cross-spectrum for each channel.
Step12: Calculate lags.
Step13: Get cut-off points.
Step14: Plot lag-frequency spectrums.
|
972 | <ASSISTANT_TASK:>
Python Code:
print('Hello from python!') # to print some text, enclose it between quotation marks - single
print("I'm here today!") # or double
print(34) # print an integer
print(2 + 4) # print the result of an arithmetic operation
print("The answer is", 42) # print multiple expressions, separated by comma
x = 3 # assignment of a simple value
print(x)
y = x + 5 # assignment of a more complex expression
print(y)
i = 12
print(i)
i = i + 1 # assignment of the current value of a variable incremented by 1 to itself
print(i)
i += 1 # shorter version with the special += operator
print(i)
a = 2 # integer
b = 5.0 # float
c = 'word' # string
d = 4 > 5 # boolean True or False
e = None # special built-in value to create a variable that has not been set to anything specific
print(a, b, c, d, e)
print(a, 'is of type', type(a)) # to check the type of a variable
a = 2 # assignment
a += 1 # change and assign (*=, /=)
3 + 2 # addition
3 - 2 # subtraction
3 * 2 # multiplication
3 / 2 # integer (python2) or float (python3) division
3 // 2 # integer division
3 % 2 # remainder
3 ** 2 # exponent
a = ['red', 'blue', 'green'] # manual initialisation
copy_of_a = a[:] # copy of a
another_a = a # same as a
b = list(range(5)) # initialise from iteratable
c = [1, 2, 3, 4, 5, 6] # manual initialisation
len(c) # length of the list
d = c[0] # access first element at index 0
e = c[1:3] # access a slice of the list,
# including element at index 1 up to but not including element at index 3
f = c[-1] # access last element
c[1] = 8 # assign new value at index position 1
g = ['re', 'bl'] + ['gr'] # list concatenation
['re', 'bl'].index('re') # returns index of 're'
a.append('yellow') # add new element to end of list
a.extend(b) # add elements from list `b` to end of list `a`
a.insert(1, 'yellow') # insert element in specified position
're' in ['re', 'bl'] # true if 're' in list
'fi' not in ['re', 'bl'] # true if 'fi' not in list
c.sort() # sort list in place
h = sorted([3, 2, 1]) # returns sorted list
i = a.pop(2) # remove and return item at index (default last)
print(a, b, c, d, e, f, g, h, i)
print(a, copy_of_a, another_a)
a = {'A': 'Adenine', 'C': 'Cytosine'} # dictionary
b = a['A'] # translate item
c = a.get('N', 'no value found') # return default value
'A' in a # true if dictionary a contains key 'A'
a['G'] = 'Guanine' # assign new key, value pair to dictonary a
a['T'] = 'Thymine' # assign new key, value pair to dictonary a
print(a)
d = a.keys() # get list of keys
e = a.values() # get list of values
f = a.items() # get list of key-value pairs
print(b, c, d, e, f)
del a['A'] # delete key and associated value
print(a)
a = {1, 2, 3} # initialise manually
b = set(range(5)) # initialise from iteratable
c = set([1,2,2,2,2,4,5,6,6,6]) # initialise from list
a.add(13) # add new element to set
a.remove(13) # remove element from set
2 in {1, 2, 3} # true if 2 in set
5 not in {1, 2, 3} # true if 5 not in set
d = a.union(b) # return the union of sets as a new set
e = a.intersection(b) # return the intersection of sets as a new set
print(a, b, c, d, e)
a = (123, 54, 92) # initialise manually
b = () # empty tuple
c = ("Ala",) # tuple of a single string (note the trailing ",")
d = (2, 3, False, "Arg", None) # a tuple of mixed types
print(a, b, c, d)
t = a, c, d # tuple packing
x, y, z = t # tuple unpacking
print(t, x, y, z)
a = 'red' # assignment
char = a[2] # access individual characters
b = 'red' + 'blue' # string concatenation
c = '1, 2, three'.split(',') # split string into list
d = '.'.join(['1', '2', 'three']) # concatenate list into string
print(a, char, b, c, d)
dna = 'ATGTCACCGTTT' # assignment
seq = list(dna) # convert string into list of character
e = len(dna) # return string length
f = dna[2:5] # slice string
g = dna.find('TGA') # substring location, return -1 when not found
print(dna, seq, e, f, g)
text = ' chrom start end ' # assignment
print('>', text, '<')
print('>', text.strip(), '<') # remove unwanted whitespace at both end of the string
print('{:.2f}'.format(0.4567)) # formating string
print('{gene:s}\t{exp:+.2f}'.format(gene='Beta-Actin', exp=1.7))
a, b = 1, 2 # assign different values to a and b
if a + b == 3:
print('True')
elif a + b == 1:
print('False')
else:
print('?')
1 == 1 # equal value
1 != 2 # not equal
2 > 1 # larger
2 < 1 # smaller
1 != 2 and 2 < 3 # logical AND
1 != 2 or 2 < 3 # logical OR
not 1 == 2 # logical NOT
a = list('ATGTCACCGTTT')
b = a # same as a
c = a[:] # copy of a
'N' in a # test if character 'N' is in a
print('a', a) # print a
print('b', b) # print b
print('c', c) # print c
print('Is N in a?', 'N' in a)
print('Are objects b and a point to the same memory address?', b is a)
print('Are objects c and a point to the same memory address?', c is a)
print('Are values of b and a identical?', b == a)
print('Are values of c and a identical?', c == a)
a[0] = 'N' # modify a
print('a', a) # print a
print('b', b) # print b
print('c', c) # print c
print('Is N in a?', 'N' in a)
print('Are objects b and a point to the same memory address?', b is a)
print('Are objects c and a point to the same memory address?', c is a)
print('Are values of b and a identical?', b == a)
print('Are values of c and a identical?', c == a)
a = ['red', 'blue', 'green']
for color in a:
print(color)
number = 1
while number < 10:
print(number)
number += 1
# break
sequence = ['CAG','TAC','CAA','TAG','TAC','CAG','CAA']
for codon in sequence:
if codon == 'TAG':
break # Quit looping at this point
else:
print(codon)
# continue
values = [10, -5, 3, -1, 7]
total = 0
for v in values:
if v < 0:
continue # Skip this iteration
total += v
print(values, 'sum:', sum(values), 'total:', total)
# reading from file
with open("data/genes.txt") as f:
for line in f:
print(line.strip())
# writing to a file
with open('programming.txt', 'w') as f:
f.write("I love programming in Python!\n")
f.write("I love making scripts.\n")
# appending to a file
with open('programming.txt', 'a') as f:
f.write("I love working with data.\n")
help(len) # help on built-in function
help(list.extend) # help on list function
# help within jupyter
len?
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables
Step2: Simple data types
Step3: Arithmetic operations
Step4: Lists
Step5: Dictionaries
Step6: Sets
Step7: Tuples
Step8: Strings
Step9: Conditional execution
Step10: Comparison operations
Step11: Loops
Step12: Python has two ways of affecting the flow of the for or while loop inside the block. The break statement immediately causes all looping to finish, and execution is resumed at the next statement after the loop. The continue statement means that the rest of the code in the block is skipped for this particular item in the collection.
Step13: Files
Step14: Getting help
|
973 | <ASSISTANT_TASK:>
Python Code:
import numpy as np # Nearly all programmers use the abbreviation np
ma_liste = [1, 2, 3, 4, 5]
array1 = np.array(ma_liste)
print("ma liste:", ma_liste)
print("objet numpy: ", array1)
array1
ma_liste2 = [10, 20, 30, 40, 50]
mes_listes = [ma_liste, ma_liste2]
print("mes listes: \n", mes_listes)
print("-"*40)
matrice = np.array(mes_listes)
print("La matrice: \n", matrice)
print("-"*40)
print("Représentation de la matrice:")
matrice # pour voir sa représentation
print("shape = ", matrice.shape) # information sur nombre de lignes et colonnes
print("type = ", matrice.dtype) # information sur le contenu
print(np.zeros(4))
print('-'*40)
print(np.zeros([3,3]))
print('-'*40)
print(np.ones([3,2]))
print('-'*40)
print(np.eye(4)) # identity matrix
np.arange(1, 5, 0.2)
mat1 = np.array([ [1, 2, 3], [4, 5, 6]])
print(3 * mat1)
print("-"*40)
print(mat1 / 2)
print("-"*40)
print(1 / mat1)
print("-"*40)
print(mat1 % 3)
print("-"*40)
print(mat1 + 20)
print("-"*40)
mat2 = np.array([ [11, 12, 13], [14, 15, 16]])
print(mat1 + mat2)
print("-"*40)
print(mat2 - mat1)
print("-"*40)
print(mat1 * mat1) # IMPORTANT: this is NOT ordinary matrix multiplication
mat3 = np.array([[1, 2], [3, 4]])
mat3 * mat1
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instead of printing the numpy object, we can look at its representation directly
Step2: Let's create a second list as well as a list of lists
Step3: We therefore have a matrix with 2 rows and 5 columns, whose elements are integers.
Step4: We can automatically create special matrices, filled with zeros or ones, or the identity matrix (which is a square matrix by definition).
Step5: Numpy includes a generalization of Python's "range" function that allows non-integer values.
Step6: Elementary operations on numpy matrices (arrays)
Step7: We now consider operations involving two matrices.
Step8: If we try such operations on matrices of different sizes, it does not work.
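A small sketch (not part of the original notebook) contrasting element-wise multiplication with the true matrix product for these shapes:
import numpy as np

mat1 = np.array([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
mat3 = np.array([[1, 2], [3, 4]])        # shape (2, 2)
# mat3 * mat1 raises a ValueError: element-wise '*' needs matching (broadcastable) shapes.
# The true matrix product only needs the inner dimensions to agree:
print(np.dot(mat3, mat1))  # (2, 2) x (2, 3) -> (2, 3)
print(mat3 @ mat1)         # same result using the @ operator (Python 3.5+)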
|
974 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
import time
# make things wide
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def show_graph(graph_def=None, width=1200, height=800, max_const_size=32, ungroup_gradients=False):
if not graph_def:
graph_def = tf.get_default_graph().as_graph_def()
    """Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
data = str(strip_def)
if ungroup_gradients:
data = data.replace('"gradients/', '"b_')
#print(data)
    code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
    """.format(data=repr(data), id='graph'+str(np.random.rand()))
    iframe = """
<iframe seamless style="width:{}px;height:{}px;border:0" srcdoc="{}"></iframe>
    """.format(width, height, code.replace('"', '&quot;'))
display(HTML(iframe))
# If this errors out, increment the `export_version` variable, restart the Kernel, and re-run
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 10, "The batch size to train")
flags.DEFINE_integer("epoch_number", 10, "Number of epochs to run trainer")
flags.DEFINE_integer("steps_to_validate", 1,
"Steps to validate and print loss")
flags.DEFINE_string("checkpoint_dir", "./checkpoint/",
"indicates the checkpoint dirctory")
#flags.DEFINE_string("model_path", "./model/", "The export path of the model")
flags.DEFINE_string("model_path", "/root/pipeline/prediction.ml/tensorflow/models/tensorflow_minimal/export/", "The export path of the model")
flags.DEFINE_integer("export_version", 37, "The version number of the model")
# If this errors out, increment the `export_version` variable, restart the Kernel, and re-run
def main():
# Define training data
x = np.ones(FLAGS.batch_size)
y = np.ones(FLAGS.batch_size)
# Define the model
X = tf.placeholder(tf.float32, shape=[None], name="X")
Y = tf.placeholder(tf.float32, shape=[None], name="yhat")
w = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
loss = tf.square(Y - tf.mul(X, w) - b)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
predict_op = tf.mul(X, w) + b
saver = tf.train.Saver()
checkpoint_dir = FLAGS.checkpoint_dir
checkpoint_file = checkpoint_dir + "/checkpoint.ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Start the session
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print("Continue training from the model {}".format(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
saver_def = saver.as_saver_def()
print(saver_def.filename_tensor_name)
print(saver_def.restore_op_name)
# Start training
start_time = time.time()
for epoch in range(FLAGS.epoch_number):
sess.run(train_op, feed_dict={X: x, Y: y})
# Start validating
if epoch % FLAGS.steps_to_validate == 0:
end_time = time.time()
print("[{}] Epoch: {}".format(end_time - start_time, epoch))
saver.save(sess, checkpoint_file)
tf.train.write_graph(sess.graph_def, checkpoint_dir, 'trained_model.pb', as_text=False)
tf.train.write_graph(sess.graph_def, checkpoint_dir, 'trained_model.txt', as_text=True)
start_time = end_time
# Print model variables
w_value, b_value = sess.run([w, b])
print("The model of w: {}, b: {}".format(w_value, b_value))
# Export the model
print("Exporting trained model to {}".format(FLAGS.model_path))
model_exporter = exporter.Exporter(saver)
model_exporter.init(
sess.graph.as_graph_def(),
named_graph_signatures={
'inputs': exporter.generic_signature({"features": X}),
'outputs': exporter.generic_signature({"prediction": predict_op})
})
model_exporter.export(FLAGS.model_path, tf.constant(FLAGS.export_version), sess)
print('Done exporting!')
if __name__ == "__main__":
main()
show_graph()
!ls -l /root/pipeline/prediction.ml/tensorflow/models/tensorflow_minimal/export
!ls -l /root/pipeline/prediction.ml/tensorflow/models/tensorflow_minimal/export/00000037
!git status
!git add --all /root/pipeline/prediction.ml/tensorflow/models/tensorflow_minimal/export/00000037/
!git status
!git commit -m "updated tensorflow model"
# If this fails with "Permission denied", use terminal within jupyter to manually `git push`
!git push
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
from IPython.display import clear_output, Image, display, HTML
html = '<iframe width=100% height=500px src="http://demo.pipeline.io:8080/admin">'
display(HTML(html))
!kubectl scale --context=awsdemo --replicas=2 rc spark-worker-2-0-1
!kubectl get pod --context=awsdemo
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
from IPython.display import clear_output, Image, display, HTML
html = '<iframe width=100% height=500px src="http://kubernetes-aws.demo.pipeline.io">'
display(HTML(html))
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
df = spark.read.format("csv") \
.option("inferSchema", "true").option("header", "true") \
.load("s3a://datapalooza/airbnb/airbnb.csv.bz2")
df.registerTempTable("df")
print(df.head())
print(df.count())
df_filtered = df.filter("price >= 50 AND price <= 750 AND bathrooms > 0.0 AND bedrooms is not null")
df_filtered.registerTempTable("df_filtered")
df_final = spark.sql("""
select
id,
city,
case when state in('NY', 'CA', 'London', 'Berlin', 'TX' ,'IL', 'OR', 'DC', 'WA')
then state
else 'Other'
end as state,
space,
cast(price as double) as price,
cast(bathrooms as double) as bathrooms,
cast(bedrooms as double) as bedrooms,
room_type,
host_is_super_host,
cancellation_policy,
cast(case when security_deposit is null
then 0.0
else security_deposit
end as double) as security_deposit,
price_per_bedroom,
cast(case when number_of_reviews is null
then 0.0
else number_of_reviews
end as double) as number_of_reviews,
cast(case when extra_people is null
then 0.0
else extra_people
end as double) as extra_people,
instant_bookable,
cast(case when cleaning_fee is null
then 0.0
else cleaning_fee
end as double) as cleaning_fee,
cast(case when review_scores_rating is null
then 80.0
else review_scores_rating
end as double) as review_scores_rating,
cast(case when square_feet is not null and square_feet > 100
then square_feet
when (square_feet is null or square_feet <=100) and (bedrooms is null or bedrooms = 0)
then 350.0
else 380 * bedrooms
end as double) as square_feet
from df_filtered
""").persist()
df_final.registerTempTable("df_final")
df_final.select("square_feet", "price", "bedrooms", "bathrooms", "cleaning_fee").describe().show()
print(df_final.count())
print(df_final.schema)
# Most popular cities
spark.sql("""
select
state,
count(*) as ct,
avg(price) as avg_price,
max(price) as max_price
from df_final
group by state
order by count(*) desc
""").show()
# Most expensive popular cities
spark.sql("""
select
city,
count(*) as ct,
avg(price) as avg_price,
max(price) as max_price
from df_final
group by city
order by avg(price) desc
""").filter("ct > 25").show()
continuous_features = ["bathrooms", \
"bedrooms", \
"security_deposit", \
"cleaning_fee", \
"extra_people", \
"number_of_reviews", \
"square_feet", \
"review_scores_rating"]
categorical_features = ["room_type", \
"host_is_super_host", \
"cancellation_policy", \
"instant_bookable", \
"state"]
[training_dataset, validation_dataset] = df_final.randomSplit([0.8, 0.2])
continuous_feature_assembler = VectorAssembler(inputCols=continuous_features, outputCol="unscaled_continuous_features")
continuous_feature_scaler = StandardScaler(inputCol="unscaled_continuous_features", outputCol="scaled_continuous_features", \
withStd=True, withMean=False)
categorical_feature_indexers = [StringIndexer(inputCol=x, \
outputCol="{}_index".format(x)) \
for x in categorical_features]
categorical_feature_one_hot_encoders = [OneHotEncoder(inputCol=x.getOutputCol(), \
outputCol="oh_encoder_{}".format(x.getOutputCol() )) \
for x in categorical_feature_indexers]
feature_cols_lr = [x.getOutputCol() \
for x in categorical_feature_one_hot_encoders]
feature_cols_lr.append("scaled_continuous_features")
feature_assembler_lr = VectorAssembler(inputCols=feature_cols_lr, \
outputCol="features_lr")
linear_regression = LinearRegression(featuresCol="features_lr", \
labelCol="price", \
predictionCol="price_prediction", \
maxIter=10, \
regParam=0.3, \
elasticNetParam=0.8)
estimators_lr = \
[continuous_feature_assembler, continuous_feature_scaler] \
+ categorical_feature_indexers + categorical_feature_one_hot_encoders \
+ [feature_assembler_lr] + [linear_regression]
pipeline = Pipeline(stages=estimators_lr)
pipeline_model = pipeline.fit(training_dataset)
print(pipeline_model)
from jpmml import toPMMLBytes
pmmlBytes = toPMMLBytes(spark, training_dataset, pipeline_model)
print(pmmlBytes.decode("utf-8"))
import urllib.request
update_url = 'http://prediction-pmml-aws.demo.pipeline.io/update-pmml/pmml_airbnb'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = urllib.request.Request(update_url, \
headers=update_headers, \
data=pmmlBytes)
resp = urllib.request.urlopen(req)
print(resp.status) # Should return Http Status 200
import urllib.request
update_url = 'http://prediction-pmml-gcp.demo.pipeline.io/update-pmml/pmml_airbnb'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = urllib.request.Request(update_url, \
headers=update_headers, \
data=pmmlBytes)
resp = urllib.request.urlopen(req)
print(resp.status) # Should return Http Status 200
import urllib.request
update_url = 'http://prediction-pmml-azure.demo.pipeline.io/update-pmml/pmml_airbnb'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = urllib.request.Request(update_url, \
headers=update_headers, \
data=pmmlBytes)
resp = urllib.request.urlopen(req)
print(resp.status) # Should return Http Status 200
import urllib.parse
import json
evaluate_url = 'http://prediction-pmml-aws.demo.pipeline.io/evaluate-pmml/pmml_airbnb'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"bathrooms":2.0, \
"bedrooms":2.0, \
"security_deposit":175.00, \
"cleaning_fee":25.0, \
"extra_people":1.0, \
"number_of_reviews": 2.0, \
"square_feet": 250.0, \
"review_scores_rating": 2.0, \
"room_type": "Entire home/apt", \
"host_is_super_host": "0.0", \
"cancellation_policy": "flexible", \
"instant_bookable": "1.0", \
"state": "CA"}'
encoded_input_params = input_params.encode('utf-8')
req = urllib.request.Request(evaluate_url, \
headers=evaluate_headers, \
data=encoded_input_params)
resp = urllib.request.urlopen(req)
print(resp.read())
import urllib.parse
import json
evaluate_url = 'http://prediction-pmml-gcp.demo.pipeline.io/evaluate-pmml/pmml_airbnb'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"bathrooms":2.0, \
"bedrooms":2.0, \
"security_deposit":175.00, \
"cleaning_fee":25.0, \
"extra_people":1.0, \
"number_of_reviews": 2.0, \
"square_feet": 250.0, \
"review_scores_rating": 2.0, \
"room_type": "Entire home/apt", \
"host_is_super_host": "0.0", \
"cancellation_policy": "flexible", \
"instant_bookable": "1.0", \
"state": "CA"}'
encoded_input_params = input_params.encode('utf-8')
req = urllib.request.Request(evaluate_url, \
headers=evaluate_headers, \
data=encoded_input_params)
resp = urllib.request.urlopen(req)
print(resp.read())
import urllib.parse
import json
evaluate_url = 'http://prediction-pmml-azure.demo.pipeline.io/evaluate-pmml/pmml_airbnb'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"bathrooms":2.0, \
"bedrooms":2.0, \
"security_deposit":175.00, \
"cleaning_fee":25.0, \
"extra_people":1.0, \
"number_of_reviews": 2.0, \
"square_feet": 250.0, \
"review_scores_rating": 2.0, \
"room_type": "Entire home/apt", \
"host_is_super_host": "0.0", \
"cancellation_policy": "flexible", \
"instant_bookable": "1.0", \
"state": "CA"}'
encoded_input_params = input_params.encode('utf-8')
req = urllib.request.Request(evaluate_url, \
headers=evaluate_headers, \
data=encoded_input_params)
resp = urllib.request.urlopen(req)
print(resp.read())
from urllib import request
sourceBytes = ' \n\
private String str; \n\
\n\
public void initialize(Map<String, Object> args) { \n\
} \n\
\n\
public Object predict(Map<String, Object> inputs) { \n\
String id = (String)inputs.get("id"); \n\
\n\
return id.equals("21619"); \n\
} \n\
'.encode('utf-8')
from urllib import request
name = 'codegen_equals'
update_url = 'http://prediction-codegen-aws.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_equals'
update_url = 'http://prediction-codegen-gcp.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_equals'
update_url = 'http://prediction-codegen-azure.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_equals'
evaluate_url = 'http://prediction-codegen-aws.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"id":"21618"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return false, since the deployed code checks id.equals("21619")
from urllib import request
name = 'codegen_equals'
evaluate_url = 'http://prediction-codegen-gcp.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"id":"21619"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return true, since the deployed code checks id.equals("21619")
from urllib import request
name = 'codegen_equals'
evaluate_url = 'http://prediction-codegen-azure.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"id":"21619"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return true, since the deployed code checks id.equals("21619")
from urllib import request
sourceBytes = ' \n\
public Map<String, Object> data = new HashMap<String, Object>(); \n\
\n\
public void initialize(Map<String, Object> args) { \n\
data.put("url", "http://demo.pipeline.io:9040/prediction/"); \n\
} \n\
\n\
public Object predict(Map<String, Object> inputs) { \n\
try { \n\
String userId = (String)inputs.get("userId"); \n\
String itemId = (String)inputs.get("itemId"); \n\
String url = data.get("url") + "/" + userId + "/" + itemId; \n\
\n\
return org.apache.http.client.fluent.Request \n\
.Get(url) \n\
.execute() \n\
.returnContent(); \n\
\n\
} catch(Exception exc) { \n\
System.out.println(exc); \n\
throw exc; \n\
} \n\
} \n\
'.encode('utf-8')
from urllib import request
name = 'codegen_httpclient'
# Note: Must have trailing '/'
update_url = 'http://prediction-codegen-aws.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
print(resp.status) # Should return Http Status 200
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_httpclient'
# Note: Must have trailing '/'
update_url = 'http://prediction-codegen-gcp.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
print(resp.status) # Should return Http Status 200
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_httpclient'
# Note: Must have trailing '/'
update_url = 'http://prediction-codegen-azure.demo.pipeline.io/update-codegen/%s/' % name
update_headers = {}
update_headers['Content-type'] = 'text/plain'
req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes)
resp = request.urlopen(req)
print(resp.status) # Should return Http Status 200
generated_code = resp.read()
print(generated_code.decode('utf-8'))
from urllib import request
name = 'codegen_httpclient'
evaluate_url = 'http://prediction-codegen-aws.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"userId":"21619", "itemId":"10006"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return float
from urllib import request
name = 'codegen_httpclient'
evaluate_url = 'http://prediction-codegen-gcp.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"userId":"21619", "itemId":"10006"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return float
from urllib import request
name = 'codegen_httpclient'
evaluate_url = 'http://prediction-codegen-azure.demo.pipeline.io/evaluate-codegen/%s' % name
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"userId":"21619", "itemId":"10006"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return float
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
from IPython.display import clear_output, Image, display, HTML
html = '<iframe width=100% height=500px src="http://hystrix.demo.pipeline.io/hystrix-dashboard/monitor/monitor.html?streams=%5B%7B%22name%22%3A%22Model%20Servers%20AWS%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-aws.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%2C%7B%22name%22%3A%22Model%20Servers%20GCP%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-gcp.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%2C%7B%22name%22%3A%22Model%20Servers%20Azure%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-azure.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%5D">'
display(HTML(html))
# Spark ML - PMML - Airbnb
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-aws-airbnb-rc.yaml
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-gcp-airbnb-rc.yaml
# Codegen - Java - Simple
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-aws-equals-rc.yaml
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-gcp-equals-rc.yaml
# Tensorflow AI - Tensorflow Serving - Simple
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-aws-minimal-rc.yaml
!kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-gcp-minimal-rc.yaml
!kubectl delete --context=awsdemo rc loadtest-aws-airbnb
!kubectl delete --context=awsdemo rc loadtest-gcp-airbnb
!kubectl delete --context=awsdemo rc loadtest-aws-equals
!kubectl delete --context=awsdemo rc loadtest-gcp-equals
!kubectl delete --context=awsdemo rc loadtest-aws-minimal
!kubectl delete --context=awsdemo rc loadtest-gcp-minimal
!kubectl rolling-update prediction-tensorflow --context=awsdemo --image-pull-policy=Always --image=fluxcapacitor/prediction-tensorflow
!kubectl get pod --context=awsdemo
!kubectl rolling-update prediction-tensorflow --context=gcpdemo --image-pull-policy=Always --image=fluxcapacitor/prediction-tensorflow
!kubectl get pod --context=gcpdemo
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Where Am I?
Step5: Commit and Deploy New Tensorflow AI Model
Step6: Airflow Workflow Deploys New Model through Github Post-Commit Webhook Trigger
Step7: Train and Deploy Spark ML Model (Airbnb Model, Mutable Deploy)
Step8: Weavescope Kubernetes AWS Cluster Visualization
Step9: Generate PMML from Spark ML Model
Step10: Step 0
Step14: Step 1
Step15: Step 2
Step16: Step 3
Step17: Step 4
Step18: Step 5
Step19: Step 6
Step20: Step 7
Step21: Step 8
Step22: Push PMML to Live, Running Spark ML Model Server (Mutable)
Step23: Deploy Java-based Model (Simple Model, Mutable Deploy)
Step24: Deploy Java Model (HttpClient Model, Mutable Deploy)
Step25: Load Test and Compare Cloud Providers (AWS, Google, Azure)
Step26: Start Load Tests
Step27: End Load Tests
Step28: Rolling Deploy Tensorflow AI (Simple Model, Immutable Deploy)
|
975 | <ASSISTANT_TASK:>
Python Code:
# import essentia in streaming mode
import essentia
import essentia.streaming as es
# import matplotlib for plotting
import matplotlib.pyplot as plt
import numpy as np
# algorithm parameters
params = { 'frameSize': 2048, 'hopSize': 512, 'startFromZero': False, 'sampleRate': 44100, \
'maxnSines': 100,'magnitudeThreshold': -74,'minSineDur': 0.02,'freqDevOffset': 10, \
'freqDevSlope': 0.001}
inputFilename = 'singing-female.wav'
outputFilename = 'singing-female-sindemodel.wav'
# create an audio loader and import audio file
out = np.array(0)
loader = es.MonoLoader(filename = inputFilename, sampleRate = 44100)
pool = essentia.Pool()
# algorithm instantation
fcut = es.FrameCutter(frameSize = params['frameSize'], hopSize = params['hopSize'], startFromZero = False);
w = es.Windowing(type = "blackmanharris92");
fft = es.FFT(size = params['frameSize']);
smanal = es.SineModelAnal(sampleRate = params['sampleRate'], maxnSines = params['maxnSines'], magnitudeThreshold = params['magnitudeThreshold'], freqDevOffset = params['freqDevOffset'], freqDevSlope = params['freqDevSlope'])
smsyn = es.SineModelSynth(sampleRate = params['sampleRate'], fftSize = params['frameSize'], hopSize = params['hopSize'])
ifft = es.IFFT(size = params['frameSize']);
overl = es.OverlapAdd (frameSize = params['frameSize'], hopSize = params['hopSize'], gain = 1./params['frameSize'] );
awrite = es.MonoWriter (filename = outputFilename, sampleRate = params['sampleRate']);
# analysis
loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> smanal.fft
# synthesis
smanal.magnitudes >> smsyn.magnitudes
smanal.frequencies >> smsyn.frequencies
smanal.phases >> smsyn.phases
smsyn.fft >> ifft.fft
ifft.frame >> overl.frame
overl.signal >> awrite.audio
overl.signal >> (pool, 'audio')
essentia.run(loader)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: After importing Essentia library, let's import other numerical and plotting tools
Step2: Define the parameters of the STFT workflow
Step3: Specify input and output audio filenames
Step4: Define algorithm chain for frame-by-frame process
Step5: Now we set the algorithm network and store the processed audio samples in the output file
Step6: Finally we run the process that will store an output file in a WAV file
|
976 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
HTML('<iframe width="846" height="476" src="https://www.youtube.com/embed/KdSqUjFWzdY" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="960" height="540" src="https://www.youtube.com/embed/gYiwszKaCoQ" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/gI4HN0JhPmo" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/onseMon9zqA" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/_q6098sNqpo" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/mPYxFC7DI28" frameborder="0" allowfullscreen></iframe>')
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/bkhuEG0D2HM" frameborder="0" allowfullscreen></iframe>')
# Recap
from IPython.display import HTML
HTML('<iframe width="798" height="449" src="https://www.youtube.com/embed/Pl68U2iGtyI" frameborder="0" allowfullscreen></iframe>')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basics of MapReduce
Step2: Quiz
Step3: Mapper
Step4: Reducer
Step5: Quiz
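Since the cells above only embed the lecture videos, here is a minimal, hypothetical Hadoop-streaming-style word-count mapper and reducer (not taken from the course materials) to make the Mapper and Reducer steps concrete:
import sys
from itertools import groupby

def mapper(lines):
    # Emit a (word, 1) pair, tab-separated, for every word on every input line.
    for line in lines:
        for word in line.strip().split():
            print("{0}\t{1}".format(word, 1))

def reducer(lines):
    # Hadoop streaming delivers the mapper output sorted by key,
    # so consecutive lines with the same word can be summed with groupby.
    pairs = (line.strip().split("\t") for line in lines)
    for word, group in groupby(pairs, key=lambda pair: pair[0]):
        total = sum(int(count) for _, count in group)
        print("{0}\t{1}".format(word, total))

if __name__ == "__main__":
    # Usage: cat input.txt | python wordcount.py map | sort | python wordcount.py reduce
    if sys.argv[1] == "map":
        mapper(sys.stdin)
    else:
        reducer(sys.stdin)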
|
977 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_boston
boston = load_boston()
from sklearn.model_selection import train_test_split
X, y = boston.data, boston.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=0)
print(boston.DESCR)
fig, axes = plt.subplots(3, 5, figsize=(20, 10))
for i, ax in enumerate(axes.ravel()):
if i > 12:
ax.set_visible(False)
continue
ax.plot(X[:, i], y, 'o', alpha=.5)
ax.set_title("{}: {}".format(i, boston.feature_names[i]))
ax.set_ylabel("MEDV")
plt.boxplot(X)
plt.xticks(np.arange(1, X.shape[1] + 1),
boston.feature_names, rotation=30, ha="right");
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
from sklearn.neighbors import KNeighborsRegressor
knr = KNeighborsRegressor().fit(X_train, y_train)
knr.score(X_train, y_train)
knr.score(X_test, y_test)
knr_scaled = KNeighborsRegressor().fit(X_train_scaled, y_train)
knr_scaled.fit(X_train_scaled, y_train)
knr_scaled.score(X_train_scaled, y_train)
X_test_scaled = scaler.transform(X_test)
knr_scaled.score(X_test_scaled, y_test)
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100, random_state=0)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
rf_scaled = RandomForestRegressor(n_estimators=100, random_state=0)
rf_scaled.fit(X_train_scaled, y_train)
rf_scaled.score(X_test_scaled, y_test)
import pandas as pd
df = pd.DataFrame({'salary': [103, 89, 142, 54, 63, 219],
'boro': ['Manhattan', 'Queens', 'Manhattan', 'Brooklyn', 'Brooklyn', 'Bronx']})
df
pd.get_dummies(df)
from sklearn.compose import make_column_transformer, ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
categorical = df.dtypes == object
ct = make_column_transformer((categorical, OneHotEncoder()),
(~categorical, StandardScaler()))
ct.fit_transform(df)
data = pd.read_csv("data/adult.csv", index_col=0)
# %load solutions/load_adult.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Categorical Variables
Step2: Exercises
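One possible sketch for the exercise (hypothetical, not the contents of solutions/load_adult.py; it assumes the target column is named 'income' and keeps the (columns, transformer) argument order used in the cell above, which newer scikit-learn versions reverse to (transformer, columns)):
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split

# Separate features and target; the column name 'income' is an assumption.
X_adult = data.drop("income", axis=1)
y_adult = data["income"]
X_train_a, X_test_a, y_train_a, y_test_a = train_test_split(X_adult, y_adult, random_state=0)

categorical_cols = X_adult.dtypes == object
ct_adult = make_column_transformer(
    (categorical_cols, OneHotEncoder(handle_unknown="ignore")),
    (~categorical_cols, StandardScaler()))

# Fit the preprocessing and the classifier together so scaling/encoding
# is learned on the training split only.
pipe = make_pipeline(ct_adult, LogisticRegression())
pipe.fit(X_train_a, y_train_a)
print(pipe.score(X_test_a, y_test_a))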
|
978 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from eniric.atmosphere import Atmosphere
from eniric.legacy import mask_clumping, RVprec_calc_masked
from scripts.phoenix_precision import convolve_and_resample
from eniric.snr_normalization import snr_constant_band
from eniric.precision import pixel_weights, rv_precision
from eniric.utilities import band_limits, load_aces_spectrum, wav_selector
wav_, flux_ = load_aces_spectrum([3900, 4.5, 0, 0])
# Small section in K bands to experiment with
wav, flux = wav_selector(wav_, flux_, 2.1025, 2.1046)
# Telluric mask
atm_ = Atmosphere.from_band("K", bary=True)
atm = atm_.at(wav)
mask = atm.mask
# Clumping method
wclump, fclump = mask_clumping(wav, flux, mask)
print("# Number of clumps = ", len(wclump))
# print(wclump, fclump)
print(len(wclump))
wis_0 = pixel_weights(wav, flux, grad=False)
wis_1 = pixel_weights(wav, flux, grad=True)
wis_0 *= mask[:-1]
wis_1 *= mask
wis_0[wis_0 == 0] = np.nan
wis_1[wis_1 == 0] = np.nan
plt_setting = {"figsize": (15, 6)}
plt.figure(**plt_setting)
plt.plot(wav, flux / np.max(flux), label="Star")
plt.plot(atm.wl, atm.transmission, label="Telluric")
plt.plot(atm.wl, atm.mask, "--", label="Mask")
plt.axhline(0.98)
plt.legend()
plt.figure(**plt_setting)
plt.plot(wav[:-1], wis_0, "bs-", label="Mask Grad False")
plt.plot(wav, wis_1, "ko--", label="Mask Grad true")
w, f = (wclump[0], fclump[0])
wis1 = pixel_weights(w, f, grad=True)
wis0 = pixel_weights(w, f, grad=False)
plt.plot(w[:-1], wis0 * 1.05, "g+:", label="Clump, Grad False")
plt.plot(w, wis1 * 1.05, "r.-.", label="Clump, Grad True")
plt.legend()
plt.xlim(wclump[0][0] * 0.99999, wclump[0][-1] * 1.00001)
plt.show()
plt.figure(**plt_setting)
plt.plot(wav[:-1], wis_0, "bs-", label="grad False")
plt.plot(wav, wis_1, "ko--", label="grad true")
w, f = (wclump[1], fclump[1])
wis1 = pixel_weights(w, f, grad=True)
wis0 = pixel_weights(w, f, grad=False)
plt.plot(w[:-1], wis0 * 1.05, "g+:", label="Clump grad False")
plt.plot(w, wis1 * 1.05, "r.-.", label="Clump grad True")
plt.legend()
plt.xlim(wclump[-1][0] * 0.999999, wclump[-1][-1] * 1.00001)
plt.show()
# Old and new indicate the split method.
print("Old with gradient {:0.06f}".format(RVprec_calc_masked(wav, flux, atm.mask, grad=True)))
print("New with gradient {:0.06f}".format(rv_precision(wav, flux, atm.mask, grad=True)))
print("Old without finite diff{:0.06f}".format(RVprec_calc_masked(wav, flux, atm.mask, grad=False)))
print("New with finite diff{:0.06f}".format(rv_precision(wav, flux, atm.mask, grad=False)))
# Explore relative difference of different bands
wav_, flux_ = load_aces_spectrum([3900, 4.5, 0, 0])
wav, flux = wav_selector(wav_, flux_, 0.7, 2.5)
table = []
table.append("Band, Cond#1, Split, Masked, ratio, Cond#1, Split, Masked, ratio")
table.append("Grad, False , True ")
# Get J band SNR normalization value
wav_j, flux_j = convolve_and_resample(
wav, flux, vsini=1, R=100000, band="J", sampling=3
)
snr_norm = snr_constant_band(wav_j, flux_j, snr=100, band="J")
for band in ["Z", "Y", "J", "H", "K"]:
atm = Atmosphere.from_band(band, bary=True)
w, f = convolve_and_resample(wav, flux, vsini=1, R=100000, band=band, sampling=3)
f /= snr_norm
atm = atm.at(w)
a = RVprec_calc_masked(w, f, atm.mask, grad=True)
b = RVprec_calc_masked(w, f, atm.mask, grad=False)
c = rv_precision(w, f, atm.mask, grad=True)
d = rv_precision(w, f, atm.mask, grad=False)
e = rv_precision(w, f, grad=True)
f = rv_precision(w, f, grad=False)
false_ratio = (d - b) / b
true_ratio = (c - a) / a
table.append(
"{0:5}, {1:4.02f}, {2:6.02f}, {3:6.02f}, {4:5.04f}, {5:6.02f}, {6:6.02f}, {7:6.02f}, {8:5.04f}".format(
band,
f.value,
b.value,
d.value,
false_ratio,
e.value,
a.value,
c.value,
true_ratio,
)
)
for line in table:
print(line)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize the Pixel Weights
Step2: From these two examples, the calculations with the same gradient setting produce the same pixel weights. The clumped version produces less weight, though.
Step3: Differences between the versions with the same gradient setting appear at the 4th significant figure. These are not on the correct scale; this will be addressed in the next section.
|
979 | <ASSISTANT_TASK:>
Python Code:
# standard imports
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import seaborn as sns
# import package
import infer_structcol as ifs
nwalkers = 14 # number of walkers to step through different parameters
nsteps = 10 # number of steps for each walker to take
particle_index = 1.59 # refractive index of colloidal particles
matrix_index = 1 # refractive index of matrix material surrounding particles
wavelengths = [450,500,550,600,650,700,750,800] # wavelengths of interest where spectrum should be calculated (in nm)
directory = os.path.join(os.getcwd(),
'infer_structcol',
'tests','test_data',
'experimental_data') # path where the data is found
refl_reference_file = 'R_ref.txt' # name of reference file for reflectance data
refl_dark_file = 'R_dark.txt' # name of dark file for reflectance data
trans_reference_file = 'T_ref.txt' # name of reference file for transmittance data
trans_dark_file = 'T_dark.txt' # name of dark file for transmittance data
# User's best guess for the parameters to be inferred: theta_guess should be a dictionary
# with volume fraction phi, particle radius, and thickness. Radius should be in nm, thickness should be in um
theta_guess = {'phi':0.5, 'radius':119, 'thickness':100}
# Range of values that contain the parameters according to the user's knowledge of the system.
# theta_range should be a dictionary with the min and max values of the volume fraction, radius, and thickness
theta_range = {'min_phi':0.35, 'max_phi':0.6, 'min_radius':80, 'max_radius': 150,
'min_thickness':70, 'max_thickness':300}
# Using raw data in format of instrument 1)
# create a converted data file for transmittance and reflectance spectra in the directory
ifs.convert_data(wavelengths, refl_reference_file, refl_dark_file, os.path.join(directory, 'reflection'))
ifs.convert_data(wavelengths, trans_reference_file, trans_dark_file, os.path.join(directory, 'transmission'))
# load the converted data file as a Spectrum object
spect = ifs.load_spectrum(refl_filepath = os.path.join(directory, 'reflection', 'converted', '0_data_file.txt'),
trans_filepath = os.path.join(directory, 'transmission', 'converted', '0_data_file.txt'))
samp = ifs.Sample(spect.wavelength, particle_index, matrix_index)
# Calculate the standard deviation of the multiple scattering calculations based on number of trajectories and events
ntrajectories = 500
nevents = 200
nruns = 100 # number of multiple scattering calculations for computing the standard deviation
sigma = ifs.calc_sigma(theta_guess['phi'], theta_guess['radius'], theta_guess['thickness'], samp,
ntrajectories, nevents, nruns)
print('Reflectance standard deviations: ' + str(sigma[0]))
print('Transmittance standard deviations: ' + str(sigma[1]))
# time the calculation, if desired
t0 = time.time()
# run the markov chain monte carlo calculation to step through different values of
# parameters and infer the most likely values
walkers = ifs.run_mcmc(spect, samp, nwalkers, nsteps, theta_guess=theta_guess, theta_range=theta_range,
ntrajectories=ntrajectories, nevents=nevents, seed=None)
# print the time the calculation took
print(time.time()-t0)
vf = walkers.chain[:,:,0]
radius = walkers.chain[:,:,1]
thickness = walkers.chain[:,:,2]
refl_l0 = walkers.chain[:,:,3]
refl_l1 = walkers.chain[:,:,4]
trans_l0 = walkers.chain[:,:,5]
trans_l1 = walkers.chain[:,:,6]
fig, (ax_vf, ax_radius, ax_thickness,
ax_refl_l1, ax_refl_l0, ax_trans_l0, ax_trans_l1, ax_lnprob) = plt.subplots(8, figsize=(7,16))
ax_vf.set(ylabel='volume fraction')
ax_radius.set(ylabel='radius')
ax_thickness.set(ylabel='thickness')
ax_refl_l0.set(ylabel='reflectance l_0')
ax_refl_l1.set(ylabel='reflectance l_1')
ax_trans_l0.set(ylabel='transmittance l_0')
ax_trans_l1.set(ylabel='transmittance l_1')
ax_lnprob.set(ylabel='ln(p)')
ax_lnprob.set(xlabel='step number')
ax_lnprob.set(ylim=[-100,25])
for i in range(nwalkers):
sns.tsplot(vf[i,:], ax=ax_vf)
sns.tsplot(radius[i,:], ax=ax_radius)
sns.tsplot(thickness[i,:], ax=ax_thickness)
sns.tsplot(refl_l0[i,:], ax=ax_refl_l0)
sns.tsplot(refl_l1[i,:], ax=ax_refl_l1)
sns.tsplot(trans_l0[i,:], ax=ax_trans_l0)
sns.tsplot(trans_l1[i,:], ax=ax_trans_l1)
sns.tsplot(walkers.lnprobability[i,:], ax=ax_lnprob)
# trim off the burn in time, which we observe from the plots above. The plots do not appear to have burned in for the
# 10 steps used in this tutorial, but let's set the burn in index to 5 to illustrate how to find the inferred volume
# fraction.
trim_index = 5
vf_trimmed = np.ndarray.flatten(vf[:,trim_index:])
radius_trimmed = np.ndarray.flatten(radius[:,trim_index:])
thickness_trimmed = np.ndarray.flatten(thickness[:,trim_index:])
# find the median of the volume fractions of the walkers
vf_inferred = np.median(vf_trimmed)
radius_inferred = np.median(radius_trimmed)
thickness_inferred = np.median(thickness_trimmed)
print("The inferred volume fraction is {:.3f} + {:.3f} - {:.3f}".format(vf_inferred,
np.percentile(vf_trimmed,84)-vf_inferred,
vf_inferred - np.percentile(vf_trimmed,16)))
print("The inferred particle radius is {:.3f} + {:.3f} - {:.3f}".format(radius_inferred,
np.percentile(radius_trimmed,84)-radius_inferred,
radius_inferred - np.percentile(radius_trimmed,16)))
print("The inferred thickness is {:.3f} + {:.3f} - {:.3f}".format(thickness_inferred,
np.percentile(thickness_trimmed,84)-thickness_inferred,
thickness_inferred - np.percentile(thickness_trimmed,16)))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Infer parameters
Step3: Define sample object
Step4: Calculate the uncertainties of the multiple scattering calculations
Step5: Perform inference calculation
Step6: Plot the traces
Step7: Estimate the most likely volume fraction, particle radius, and sample thickness
|
980 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import sklearn
import sklearn.ensemble
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
import csv
path = '../../datas/nlp_results/'
voting_df = pd.read_csv(path+'voting_with_topics.csv')
print('Entries in the DataFrame',voting_df.shape)
#Dropping the useless column
voting_df = voting_df.drop('Unnamed: 0',1)
#Putting numerical values into the columns that should have numerical values
#print(voting_df.columns.values)
num_cols = ['Decision', ' armée', ' asile / immigration', ' assurances', ' budget', ' dunno', ' entreprise/ finance',
' environnement', ' famille / enfants', ' imposition', ' politique internationale', ' retraite ']
voting_df[num_cols] = voting_df[num_cols].apply(pd.to_numeric)
#Inserting the full name at the second position
voting_df.insert(2,'Name', voting_df['FirstName'] + ' ' + voting_df['LastName'])
voting_df = voting_df.drop_duplicates(['Name'], keep = 'last')
voting_df = voting_df.set_index(['Name'])
voting_df.head(3)
profileMatrixFile = 'profileMatrix.csv'
profileMatrix = pd.read_csv(profileMatrixFile, index_col = 0)
profileArray = profileMatrix.values
print(profileArray.shape)
profileMatrix.head()
distanceMatrixFile = 'distanceMatrix.csv'
distances = pd.read_csv(distanceMatrixFile, index_col = 0)
distances = distances.replace(-0.001, 0)
distancesArray = distances.values
print(distancesArray.shape)
distances.head()
k = 4 # number of nearest neighbours that we take into account in the adjacency matrix
for i in distances:
d = distances.loc[i]
np.sort(d)
threshold = d[k-1]
for j in distances:
if distances.loc[i][j] > threshold:
distances.loc[i][j] = 0
else:
distances.loc[i][j] = 1
distances.head()
nbClust = 4
clusterDist = sklearn.cluster.spectral_clustering(affinity = distances.values, n_clusters = nbClust)
clusterDist
ratio_df = pd.DataFrame(index = voting_df.ParlGroupName.unique())
ratio_df['ratio'] = 0
np.array(ratio_df.index)
def ratioPartite(cluster, clusterDist):
# Compute the partites distribution for all people within this cluster
people = distances.index[clusterDist == cluster]
size = len(people)
ratio_df = pd.DataFrame(index = voting_df.ParlGroupName.unique())
ratio_df['ratio'] = 1.0
for group in np.array(ratio_df.index):
print(group)
peopleGroup = [p for p in people[voting_df.loc[people].ParlGroupName == group]]
print(len(peopleGroup) / float(size))
ratio_df.set_value(group, 'ratio', len(peopleGroup) / float(size))
return ratio_df
ratio_df = pd.DataFrame(index = voting_df.ParlGroupName.unique(), columns = range(nbClust))
ratio_df[0] = range(8)
ratio_df
ratio_df = pd.DataFrame(index = voting_df.ParlGroupName.unique(), columns = range(nbClust))
for cluster in range(nbClust):
ratio = ratioPartite(cluster, clusterDist)
ratio_df[cluster] = ratio.values
ratio_df
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Collect the data
Step2: Build adjacency matrix
Step3: Spectral Clustering
Step4: Analysis of the clustering
|
981 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-cm2-hr5', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
982 | <ASSISTANT_TASK:>
Python Code:
%%writefile plotting.py
from matplotlib import pyplot
from numpy import arange
import bisect
def scatterplot(x,y):
    pyplot.plot(x,y,'b.')
    pyplot.xlim(min(x)-1,max(x)+1)
    pyplot.ylim(min(y)-1,max(y)+1)
    pyplot.show()
def barplot(labels,data):
    pos=arange(len(data))
    pyplot.xticks(pos+0.4,labels)
    pyplot.bar(pos,data)
    pyplot.show()
def histplot(data,bins=None,nbins=5):
    minx,maxx=min(data),max(data)
    space=(maxx-minx)/float(nbins)
    if not bins: bins=arange(minx,maxx,space)
    binned=[bisect.bisect(bins,x) for x in data]
    l=['%.1f'%x for x in list(bins)+[maxx]] if space<1 else [str(int(x)) for x in list(bins)+[maxx]]
    displab=[x+'-'+y for x,y in zip(l[:-1],l[1:])]
    barplot(displab,[binned.count(x+1) for x in range(len(bins))])
def barchart(x,y,numbins=5):
    datarange=max(x)-min(x)
    bin_width=float(datarange)/numbins
    pos=min(x)
    bins=[0 for i in range(numbins+1)]
    for i in range(numbins):
        bins[i]=pos
        pos+=bin_width
    bins[numbins]=max(x)+1
    binsum=[0 for i in range(numbins)]
    bincount=[0 for i in range(numbins)]
    binaverage=[0 for i in range(numbins)]
    for i in range(numbins):
        for j in range(len(x)):
            if x[j]>=bins[i] and x[j]<bins[i+1]:
                bincount[i]+=1
                binsum[i]+=y[j]
    for i in range(numbins):
        binaverage[i]=float(binsum[i])/bincount[i]
    barplot(range(numbins),binaverage)
def piechart(labels,data):
    fig=pyplot.figure(figsize=(7,7))
    pyplot.pie(data,labels=labels,autopct='%1.2f%%')
    pyplot.show()
%matplotlib inline
from plotting import *
barchart([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 5)
#Plot a histogram of Height using the histplot function
from plotting import *
Height=[65.78, 71.52, 69.4, 68.22, 67.79, 68.7, 69.8, 70.01, 67.9, 66.78,
66.49, 67.62, 68.3, 67.12, 68.28, 71.09, 66.46, 68.65, 71.23, 67.13, 67.83,
68.88, 63.48, 68.42, 67.63, 67.21, 70.84, 67.49, 66.53, 65.44, 69.52, 65.81,
67.82, 70.6, 71.8, 69.21, 66.8, 67.66, 67.81, 64.05, 68.57, 65.18, 69.66, 67.97,
65.98, 68.67, 66.88, 67.7, 69.82, 69.09]
Weight=[112.99, 136.49, 153.03, 142.34, 144.3, 123.3, 141.49, 136.46,
112.37, 120.67, 127.45, 114.14, 125.61, 122.46, 116.09, 140.0, 129.5, 142.97,
137.9, 124.04, 141.28, 143.54, 97.9, 129.5, 141.85, 129.72, 142.42, 131.55,
108.33, 113.89, 103.3, 120.75, 125.79, 136.22, 140.1, 128.75, 141.8, 121.23,
131.35, 106.71, 124.36, 124.86, 139.67, 137.37, 106.45, 128.76, 145.68, 116.82,
143.62, 134.93]
#Insert your code on the next line
histplot(Height)
histplot(Weight)
scatterplot(Height, Weight)
barchart(Height, Weight)
# %load wages.py
#Write a line of code to print a scatterplot of Age on the horizontal axis
#against Wage on the vertical axis
from plotting import *
Age=[25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27, 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32, 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18, 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29, 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22, 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39, 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36, 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26, 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20, 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33, 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32, 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35, 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21, 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30, 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30, 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20, 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40, 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34, 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18, 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38, 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31, 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35, 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36, 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28, 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37, 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26, 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36, 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21, 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25, 35, 27, 35, 24, 36, 32, 20]
Wage=[17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000, 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000, 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000, 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000, 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000, 75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000, 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000, 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200, 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000, 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000, 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000, 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000, 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000, 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000, 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600, 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000, 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000, 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000, 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000, 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000, 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600, 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000, 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000, 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000, 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000, 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000, 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000, 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000, 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000, 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000, 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500, 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000, 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500, 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000, 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500, 60000, 36000, 10500, 3600, 1200, 28600, 48000, 
20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000, 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000, 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000, 3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000, 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000, 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000, 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000, 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000, 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200, 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000, 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000, 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000, 40000, 8000]
#Insert your code on the next line
scatterplot(Age, Wage)
barchart(Age, Wage)
histplot(Age)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using the wrapper
Step2: Quiz
Step3: Most Common Weight
Step4: Height vs Weight
Step5: Barchart
Step6: Wages
Step7: Wage Barchart
Step8: Most Common Age
|
983 | <ASSISTANT_TASK:>
Python Code:
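# reducex (below) keeps a random `reduce_percent` fraction of the samples whose label is in
# `reduce_classes`, and returns binary targets (True where the original CIFAR-10 label is 3, i.e. "cat").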
def reducex(X, y, reduce_classes=None, reduce_percent=.2):
    # import pdb; pdb.set_trace()
    idxs = []
    if reduce_classes:
        for c in reduce_classes:
            try:
                idxs += list(np.where(y == c)[0])
            except IndexError:
                continue
    np.random.seed(1000)
    new_size = int(np.round(len(idxs) * reduce_percent))
    np.random.shuffle(idxs)
    return (X[idxs[:int(len(idxs)*reduce_percent)]], y[idxs[:int(len(idxs)*reduce_percent)]] == 3)
batch_size = 16
(Xtrain, ytrain), (Xtest, ytest) = cifar10.load_data()
train_datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
Xtrain, ytrain = reducex(Xtrain, ytrain, reduce_classes=[3,5], reduce_percent=0.3)
Xtest, ytest = reducex(Xtest, ytest, reduce_classes=[3,5], reduce_percent=0.3)
yetrain = keras.utils.to_categorical(ytrain)
yetest = keras.utils.to_categorical(ytest)
######## Resize cifar10 images to 3x48x48 #############
rsize = 48
temp = np.zeros((Xtrain.shape[0], 3, rsize, rsize))
for i, row in enumerate(Xtrain):
    temp[i] = imresize(row, size=(rsize,rsize)).transpose(2,1,0)
temp2 = np.zeros((Xtest.shape[0], 3, rsize, rsize))
for i, row in enumerate(Xtest):
    temp2[i] = imresize(row, size=(rsize,rsize)).transpose(2,1,0)
Xtrain = temp
Xtest = temp2
from keras.applications import VGG16
vmodel = VGG16(include_top=True, weights='imagenet')#, input_shape=(3,48,48))
vmodel.layers.pop()
for layr in vmodel.layers:
    layr.trainable = False
last = vmodel.output
# x = Flatten(last)
x = Dense(2, activation='sigmoid')(last)
vvmodel = keras.models.Model(vmodel.input, x)
# vmodel.add(Dense(2))
# vmodel.add(Activation('sigmoid'))
vvmodel.summary()
opt = RMSprop(lr=0.01)
vvmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
def fit_model(model, batches, val_batches, epochs=1):
    model.fit_generator(batches, steps_per_epoch=1500 // batch_size, epochs=epochs,
                        validation_steps=700 // batch_size,
                        validation_data=val_batches)
vvmodel.fit(Xtrain[0:100], yetrain[0:100], verbose=True)
fit_model(vvmodel, train_generator, validation_generator, epochs=2)
# model.fit_generator(
# train_generator,
# steps_per_epoch=500 // batch_size,
# epochs=15,
# validation_data=validation_generator,
# validation_steps=300 // batch_size)
# img = load_img('data/train/cats/cat.0.jpg') # this is a PIL image
# x = img_to_array(img) # this is a Numpy array with shape (3, 150, 150)
x = Xtrain[8]
x = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 3, 150, 150)
# the .flow() command below generates batches of randomly transformed images
# and saves the results to the `preview/` directory
i = 0
j = 0
xims = []
fig, axes = plt.subplots(2,2,figsize=(12,8))
for batch in datagen.flow(x, batch_size=1):
    axes[j][i].set_axis_off()
    axes[j][i].imshow(batch.reshape(x.shape[1:]).transpose(2,1,0), interpolation='nearest')
    if i >= 1:
        j += 1
        i = -1
    if j == 2:
        break
    i += 1
print(j, i)
# plt.imshow(Xtrain[0].reshape(32,32,3))
# fig, axes1 = plt.subplots(2,2,figsize=(12,8))
# for j in range(5):
# for k in range(5):
# i = np.random.choice(range(len(X)))
# axes1[j][k].set_axis_off()
# axes1[j][k].imshow(X[i:i+1][0], interpolation='nearest')
print ytrain[8]
# train_generator = datagen.flow(Xtrain,yetrain,
# batch_size=batch_size) # since we use binary_crossentropy loss, we need binary labels
# # this is a similar generator, for validation data
# validation_generator = test_datagen.flow(Xtest, yetest,
# batch_size=batch_size)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 32, 32)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
batch_size = 64
model.fit_generator(
train_generator,
steps_per_epoch=500 // batch_size,
epochs=15,
validation_data=validation_generator,
validation_steps=300 // batch_size)
import datetime
now = str(datetime.datetime.now())
# model.save_weights('first_try.h5') # always save your weights after training or during training
weights = model.weights[6:]
weights[0].eval().shape
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(Xtrain,
batch_size=batch_size, shuffle=True) # since we use binary_crossentropy loss, we need binary labels
# this is a similar generator, for validation data
validation_generator = test_datagen.flow(Xtest,
batch_size=batch_size, shuffle=True)
# train_batches = train_generator.flow(Xtrain, yetrain, batch_size=batch_size, shuffle=True)
# val_batches = test_datagen.flow(Xtest, yetest, batch_size=batch_size, shuffle=False)
bottleneck_features_train = vmodel.predict_generator(
train_generator, 500)
np.save(open('bottleneck_features_train.npy', 'w'),
bottleneck_features_train)
top_model = Sequential()
top_model.add(Flatten(input_shape=(256,32),weights=np.zeros((256,32))))
top_model.add(Dense(32))
top_model.add(Activation('relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(2))
top_model.add(Activation('sigmoid'))
# top_model.set_weights(weights)
top_model.layers[0].set_weights(weights[0].eval())
top_model_weights_path = 'first_try.h5'
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
vmodel.add(top_model)
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
    layer.trainable = False
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
vmodel.compile(loss='binary_crossentropy',
optimizer=SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# prepare data augmentation configuration
# train_datagen = ImageDataGenerator(
# rescale=1. / 255,
# shear_range=0.2,
# zoom_range=0.2,
# horizontal_flip=True)
# test_datagen = ImageDataGenerator(rescale=1. / 255)
# train_generator = train_datagen.flow_from_directory(
# train_data_dir,
# target_size=(img_height, img_width),
# batch_size=batch_size,
# class_mode='binary')
# validation_generator = test_datagen.flow_from_directory(
# validation_data_dir,
# target_size=(img_height, img_width),
# batch_size=batch_size,
# class_mode='binary')
# fine-tune the model
epochs = 25
vmodel.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
epochs=epochs,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Splitting the data into validation and train + One Hot Encoding the labels + data augmentation
Step2: Resizing the images into appropriate dimensions that VGG16 expects (48x48 - 224x224)
Step3: Plotting a sample resized image and its augmented copies
Step4: Barebones Convolutional model
Step5: Using VGG16 Bottleneck Features
|
984 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import display
import pandas as pd
# data
data = pd.DataFrame([
[10, 300],
[20, 200],
[30, 100],
[40, 400]
], columns=['QTY', 'UNIT.V'],
index=['A', 'B', 'C', 'D'])
display(data)
def gain(unit_v, qty):
    return unit_v*qty*0.1
data['GAIN'] = data.apply(lambda v: gain(v['UNIT.V'], v['QTY']), axis=1)
display(data)
def gain_1_2(var_qty, unit_v):
    return unit_v*var_qty*0.12
from scipy.optimize import fsolve
help(fsolve)
fsolve(
    gain_1_2, [1], args=(300,)  # gain_1_2 needs unit_v as its second argument; 300 is an illustrative UNIT.V value from the table above
)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: scipy.optimize.fsolve
|
985 | <ASSISTANT_TASK:>
Python Code:
IMAGE_SIZE = (299,299) # The dimensions to which all images found will be resized.
BATCH_SIZE = 16
NUMBER_EPOCHS = 8
TENSORBOARD_DIRECTORY = "../logs/simple_model/tensorboard"
TRAIN_DIRECTORY = "../data/train/"
VALID_DIRECTORY = "../data/valid/"
WEIGHTS_DIRECTORY = "../weights/"
TEST_DIRECTORY = "../data/test/"
NUMBER_TRAIN_SAMPLES = 20000
NUMBER_VALIDATION_SAMPLES = 5000
NUMBER_TEST_SAMPLES = 2500
from tensorflow.python.client import device_lib
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
get_available_gpus()
import tensorflow as tf
# Creates a graph.
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
from keras.applications.inception_v3 import InceptionV3
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
base_model.summary()
from keras.layers import Dense, Dropout, GlobalAveragePooling2D
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(64, activation='relu')(x)
x = Dropout(0.3)(x)
# and a logistic layer
predictions = Dense(2, activation='softmax')(x)
from keras.models import Model
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
TRAINABLE_LAST_LAYERS = 0
assert TRAINABLE_LAST_LAYERS >= 0
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
if TRAINABLE_LAST_LAYERS == 0:
    for layer in base_model.layers:
        layer.trainable = False
    print(len(base_model.layers))
else:
    for layer in base_model.layers[:-TRAINABLE_LAST_LAYERS]:
        layer.trainable = False
    print(len(base_model.layers[:-TRAINABLE_LAST_LAYERS]))
model.summary()
import pandas as pd
df = pd.DataFrame(([layer.name, layer.trainable] for layer in model.layers), columns=['layer', 'trainable'])
df
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
# Early stop in case of getting worse
early_stop = EarlyStopping(monitor = 'val_loss', patience = 3, verbose = 0)
#TensorBoard
# run tensorboard with tensorboard --logdir=/full_path_to_your_logs
#tensorboard_path = TENSORBOARD_DIRECTORY
#tensorboard_logger = TensorBoard(log_dir=tensorboard_path, histogram_freq=0, write_graph=False, write_images=False)
#print('Logging basic info to be used by TensorBoard to {}. To see this log run:'.format(tensorboard_path))
#print('tensorboard --logdir={}'.format(tensorboard_path))
callbacks = [early_stop]#, tensorboard_logger]
OPTIMIZER_LEARNING_RATE = 1e-2
OPTIMIZER_DECAY = 1e-4
OPTIMIZER_MOMENTUM = 0.89
OPTIMIZER_NESTEROV_ENABLED = False
from keras.optimizers import SGD
optimizer = SGD(lr=OPTIMIZER_LEARNING_RATE,
decay=OPTIMIZER_DECAY,
momentum=OPTIMIZER_MOMENTUM,
nesterov=OPTIMIZER_NESTEROV_ENABLED)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=["accuracy"])
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_batch_generator = train_datagen.flow_from_directory(TRAIN_DIRECTORY,
target_size = IMAGE_SIZE,
class_mode = 'categorical',
batch_size = BATCH_SIZE)
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
validation_datagen = ImageDataGenerator(rescale = 1./255)
valid_batch_generator = validation_datagen.flow_from_directory(VALID_DIRECTORY,
target_size = IMAGE_SIZE,
class_mode = 'categorical',
batch_size = BATCH_SIZE)
# fine-tune the model
hist = model.fit_generator(
train_batch_generator,
steps_per_epoch=NUMBER_TRAIN_SAMPLES/BATCH_SIZE,
epochs=NUMBER_EPOCHS, # epochs: Integer, total number of iterations on the data.
validation_data=valid_batch_generator,
validation_steps=NUMBER_VALIDATION_SAMPLES/BATCH_SIZE,
callbacks=callbacks,
verbose=2)
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_all_classes()
import pickle
file = PRECOMPUTED_DIRECTORY + '/classes_and_filenames.dat'
# Saving the objects:
with open(file, 'wb') as file: # Python 2: open(..., 'w')
    pickle.dump([val_classes, trn_classes, val_labels, trn_labels,
                 val_filenames, filenames, test_filenames], file)
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
# Early stop in case of getting worse
early_stop = EarlyStopping(monitor = 'val_loss', patience = 3, verbose = 0)
#TensorBoard
# run tensorboard with tensorboard --logdir=/full_path_to_your_logs
tensorboard_path = TENSORBOARD_DIRECTORY
tensorboard_logger = TensorBoard(log_dir=tensorboard_path, histogram_freq=0, write_graph=False, write_images=False)
print('Logging basic info to be used by TensorBoard to {}. To see this log run:'.format(tensorboard_path))
print('tensorboard --logdir={}'.format(tensorboard_path))
callbacks = [early_stop, tensorboard_logger]
OPTIMIZER_LEARNING_RATE = 1e-2
OPTIMIZER_DECAY = 1e-4 # LearningRate = LearningRate * 1/(1 + decay * epoch)
OPTIMIZER_MOMENTUM = 0.89
OPTIMIZER_NESTEROV_ENABLED = False
from keras.optimizers import SGD
optimizer = SGD(lr=OPTIMIZER_LEARNING_RATE,
decay=OPTIMIZER_DECAY,
momentum=OPTIMIZER_MOMENTUM,
nesterov=OPTIMIZER_NESTEROV_ENABLED)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer, \
metrics=["accuracy"])
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
train_datagen = ImageDataGenerator(rescale = 1./255)
train_batch_generator = train_datagen.flow_from_directory(TRAIN_DIRECTORY,
target_size = IMAGE_SIZE,
class_mode = 'categorical',
batch_size = BATCH_SIZE)
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
validation_datagen = ImageDataGenerator(rescale = 1./255)
valid_batch_generator = validation_datagen.flow_from_directory(VALID_DIRECTORY,
target_size = IMAGE_SIZE,
class_mode = 'categorical',
batch_size = BATCH_SIZE)
# fine-tune the model
hist = model.fit_generator(
train_batch_generator,
steps_per_epoch=NUMBER_TRAIN_SAMPLES/BATCH_SIZE,
epochs=NUMBER_EPOCHS, # epochs: Integer, total number of iterations on the data.
validation_data=valid_batch_generator,
validation_steps=NUMBER_VALIDATION_SAMPLES/BATCH_SIZE,
callbacks=callbacks,
verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(hist.history['acc']); plt.plot(hist.history['val_acc']);
plt.title('model accuracy'); plt.ylabel('accuracy');
plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left');
# summarize history for loss
plt.subplot(1, 2, 2)
plt.plot(hist.history['loss']); plt.plot(hist.history['val_loss']);
plt.title('model loss'); plt.ylabel('loss');
plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left');
plt.show()
############
# load weights
############
model_save_path = WEIGHTS_DIRECTORY + 'inceptionv3_pretrained_weights.h5'
print("Loading weights from: {}".format(model_save_path))
model.load_weights(model_save_path)
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
validation_datagen = ImageDataGenerator(rescale = 1./255)
test_batch_generator = validation_datagen.flow_from_directory(TEST_DIRECTORY,
target_size = IMAGE_SIZE,
class_mode = 'categorical',
batch_size = BATCH_SIZE)
model.evaluate_generator(test_batch_generator,
steps = NUMBER_TEST_SAMPLES/BATCH_SIZE)
from keras.preprocessing.image import ImageDataGenerator
## train generator with shuffle but no data augmentation
test_datagen = ImageDataGenerator(rescale = 1./255)
test_batch_generator = test_datagen.flow_from_directory(
TEST_DIRECTORY,
target_size = IMAGE_SIZE,
batch_size=1,
shuffle = False, # Important !!!
classes = None,
class_mode = None)
test_batch_generator.classes.shape
import pickle
test_classes_file = open("../results/inceptionv3_true.pickle", "wb" )
pickle.dump( test_batch_generator.classes, test_classes_file )
true_values = test_batch_generator.classes
len(test_batch_generator.filenames)
test_filenames = open("../results/inceptionv3_filenames.pickle", "wb" )
pickle.dump( test_batch_generator.filenames, test_filenames )
import numpy as np
pred = []
for i in range(int(NUMBER_TEST_SAMPLES)):
    X = next(test_batch_generator) # get the next batch
    #print(X.shape)
    pred1 = model.predict(X, batch_size = 1, verbose = 0) #predict on a batch
    pred = pred + pred1.tolist()
probabilities = np.array(pred)
print(probabilities.shape)
assert probabilities.shape == (NUMBER_TEST_SAMPLES, 2)
test_filenames = open("../results/inceptionv3_probabilities.pickle", "wb")
pickle.dump( probabilities, test_filenames )
probabilities[0]
predictions=np.argmax(probabilities,1)
test_filenames = open("../results/inceptionv3_predictions.pickle", "wb" )
pickle.dump( predictions, test_filenames )
predictions[0]
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix
class_names = ['cat', 'dog']
cnf_matrix = confusion_matrix(true_values, predictions)
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix')
plt.show()
from numpy.random import random, permutation
#1. A few correct labels at random
correct = np.where(predictions==true_values)[0]
idx = permutation(correct)[:4]
#plots_idx(idx, probs[idx])
len(correct)
from scipy import ndimage
from PIL import Image
import matplotlib.pyplot as plt
im = ndimage.imread("../data/test/" + test_batch_generator.filenames[idx[0]])
image = Image.fromarray(im)
plt.imshow(image)
plt.title(probabilities[idx[0]])
plt.show()
im = ndimage.imread("../data/test/" + test_batch_generator.filenames[idx[1]])
image = Image.fromarray(im)
plt.imshow(image)
plt.title(probabilities[idx[1]])
plt.show()
from numpy.random import random, permutation
#1. A few incorrect labels at random
correct = np.where(predictions != true_values)[0]
idx = permutation(correct)[:4]
#plots_idx(idx, probs[idx])
im = ndimage.imread("../data/test/" + test_batch_generator.filenames[idx[0]])
image = Image.fromarray(im)
plt.imshow(image)
plt.title(probabilities[idx[0]])
plt.show()
im = ndimage.imread("../data/test/" + test_batch_generator.filenames[idx[1]])
image = Image.fromarray(im)
plt.imshow(image)
plt.title(probabilities[idx[1]])
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check that we are using the GPU
Step2: Model
Step3: Base Model arquitecture
Step4: Complete model with FCN Classifier on top
Step5: Set the non trainable layers
Step6: Training the top layer
Step7: Model optimizer
Step8: Model compilation
Step9: Model Training
Step10: Validation data generator
Step11: Model fitting
Step12: Filenames and labels
Step13: Keras callbacks
Step14: Model Optimizer
Step15: Compile the model
Step16: Training
Step17: Validation data generator
Step18: Model fitting
Step19: Training plots
Step20: Plot a few examples
Step21: Test the model
|
986 | <ASSISTANT_TASK:>
Python Code:
# libraries
import numpy as np # numpy
import sys # sys to add py_matrix to the path
# matplotlib inline plots
import matplotlib.pylab as plt
%matplotlib inline
# adding py_matrix parent folder to python path
sys.path.append('../../')
import py_matrix as pm # importing py_matrix
# useful parameters
f_size=20;
# building the optical constant database, point the folder below to the "materials" py_matrix folder
eps_db_out=pm.mat.generate_eps_db('../materials/',ext='*.edb')
eps_files,eps_names,eps_db=eps_db_out['eps_files'],eps_db_out['eps_names'],eps_db_out['eps_db']
# multilayer composition
n_inc = 1.53 # BK7
n_h = 2.06 + 0.001*1j
n_l = 1.454 + 0.0001*1j
n_sub = 1.33
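# the dielectric constants below are the squares of the refractive indices defined above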
e_list=np.array([n_inc,n_l,n_h,n_l,n_h,n_l,n_h,n_l,n_h,n_l,n_sub])**2
# multilayer thicknesses: incident medium and substrate have zero thickness
d_h = 170.0
d_l = 495.0
d_list = np.array([0.0,d_l,d_h,d_l,d_h,d_l,d_h,d_l,d_h,d_l,0.0])
wl_0 = 804 # incident wavelenght in nm
# polar angle in radians
theta_min = 60.5*np.pi/1.8e2;
theta_max = 68*np.pi/1.8e2;
theta_step = 1000;
v_theta = np.linspace(theta_min,theta_max,theta_step)
# azimuthal angle radians
phi_0 = 0.0;
# optical constant tensor
m_eps=np.zeros((len(e_list),3,3),dtype=np.complex128);
m_eps[:,0,0]=e_list # filling dielectric tensor diagonal
m_eps[:,1,1]=e_list
m_eps[:,2,2]=e_list
# initializing reflectance output vector
v_r_p=np.zeros_like(v_theta)
v_r_s=np.zeros_like(v_theta)
# angle loop
for i_t,t in enumerate(v_theta):
    #------Computing------
    m_r_ps=pm.core.rt(wl_0,t,phi_0,m_eps,d_list)['m_r_ps'] # reflection matrix
    v_r_p[i_t]=pm.utils.R_ps_rl(m_r_ps)['R_p'] # getting p-polarized reflectance (TM)
    v_r_s[i_t]=pm.utils.R_ps_rl(m_r_ps)['R_s'] # getting s-polarized reflectance (TE)
# reflectivity plots
plt.figure(1,figsize=(9,6))
plt.plot(v_theta*1.8e2/np.pi,v_r_s,'k',linewidth=2.0)
plt.plot(v_theta*1.8e2/np.pi,v_r_p,'r',linewidth=2.0)
# labels
plt.xlabel(r'$\Theta^{\circ}$',fontsize=f_size+10)
plt.ylabel('R',fontsize=f_size+10)
# ticks
plt.xticks(fontsize=f_size)
plt.yticks(fontsize=f_size)
# grids
plt.grid()
#legends
plt.legend(['TE (s)','TM (p)'],loc='lower right',fontsize=f_size,fancybox=True);
# fields components and wavevectors
theta_0_p = v_theta[v_r_p.argmin()] # getting the TM coupling angle
theta_0_s = v_theta[v_r_s.argmin()] # getting the TE coupling angle
out_p = pm.core.rt(wl_0,theta_0_p,phi_0,m_eps,d_list) # reflection matrix
out_s = pm.core.rt(wl_0,theta_0_s,phi_0,m_eps,d_list) # reflection matrix
m_Kn_p = out_p['m_Kn']
m_Hn_p = out_p['m_Hn']
m_En_p = out_p['m_En']
m_Kn_s = out_s['m_Kn']
m_Hn_s = out_s['m_Hn']
m_En_s = out_s['m_En']
# computing the field, absorbed power and Poynting vector
v_z = np.linspace(-1000,5000,1000) # z probing
v_field_s = np.array([np.abs(pm.utils.field(m_Kn_s,m_En_s,m_Hn_s,e_list,d_list,0.0,0.0,z,'TE')['E'][1]) for z in v_z])
v_field_p = np.array([np.abs(pm.utils.field(m_Kn_p,m_En_p,m_Hn_p,e_list,d_list,0.0,0.0,z,'TM')['H'][1]) for z in v_z])
# field plots
plt.figure(figsize=(9,6))
# plot
plt.plot(v_z,v_field_s,'k',linewidth=2.0)
plt.plot(v_z,v_field_p,'r',linewidth=2.0)
for n_d,d in enumerate(d_list):
    plt.axvline(d_list[0:n_d].sum(),color='gray',linestyle='dashed',linewidth=2.0)
# labels
plt.ylabel(r'$|E_{\mathrm{y,TE}}|,|H_{\mathrm{y,TM}}|$',fontsize=f_size+10)
plt.xlabel(r'z (nm)',fontsize=f_size+5)
# ticks
plt.xticks(fontsize=f_size)
plt.yticks([5,10,15,20],fontsize=f_size)
# grids
plt.grid(color='gray',axis='y')
# legend
plt.legend(['TE (s)','TM (p)'],loc='upper left',fontsize=f_size,fancybox=True);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Inputs
Step2: Computation
Step3: Plot of the reflectance spectrum at $\lambda$ = 804 nm
Step4: Plot of the local fields at $\lambda=804$ nm at the TE and TM coupling angle
|
987 | <ASSISTANT_TASK:>
Python Code:
# conda install ipyrad -c bioconda
# conda install tetrad -c eaton-lab -c conda-forge
import ipyrad.analysis as ipa
import toytree
# the path to your sequence data in HDF5 format
data = "/home/deren/Documents/virentes-reference/analysis-ipyrad/ref_min4_outfiles/ref_min4.snps.hdf5"
# init analysis object with input data and (optional) parameter options
tet = ipa.tetrad(
name="virentes-min4",
data=data,
nquartets=1e6,
nboots=16,
)
tet.run(auto=True)
tre = toytree.tree(tet.trees.tree).root(["HE", "NI"])
tre.draw(node_labels="support", use_edge_lengths=False);
tre = toytree.tree(tet.trees.cons).root(["HE", "NI"])
tre.draw(node_labels="support", use_edge_lengths=False);
mtre = toytree.mtree(tet.trees.boots)
mtre.treelist = [i.root(["HE", "NI"]) for i in mtre.treelist]
mtre.draw_cloud_tree(
height=600,
width=400,
use_edge_lengths=False,
html=True,
);
# analysis is finished so it will not run
tet.run()
# increase nboots and continue from existing analysis object
tet.params.nboots = 20
tet.run(auto=True)
# # re-init analysis object (will load existing results at this name)
# tet = ipa.tetrad(
# name="virentes-min4",
# data=data,
# nquartets=1e6,
# nboots=25,
# load=True,
# )
# tet.run(auto=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input data
Step2: Initialize the analysis object
Step3: Call run
Step4: Show the full tree with bootstrap supports
Step5: Show the majority-rule consensus tree with bootstrap supports
Step6: Show variation over the bootstrap replicates
Step7: Continuing from a checkpoint
Step8: Here I set the number of requested bootstrap replicates to 20 and call .run() again. You can see that the analysis continues from 17, since we already completed 16 bootstrap replicates earlier, and will go until it completes 20 bootstraps.
Step9: Alternatively, maybe you are returning to this analysis after a while and decide you want to do more bootstraps. You can re-load the analysis object by entering the same name and working_dir as in the original analysis, and adding the load=True argument. I set the number of bootstraps to 25 now. This will load the results from before and add new results when you call .run().
|
988 | <ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib as mpl
from learntools.core import binder; binder.bind(globals())
from learntools.embeddings.ex4_tsne import *
#_RM_
input_dir = '.'
#_UNCOMMENT_
#input_dir = '../input/visualizing-embeddings-with-t-sne'
csv_path = os.path.join(input_dir, 'movies_tsne.csv')
df = pd.read_csv(csv_path, index_col=0)
FS = (13, 9)
fig, ax = plt.subplots(figsize=FS)
c = np.random.rand(len(df))
pts = ax.scatter(df.x, df.y, c=c)
cbar = fig.colorbar(pts)
#%%RM_IF(PROD)%%
# Correct (solution code)
FS = (13, 9)
fig, ax = plt.subplots(figsize=FS)
c = df.year
pts = ax.scatter(df.x, df.y, c=c)
cbar = fig.colorbar(pts)
#%%RM_IF(PROD)%%
# Custom cmap
FS = (13, 9)
fig, ax = plt.subplots(figsize=FS)
c = df.year
pts = ax.scatter(df.x, df.y, c=c, cmap='cubehelix')
cbar = fig.colorbar(pts)
#%%RM_IF(PROD)%%
# Solution w/ sampling
FS = (13, 9)
fig, ax = plt.subplots(figsize=FS)
n = 2000
_df = df.sample(n, random_state=1).copy()
c = _df.year
pts = ax.scatter(_df.x, _df.y, c=c)
cbar = fig.colorbar(pts)
#_COMMENT_IF(PROD)_
part1.solution()
# Your code goes here
#_COMMENT_IF(PROD)_
part2.hint()
#%%RM_IF(PROD)%%
FS = (13, 9)
fig, ax = plt.subplots(figsize=FS)
c = df.mean_rating
pts = ax.scatter(df.x, df.y, c=c, cmap='cubehelix')
cbar = fig.colorbar(pts)
#_COMMENT_IF(PROD)_
part2.solution()
fig, ax = plt.subplots(figsize=FS)
c = df.n_ratings
pts = ax.scatter(df.x, df.y, c=c)
cbar = fig.colorbar(pts)
part3.solution()
#%%RM_IF(PROD)%%
# Solution code
fig, ax = plt.subplots(figsize=FS)
c = df.n_ratings
pts = ax.scatter(df.x, df.y, c=c, norm=mpl.colors.LogNorm())
cbar = fig.colorbar(pts)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Release year
Step2: Are our movie embeddings sensitive to year?
Step3: 2. Average rating
Step4: Again, is there a global pattern to the distribution of mean rating?
Step5: Bonus
Step6: Yikes, this is pretty hard to read. Most of our movies have on the order of hundreds of ratings, but there's also a long tail of movies with tens of thousands of ratings, which makes a linear scale a poor choice.
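A minimal sketch of the log-scaled coloring that addresses this, mirroring the LogNorm cell in the code above (df, mpl and plt as already loaded/imported):
fig, ax = plt.subplots(figsize=(13, 9))
pts = ax.scatter(df.x, df.y, c=df.n_ratings, norm=mpl.colors.LogNorm())  # log color scale for the long-tailed counts
cbar = fig.colorbar(pts)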
|
989 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Set some Pandas options
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 25)
normals = pd.Series(np.random.normal(size=10))
normals.plot()
normals.cumsum().plot(grid=False)
variables = pd.DataFrame({'normal': np.random.normal(size=100),
'gamma': np.random.gamma(1, size=100),
'poisson': np.random.poisson(size=100)})
variables.cumsum(0).plot()
variables.cumsum(0).plot(subplots=True, grid=False)
variables.cumsum(0).plot(secondary_y='normal', grid=False)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(12, 4))
for i,var in enumerate(['normal','gamma','poisson']):
variables[var].cumsum(0).plot(ax=axes[i], title=var)
axes[0].set_ylabel('cumulative sum')
titanic = pd.read_excel("../data/titanic.xls", "titanic")
titanic.head()
titanic.groupby('pclass').survived.sum().plot(kind='bar')
titanic.groupby(['sex','pclass']).survived.sum().plot(kind='barh')
death_counts = pd.crosstab([titanic.pclass, titanic.sex], titanic.survived.astype(bool))
death_counts.plot(kind='bar', stacked=True, color=['black','gold'], grid=False)
death_counts.div(death_counts.sum(1).astype(float), axis=0).plot(kind='barh', stacked=True, color=['black','gold'])
titanic.fare.hist(grid=False)
titanic.fare.hist(bins=30)
sturges = lambda n: int(np.log2(n) + 1)
square_root = lambda n: int(np.sqrt(n))
from scipy.stats import kurtosis
doanes = lambda data: int(1 + np.log(len(data)) + np.log(1 + kurtosis(data) * (len(data) / 6.) ** 0.5))
n = len(titanic)
sturges(n), square_root(n), doanes(titanic.fare.dropna())
titanic.fare.hist(bins=doanes(titanic.fare.dropna()))
titanic.fare.dropna().plot(kind='kde', xlim=(0,600))
titanic.fare.hist(bins=doanes(titanic.fare.dropna()), normed=True, color='lightseagreen')
titanic.fare.dropna().plot(kind='kde', xlim=(0,600), style='r--')
titanic.boxplot(column='fare', by='pclass', grid=False)
bp = titanic.boxplot(column='age', by='pclass', grid=False)
for i in [1,2,3]:
y = titanic.age[titanic.pclass==i].dropna()
# Add some random "jitter" to the x-axis
x = np.random.normal(i, 0.04, size=len(y))
plt.plot(x, y.values, 'r.', alpha=0.2)
# Write your answer here
baseball = pd.read_csv("../data/baseball.csv")
baseball.head()
plt.scatter(baseball.ab, baseball.h)
plt.xlim(0, 700); plt.ylim(0, 200)
plt.scatter(baseball.ab, baseball.h, s=baseball.hr*10, alpha=0.5)
plt.xlim(0, 700); plt.ylim(0, 200)
plt.scatter(baseball.ab, baseball.h, c=baseball.hr, s=40, cmap='hot')
plt.xlim(0, 700); plt.ylim(0, 200);
_ = pd.scatter_matrix(baseball.loc[:,'r':'sb'], figsize=(12,8), diagonal='kde')
plt.plot(normals)
import seaborn as sns
sns.set()
plt.plot(normals)
data = np.random.multivariate_normal([0, 0], [[5, 2], [2, 2]], size=2000)
data = pd.DataFrame(data, columns=['x', 'y'])
data.head()
for col in 'xy':
sns.kdeplot(data[col], shade=True)
sns.distplot(data['x'])
sns.kdeplot(data);
cdystonia = pd.read_csv("../data/cdystonia.csv", index_col=None)
cdystonia.head()
cdystonia16 = cdystonia[cdystonia.week==16]
cmap = {'Placebo':'Reds', '10000U':'Blues'}
for treat in cmap:
age = cdystonia16[cdystonia16.treat==treat].age
twstrs = cdystonia16[cdystonia16.treat==treat].twstrs
sns.kdeplot(age, twstrs,
cmap=cmap[treat], shade=True, shade_lowest=False, alpha=0.3)
with sns.axes_style('white'):
sns.jointplot("age", "twstrs", cdystonia16, kind='kde');
titanic = titanic[titanic.age.notnull() & titanic.fare.notnull()]
sns.pairplot(titanic, vars=['age', 'fare', 'pclass', 'sibsp'], hue='survived', palette="muted", markers='+')
g = sns.FacetGrid(titanic, col="sex", row="pclass")
g.map(sns.distplot, 'age')
g = sns.FacetGrid(cdystonia, col="treat", row="week")
g.map(sns.regplot, 'age', 'twstrs', order=2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notice that by default a line plot is drawn, and a light grid is included. All of this can be changed, however
Step2: Similarly, for a DataFrame
Step3: As an illustration of the high-level nature of Pandas plots, we can split multiple series into subplots with a single argument for plot
Step4: Or, we may want to have some series displayed on the secondary y-axis, which can allow for greater detail and less empty space
Step5: If we would like a little more control, we can use matplotlib's subplots function directly, and manually assign plots to its axes
Step6: Bar plots
Step7: Another way of comparing the groups is to look at the survival rate, by adjusting for the number of people in each group.
Step8: Histograms
Step9: The hist method puts the continuous fare values into bins, trying to make a sensible decision about how many bins to use (or equivalently, how wide the bins are). We can override the default value (10)
Step10: There are algorithms for determining an "optimal" number of bins, each of which depends on the number of observations in the data series (a short sketch applying these rules follows after this list).
Step11: A density plot is similar to a histogram in that it describes the distribution of the underlying data, but rather than being a pure empirical representation, it is an estimate of the underlying "true" distribution. As a result, it is smoothed into a continuous line plot. We create them in Pandas using the plot method with kind='kde', where kde stands for kernel density estimate.
Step12: Often, histograms and density plots are shown together
Step13: Here, we had to normalize the histogram (normed=True), since the kernel density is normalized by definition (it is a probability distribution).
Step14: You can think of the box plot as viewing the distribution from above. The blue crosses are "outlier" points that occur outside the extreme quantiles.
Step15: When data are dense, a couple of tricks used above help the visualization
Step16: Scatterplots
Step17: Scatterplots are useful for data exploration, where we seek to uncover relationships among variables. There are no scatterplot methods for Series or DataFrame objects; we must instead use the matplotlib function scatter.
Step18: We can add additional information to scatterplots by assigning variables to either the size of the symbols or their colors.
Step19: To view scatterplots of a large number of variables simultaneously, we can use the scatter_matrix function that was recently added to Pandas. It generates a matrix of pair-wise scatterplots, optionally with histograms or kernel density estimates on the diagonal.
Step20: Seaborn
Step21: Seaborn is conventionally imported using the sns alias.
Step22: Seaborn works hand-in-hand with pandas to create publication-quality visualizations quickly and easily from DataFrame and Series data.
Step23: distplot combines a kernel density estimate and a histogram.
Step24: If kdeplot is provided with two columns of data, it will automatically generate a contour plot of the joint KDE.
Step25: Similarly, jointplot will generate a shaded joint KDE, along with the marginal KDEs of the two variables.
Step26: To explore correlations among several variables, the pairplot function generates pairwise plots, along with histograms along the diagonal, and a fair bit of customization.
Step27: Another way of exploring multiple variables simultaneously is to generate trellis plots with FacetGrid.
Step28: Using the cervical dystonia dataset, we can simultaneously examine the relationship between age and the primary outcome variable as a function of both the treatment received and the week of the treatment by creating a scatterplot of the data, and fitting a polynomial relationship between age and twstrs
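A short sketch applying the bin-count rules mentioned above, reusing the sturges/square_root/doanes helpers and the titanic frame defined in the code cells (an illustrative check, not part of the original notebook):
n = len(titanic.fare.dropna())
print(sturges(n), square_root(n), doanes(titanic.fare.dropna()))   # compare the three suggested bin counts
titanic.fare.hist(bins=doanes(titanic.fare.dropna()), grid=False)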
|
990 | <ASSISTANT_TASK:>
Python Code:
# This code tests that your Python installation worked.
# It generates a png image file that you should e-mail
# to the address shown on the plot
import scipy as sp
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import platform
import socket
# If you are a student, please fill in your first and last
# names inside the quotes in the two lines below. Do not
# modify anything else in this file
your_first_name = 'First'
your_last_name = 'Last'
# If you are an instructor, modify the next 3 lines.
# You do not need to modify anything else in this file.
classname = 'ATSC 301'
term = 'Fall_2016' # must contain no spaces
email = 'paustin@eos.ubc.ca'
plt.plot([0,1], 'r', [1,0], 'b')
plt.text( 0.5, 0.8, '{0:s} {1:s}'
.format(your_first_name, your_last_name),
horizontalalignment='center',
size = 'x-large',
bbox=dict(facecolor='purple', alpha=0.4))
plt.text( 0.5, 0.1,
'{1:s}\nscipy {2:s}\nnumpy {3:s}\nmatplotlib {4:s}\non {5:s}\n{6:s}'
.format(
classname,
term,
sp.__version__,
np.__version__,
matplotlib.__version__,
platform.platform(),
socket.gethostname()
) ,
horizontalalignment='center'
)
filename = your_last_name + '_' + your_first_name + '_' + term + '.png'
plt.title('*** E-mail the saved version of this plot, ***\n' +
'"{0:s}" to {1:s}'.format(filename, email), fontsize=12)
plt.savefig(filename)
plt.show()
%lsmagic
3*15
6+21/3
(6+21)/3
from __future__ import division
12*3
4+5*6-(21*8)
11/5
11//5
9734828*79372 # product of these two large integers is a long integer
# Note: This behaviour is not true as of Python 3, and an integer will
# always be as large as it needs to be, as shown below -- Loren.
12.*3.
123.4*(-53.9)/np.sqrt(5.)
11./5.
11.//5.
11.%5.
6.022e23*300.
(2+3j)*(-4+9j)
(2+3j)/(-4+9j)
np.sqrt(-3)
np.sqrt(-3+0j)
np.sin(0.5)
np.log(np.sin(0.5))
np.log(np.sin(0.5)+1.0)
np.log(5.5/1.2)
a = 23
p, q = 83.4, np.sqrt(2)
b = a
a = 23
b = a
a
b
a = 12
a
b
b = b + 1
b
c , d = 4, 7.92
c += 2
c
c *= 3
c
d /= -2
d
d -= 4
d
distance = 34.
time_traveled = 0.59
velocity = distance / time_traveled
velocity
# Calculates time, gallons of gas used, and cost of gasoline for
# a trip
distance = 400. # miles
mpg = 30. # car mileage
speed = 60. # average speed
costPerGallon = 4.10 # price of gas
time = distance / speed
gallons = distance / mpg
cost = gallons * costPerGallon
# Calculates the distance between two 3d Cartesian coordinates
import numpy as np
x1, y1, z1 = 23.7, -9.2, -7.8
x2, y2, z2 = -3.5, 4.8, 8.1
dr = np.sqrt( (x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2 )
dr = sqrt( (x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2 )
dr = np.sqrt( (x2-x1)**2
+ (y2-y1)**2
+ (z2-z1)**2 )
a = 1 + 2 \
+ 3 + 4
a = 1 + 2 + 3 + 4
help(range)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To run your saved file go to the IPython window and at the prompt type
Step2: There are a lot of magic commands, most of which we don't need right
Step3: Python returns the correct product, as expected. You can do more
Step4: Let's try some more arithmetic
Step5: Notice that the effect of the parentheses in In [4]
Step6: at the beginning of an IPython session. You only need to type it once
Step7: For the binary operators +, -, *, and //, the output is an
Step8: Note that the result of any operation involving only floating point
Step9: Notice that to obtain the expected result of $\sqrt{-3}$, you must write
Step10: The argument of the function can be a number or any kind of expression
Step11: Some NumPy functions
Step12: The equal sign "=" is the assignment operator. In the first
Step13: In this case Python associates a new memory location with the name b,
Step14: The assignment operator works from right to left; that is, it assigns
Step15: The statement, b = b+1 makes no sense in algebra, but in Python (and
Step16: Verify that you understand how the above operations work.
Step17: The variable names distance, time_traveled, and velocity
Step18: The number (or hash) symbol # is the "comment" character in Python;
Step19: We have introduced extra spaces into some of the expressions to improve
Step20: The reason for the error is that the sqrt function is not a part of
Step21: You can generally continue an expression on another line in Python for
Step22: is equivalent to
Step23: So you can use the backslash character (\) for explicit line continuation
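A minimal sketch of the two continuation styles discussed in the last few steps (values are arbitrary; np and x1..z2 come from the cells above):
a = 1 + 2 \
    + 3 + 4                      # explicit continuation with a backslash
dr = np.sqrt( (x2-x1)**2
              + (y2-y1)**2
              + (z2-z1)**2 )     # implicit continuation inside parentheses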
|
991 | <ASSISTANT_TASK:>
Python Code:
%run "../Utilities/Preparation.ipynb"
processGFormEN = not ('gformEN' in globals())
if processGFormEN:
# tz='Europe/Berlin' time
dateparseGForm = lambda x: pd.Timestamp(x.split(' GMT')[0], tz='Europe/Berlin').tz_convert('utc')
if processGFormEN:
csvEncoding = 'utf-8'
gformPath = "../../data/Google forms/"
genericFormName = '-gform-'
csvSuffix = '.csv'
enLanguageID = 'en'
enSuffix = enLanguageID
# dataFilesNamesStem is created in Preparation.ipynb
try:
gformEN = pd.read_csv(\
gformPath + dataFilesNamesStem + genericFormName + enSuffix + csvSuffix,\
dtype=str,\
parse_dates=['Timestamp'],\
date_parser=dateparseGForm,\
)
print("gformEN read_csv success")
except FileNotFoundError:
print("gformEN read_csv failed")
frLanguageID = 'fr'
frSuffix = frLanguageID
frTranslationsPath = gformPath + 'translations-' + frSuffix + csvSuffix
frCorrectScientificAnswersPath = gformPath + 'CorrectScientific-' + frSuffix + csvSuffix
frDemographicAnswersPath = gformPath + 'Demographic-' + frSuffix + csvSuffix
if processGFormEN:
gformEN['Language'] = pd.Series(enLanguageID, index=gformEN.index)
if processGFormEN:
# when did the user answer the questionnaire? After playing, before playing, undefined?
answerTemporalities = ['before', 'after', 'undefined'];
gformEN['Temporality'] = pd.Series(answerTemporalities[2], index=gformEN.index)
if processGFormEN:
renamedQuestions = pd.Index([
'Timestamp',
'Are you interested in video games?',
'Do you play video games?',
'How old are you?',
'What is your gender?',
'How long have you studied biology?',
'Are you interested in biology?',
'Before playing Hero.Coli, had you ever heard about synthetic biology?',
'Before playing Hero.Coli, had you ever heard about BioBricks?',
'Have you ever played an older version of Hero.Coli before?',
'Have you played the current version of Hero.Coli?',
'Have you played the arcade cabinet version of Hero.Coli?',
'Have you played the Android version of Hero.Coli?',
'In order to modify the abilities of the bacterium, you have to...',
'What are BioBricks and devices?',
'What is the name of this BioBrick? TER',
'What is the name of this BioBrick? PR',
'What is the name of this BioBrick? CDS',
'What is the name of this BioBrick? RBS',
'What does this BioBrick do? TER',
'What does this BioBrick do? PR',
'What does this BioBrick do? CDS',
'What does this BioBrick do? RBS',
'Pick the case where the BioBricks are well-ordered:',
'When does green fluorescence happen?',
'What happens when you unequip the movement device?',
'What is this? PLASMID',
'What does this device do? PCONS:RBS:GFP:TER',
'What does this device do? PCONS:RBS:FLHDC:TER',
'What does this device do? PCONS:RBS:AMPR:TER',
'What does this device do? PBAD:RBS:GFP:TER',
'What does this device do? PCONS:RBS:GFP:TER 2',
'What does this device do? PCONS:RBS:FLHDC:TER 2',
'What does this device do? PCONS:RBS:AMPR:TER 2',
'What does this device do? PBAD:RBS:GFP:TER 2',
'Guess: what would a device producing l-arabinose do, if it started with a l-arabinose-induced promoter?',
'Guess: the bacterium would glow yellow...',
'What is the species of the bacterium of the game?',
'What is the scientific name of the tails of the bacterium?',
'Find the antibiotic:',
'You can write down remarks here.',
'Do not edit - pre-filled anonymous ID',
'Language',
'Temporality'
])
if processGFormEN:
gformEN.columns = renamedQuestions
if processGFormEN:
correctAnswersEN = pd.Series(
[
# Timestamp
[], #1
# Basic demographics questions
[], #2
[], #3
[], #4
[], #5
# Basic biology questions
[], #6
[], #7
[], #8
[], #9
# Your experience with Hero.Coli
[], #10
[], #11
[], #12
[], #13
# General mechanics of the game
["Edit the DNA of the bacterium"], #14
["DNA sequences"], #15
# BioBricks
["Terminator"], #16
["Promoter"], #17
["Coding Sequence"], #18
["RBS"], #19
# BioBrick functions
["It shows the end of the device"], #20
["It controls when the device is active"], #21
["It controls which protein is produced, and thus which ability is affected"], #22
["It controls the level of expression, and thus how much the ability will be affected"], #23
# Devices
["Option 1"], #24
["Under blue light, when the GFP device is equipped"], #25
["Flagella quickly disappear one by one"], #26
# Devices
["A plasmid - it makes it possible to equip an additional device"], #27
# Device symbols
["It generates green fluorescence"], #28
["It makes it possible to move faster"], #29
["It generates antibiotic resistance"], #30
["It generates green fluorescence in presence of l-arabinose"], #31
# Device symbols
["It generates green fluorescence"], #32
["It makes it possible to move faster"], #33
["It generates antibiotic resistance"], #34
["It generates green fluorescence in presence of l-arabinose"], #35
# Beyond the game
["After being induced, it would produce more and"], #36
["If it produces YFP under cyan light",
"If it produced YFP under cyan light"], #37
["E. Coli"], #38
["Flagella"], #39
["Ampicillin"], #40
# Remarks
[], #41
# ID
[], #42
# Language
[], #43
# Temporality
[], #44
], index = gformEN.columns
)
#correctAnswersEN
if processGFormEN:
interestPositives = ["A lot", "Extremely", "Moderately"]
gameInterestPositives = interestPositives
frequencyPositives = interestPositives
agePositives = [18,19,20,21,22,23]
genderPositives = ["Female"]
biologyStudyPositives = ["Until bachelor's degree", "At least until master's degree"]
biologyInterestPositives = interestPositives
yesNoIdontknowPositives = ["Yes"]
previousPlayPositives = ["Multiple times","A few times","Once","Yes"]
languagePositives = [enLanguageID]
temporalityPositives = [answerTemporalities[1]]
demographicAnswersEN = pd.Series(
[
# Timestamp
[], #1
# Basic demographics questions
interestPositives, #2
frequencyPositives, #3
agePositives, #4
genderPositives, #5
# Basic biology questions
biologyStudyPositives, #6
biologyInterestPositives, #7
yesNoIdontknowPositives, #8
yesNoIdontknowPositives, #9
# Your experience with Hero.Coli
previousPlayPositives, #10
previousPlayPositives, #11
previousPlayPositives, #12
previousPlayPositives, #13
# General mechanics of the game
[], #14
[], #15
# BioBricks
[], #16
[], #17
[], #18
[], #19
# BioBrick functions
[], #20
[], #21
[], #22
[], #23
# Devices
[], #24
[], #25
[], #26
# Devices
[], #27
# Device symbols
[], #28
[], #29
[], #30
[], #31
# Device symbols
[], #32
[], #33
[], #34
[], #35
# Beyond the game
[], #36
[], #37
[], #38
[], #39
[], #40
# Remarks
[], #41
# ID
[], #42
# Language
languagePositives, #43
# Temporality
temporalityPositives, #44
], index = gformEN.columns
)
#demographicAnswersEN
if processGFormEN:
correctAnswers = correctAnswersEN
if processGFormEN:
demographicAnswers = demographicAnswersEN
#correctAnswers.loc[gformEN.columns[19]]
if processGFormEN:
checkpointQuestionMatching = pd.DataFrame(
{
'checkpoint' : [
# "Timestamp", # 1
'',
# Basic demographics questions
# "Are you interested in video games?", # 2
'',
# "Do you play video games?", # 3
'',
# "How old are you?", # 4
'',
# "What is your gender?", # 5
'',
# Basic biology questions
# "How long have you studied biology?", # 6
'',
# "Are you interested in biology?", # 7
'',
# "Before playing Hero.Coli, had you ever heard about synthetic biology?", # 8
'',
# "Before playing Hero.Coli, had you ever heard about BioBricks?", # 9
'',
# Your experience with Hero.Coli
# "Have you ever played an older version of Hero.Coli before?", # 10
'',
# "Have you played the current version of Hero.Coli?", # 11
'',
# "Have you played the arcade cabinet version of Hero.Coli?", # 12
'',
# "Have you played the Android version of Hero.Coli?", # 13
'',
# General mechanics of the game
# "In order to modify the abilities of the bacterium, you have to...", # 14
'tutorial1.Checkpoint00',
# "What are BioBricks and devices?", # 15
'tutorial1.Checkpoint00',
# BioBricks
# "What is the name of this BioBrick?", # 16
'tutorial1.Checkpoint05',
# "What is the name of this BioBrick?", # 17
'tutorial1.Checkpoint05',
# "What is the name of this BioBrick?", # 18
'tutorial1.Checkpoint02',
# "What is the name of this BioBrick?", # 19
'tutorial1.Checkpoint01',
# BioBrick functions
# "What does this BioBrick do?", # 20
'tutorial1.Checkpoint05',
# "What does this BioBrick do?", # 21
'tutorial1.Checkpoint05',
# "What does this BioBrick do?", # 22
'tutorial1.Checkpoint02',
# "What does this BioBrick do?", # 23
'tutorial1.Checkpoint01',
# Devices
# "Pick the case where the BioBricks are well-ordered:", # 24
'tutorial1.Checkpoint01',
# "When does green fluorescence happen?", # 25
'tutorial1.Checkpoint02',
# "What happens when you unequip the movement device?", # 26
'tutorial1.Checkpoint00',
# Devices
# "What is this?", # 27
'tutorial1.Checkpoint05',
# Device symbols
# "What does this device do?", # 28
'tutorial1.Checkpoint02',
# "What does this device do?", # 29
'tutorial1.Checkpoint02',
# "What does this device do?", # 30
'tutorial1.Checkpoint13',
# "What does this device do?", # 31
'tutorial1.Checkpoint05',
# Device symbols
# "What does this device do?", # 32
'tutorial1.Checkpoint02',
# "What does this device do?", # 33
'tutorial1.Checkpoint02',
# "What does this device do?", # 34
'tutorial1.Checkpoint13',
# "What does this device do?", # 35
'tutorial1.Checkpoint05',
# Beyond the game
# "Guess: what would a device producing l-arabinose do, \
# if it started with a l-arabinose-induced promoter?", # 36
'tutorial1.Checkpoint05',
# "Guess: the bacterium would glow yellow...", # 37
'tutorial1.Checkpoint02',
# "What is the species of the bacterium of the game?", # 38
'tutorial1.Checkpoint00',
# "What is the scientific name of the tails of the bacterium?", # 39
'tutorial1.Checkpoint00',
# "Find the antibiotic:", # 40
'tutorial1.Checkpoint02',
# Remarks
# "You can write down remarks here.", # 41
'',
# Thanks to have filled this study!
# "Do not edit - pre-filled anonymous ID" # 42
'',
# Language
'',
# Temporality
'',
]
}, index = gformEN.columns
)
#checkpointQuestionMatching
#checkpointQuestionMatching['checkpoint'][20]
#checkpointQuestionMatching.loc[gformEN.columns[20], 'checkpoint']
if processGFormEN:
def getUniqueSortedCheckpoints( checkpoints ):
result = checkpoints.unique()
result = result[result!='']
result = pd.Series(result)
result = result.sort_values()
result.index = range(0, len(result))
return result
if processGFormEN:
validableCheckpoints = getUniqueSortedCheckpoints(checkpointQuestionMatching['checkpoint'])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Additional columns
Step2: Preparation for temporalities
Step3: Displays all unique answers to every question
Step4: List of answers
Step5: Demographic questions
Step6: Language selection
Step7: Basic operations
Step8: Checkpoint / Question matching
|
992 | <ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.1,<2.2"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('mesh', times=np.linspace(0,1,11), dataset='mesh01')
print b['requiv@component']
print b['requiv_max@primary@component']
print b['requiv_max@primary@constraint']
b.set_value('requiv@primary@component', 3)
b.set_value('sma@binary@component', 10)
passed, message = b.run_checks()
print passed, message
b.set_value('sma@binary@component', 5)
passed, message = b.run_checks()
print passed, message
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Now let's add a mesh dataset at a few different times so that we can see how the potentials affect the surfaces of the stars.
Step3: Relevant Parameters
Step4: Critical Potentials and System Checks
Step5: At this time, if you were to call run_compute, an error would be thrown. An error isn't immediately thrown when setting requiv, however, since the overflow can be rectified by changing any of the other relevant parameters. For instance, let's change sma to be large enough to account for this value of rpole and you'll see that the error does not occur again.
Step6: These logger warnings are handy when running phoebe interactively, but in a script it's also handy to be able to check whether the system is currently computable /before/ running run_compute.
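A minimal sketch of that script-side guard, reusing the run_checks call shown in the code above (b as built earlier; raising an exception is just one illustrative choice):
passed, message = b.run_checks()
if not passed:
    raise ValueError('System is not computable: {0}'.format(message))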
|
993 | <ASSISTANT_TASK:>
Python Code:
# TEST
from pytest import approx
import numpy as np
import larch
import pandas as pd
from larch import PX, P, X
from larch.data_warehouse import example_file
df = pd.read_csv(example_file("MTCwork.csv.gz"))
df.set_index(['casenum','altnum'], inplace=True, drop=False)
m = larch.Model()
m.utility_ca = (
PX('tottime')
+ PX('totcost')
+ sum(P(f'ASC_{i}') * X(f'altnum=={i}') for i in [2,3,4,5,6])
+ sum(P(f'HHINC#{i}') * X(f'(altnum=={i})*hhinc') for i in [2,3,4,5,6])
)
m.fit(df, y=df.chose)
# TEST
assert m.pvals == approx(np.array([
-2.178014e+00, -3.725078e+00, -6.708610e-01,
-2.376328e+00, -2.067752e-01, -2.169938e-03,
3.577067e-04, -5.286324e-03, -1.280798e-02,
-9.686303e-03, -4.920235e-03, -5.134209e-02]))
m.predict(df)
proba = m.predict_proba(df)
proba.head(10)
score = m.score(df, y=df.chose)
score
score * m.dataframes.n_cases
# TEST
assert score * m.dataframes.n_cases == approx(-3626.1862555129305)
import larch.prelearning
dfs = larch.DataFrames(df.drop(columns=['casenum','altnum']), ch='chose', crack=True)
prelearned = larch.prelearning.XGBoostPrelearner(
dfs,
ca_columns=['totcost', 'tottime'],
co_columns=['numveh', 'hhsize', 'hhinc', 'famtype', 'age'],
eval_metric='logloss',
)
dfs1 = prelearned.apply(dfs)
m = larch.Model(dfs1)
m.utility_ca = (
PX('tottime')
+ PX('totcost')
+ PX('prelearned_utility')
)
m.utility_co[2] = P("ASC_SR2") + P("hhinc#2") * X("hhinc")
m.utility_co[3] = P("ASC_SR3P") + P("hhinc#3") * X("hhinc")
m.utility_co[4] = P("ASC_TRAN") + P("hhinc#4") * X("hhinc")
m.utility_co[5] = P("ASC_BIKE") + P("hhinc#5") * X("hhinc")
m.utility_co[6] = P("ASC_WALK") + P("hhinc#6") * X("hhinc")
m.load_data()
m.loglike()
m.maximize_loglike()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To use the scikit-learn interface, we'll need to define our model
Step2: Because the larch.Model object is an estimator, it offers a fit method (recapped in a short sketch after this list)
Step3: Unlike most scikit-learn estimators, the fit method cannot
Step4: Using Scikit-Learn within Larch
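A short sketch recapping the estimator-style calls used in the code above (df and m as defined there):
m.fit(df, y=df.chose)             # estimate the model parameters
proba = m.predict_proba(df)       # per-alternative choice probabilities
score = m.score(df, y=df.chose)   # mean log-likelihood per case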
|
994 | <ASSISTANT_TASK:>
Python Code:
import SimpleITK as sitk
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of html.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
    """
    Write the given transformation to file, resample the moving_image onto the fixed_image grid and save the
    result to file.
    Args:
        transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
        fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
        moving_image (SimpleITK Image): resample this image.
        outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
            outputfile_prefix.mha.
    """
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
    """
    Callback from the dropdown which selects the two series which will be used for registration.
    The callback prints out some information about each of the series from the meta-data dictionary.
    For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
    Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
    """
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the seriesID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = reader.GetGDCMSeriesIDs(data_directory) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
reader.SetFileNames(series_file_names[selected_series_fixed])
fixed_image = reader.Execute()
reader.SetFileNames(series_file_names[selected_series_moving])
moving_image = reader.Execute()
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelIDValue()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
print(initial_transform)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
print(final_transform_v1)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.Transform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
print(final_transform_v11)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
print(final_transform)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Utility functions
Step4: Loading Data
Step5: Initial Alignment
Step6: Look at the transformation, what type is it?
Step7: Final registration
Step8: Look at the final transformation, what type is it?
Step9: Version 1.1
Step10: Look at the final transformation, what type is it? Why is it different from the previous example?
Step11: Version 2
Step12: Look at the final transformation, what type is it?
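One way to answer the "what type is it?" questions programmatically, complementing the printed transforms above (a sketch using the transform objects from the earlier cells):
for t in (initial_transform, final_transform_v1, final_transform_v11, final_transform):
    print(type(t).__name__)   # e.g. Euler3DTransform vs. a generic composite Transform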
|
995 | <ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import shogun as sg
import matplotlib.pyplot as plt
%matplotlib inline
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../../data')
# training data
train_income=['Low','Medium','Low','High','Low','High','Medium','Medium','High','Low','Medium',
'Medium','High','Low','Medium']
train_age = ['Old','Young','Old','Young','Old','Young','Young','Old','Old','Old','Young','Old',
'Old','Old','Young']
train_education = ['University','College','University','University','University','College','College',
'High School','University','High School','College','High School','University','High School','College']
train_marital = ['Married','Single','Married','Single','Married','Single','Married','Single','Single',
'Married','Married','Single','Single','Married','Married']
train_usage = ['Low','Medium','Low','High','Low','Medium','Medium','Low','High','Low','Medium','Low',
'High','Low','Medium']
# print data
print('Training Data Table : \n')
print('Income \t\t Age \t\t Education \t\t Marital Status \t Usage')
for i in range(len(train_income)):
print(train_income[i]+' \t\t '+train_age[i]+' \t\t '+train_education[i]+' \t\t '+train_marital[i]+' \t\t '+train_usage[i])
# encoding dictionary
income = {'Low' : 1.0, 'Medium' : 2.0, 'High' : 3.0}
age = {'Young' : 1.0, 'Old' : 2.0}
education = {'High School' : 1.0, 'College' : 2.0, 'University' : 3.0}
marital_status = {'Married' : 1.0, 'Single' : 2.0}
usage = {'Low' : 1.0, 'Medium' : 2.0, 'High' : 3.0}
# encode training data
for i in range(len(train_income)):
train_income[i] = income[train_income[i]]
train_age[i] = age[train_age[i]]
train_education[i] = education[train_education[i]]
train_marital[i] = marital_status[train_marital[i]]
train_usage[i] = usage[train_usage[i]]
# form Shogun feature matrix
train_data = np.array([train_income, train_age, train_education, train_marital])
train_feats = sg.create_features(train_data);
# form Shogun multiclass labels
labels = sg.create_labels(np.array(train_usage));
# create ID3ClassifierTree object
id3 = sg.create_machine("ID3ClassifierTree", labels=labels)
# learn the tree from training features
is_successful = id3.train(train_feats)
# test data
test_income = ['Medium','Medium','Low','High','High']
test_age = ['Old','Young','Old','Young','Old']
test_education = ['University','College','High School','University','College']
test_marital = ['Married','Single','Married','Single','Married']
test_usage = ['Low','Medium','Low','High','High']
# tabulate test data
print('Test Data Table : \n')
print('Income \t\t Age \t\t Education \t\t Marital Status \t Usage')
for i in range(len(test_income)):
print(test_income[i]+' \t\t '+test_age[i]+' \t\t '+test_education[i]+' \t\t '+test_marital[i]+' \t\t ?')
# encode test data
for i in range(len(test_income)):
test_income[i] = income[test_income[i]]
test_age[i] = age[test_age[i]]
test_education[i] = education[test_education[i]]
test_marital[i] = marital_status[test_marital[i]]
# bind to shogun features
test_data = np.array([test_income, test_age, test_education, test_marital])
test_feats = sg.create_features(test_data)
# apply decision tree classification
test_labels = id3.apply(test_feats)
output = test_labels.get("labels");
output_labels=[0]*len(output)
# decode back test data for printing
for i in range(len(test_income)):
test_income[i]=list(income.keys())[list(income.values()).index(test_income[i])]
test_age[i]=list(age.keys())[list(age.values()).index(test_age[i])]
test_education[i]=list(education.keys())[list(education.values()).index(test_education[i])]
test_marital[i]=list(marital_status.keys())[list(marital_status.values()).index(test_marital[i])]
output_labels[i]=list(usage.keys())[list(usage.values()).index(output[i])]
# print output data
print('Final Test Data Table : \n')
print('Income \t Age \t Education \t Marital Status \t Usage(predicted)')
for i in range(len(test_income)):
print(test_income[i]+' \t '+test_age[i]+' \t '+test_education[i]+' \t '+test_marital[i]+' \t\t '+output_labels[i])
# class attribute
evaluation = {'unacc' : 1.0, 'acc' : 2.0, 'good' : 3.0, 'vgood' : 4.0}
# non-class attributes
buying = {'vhigh' : 1.0, 'high' : 2.0, 'med' : 3.0, 'low' : 4.0}
maint = {'vhigh' : 1.0, 'high' : 2.0, 'med' : 3.0, 'low' : 4.0}
doors = {'2' : 1.0, '3' : 2.0, '4' : 3.0, '5more' : 4.0}
persons = {'2' : 1.0, '4' : 2.0, 'more' : 3.0}
lug_boot = {'small' : 1.0, 'med' : 2.0, 'big' : 3.0}
safety = {'low' : 1.0, 'med' : 2.0, 'high' : 3.0}
with open( os.path.join(SHOGUN_DATA_DIR, 'uci/car/car.data'), 'r') as f:
feats = []
labels = []
# read data from file and encode
for line in f:
words = line.rstrip().split(',')
words[0] = buying[words[0]]
words[1] = maint[words[1]]
words[2] = doors[words[2]]
words[3] = persons[words[3]]
words[4] = lug_boot[words[4]]
words[5] = safety[words[5]]
words[6] = evaluation[words[6]]
feats.append(words[0:6])
labels.append(words[6])
feats = np.array(feats)
labels = np.array(labels)
# number of test vectors
num_test_vectors = 200;
test_indices = np.random.randint(feats.shape[0], size = num_test_vectors)
test_features = feats[test_indices]
test_labels = labels[test_indices]
# remove test vectors from training set
feats = np.delete(feats,test_indices,0)
labels = np.delete(labels,test_indices,0)
# shogun test features and labels
test_feats = sg.create_features(test_features.T)
test_labels = sg.create_labels(test_labels)
# method bundling ID3 training and application of the trained tree to the test dataset
def ID3_routine(feats, labels):
# Shogun train features and labels
train_feats = sg.create_features(feats.T)
train_lab = sg.create_labels(labels)
# create ID3ClassifierTree object
id3 = sg.create_machine("ID3ClassifierTree", labels=train_lab)
# learn the tree from training features
id3.train(train_feats)
# apply to test dataset
output = id3.apply(test_feats)
return output
output = ID3_routine(feats, labels)
# Shogun object for calculating multiclass accuracy
accuracy = sg.create_evaluation("MulticlassAccuracy")
print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))
# list of error rates for all training dataset sizes
error_rate = []
# number of error rate readings taken for each value of dataset size
num_repetitions = 3
# loop over training dataset size
for i in range(500,1600,200):
indices = np.random.randint(feats.shape[0], size = i)
train_features = feats[indices]
train_labels = labels[indices]
average_error = 0
for i in range(num_repetitions):
output = ID3_routine(train_features, train_labels)
average_error = average_error + (1-accuracy.evaluate(output, test_labels))
error_rate.append(average_error/num_repetitions)
# plot the error rates
from scipy.interpolate import interp1d
fig,axis = plt.subplots(1,1)
x = np.arange(500,1600,200)
f = interp1d(x, error_rate)
xnew = np.linspace(500,1500,100)
plt.plot(x,error_rate,'o',xnew,f(xnew),'-')
plt.xlim([400,1600])
plt.xlabel('training dataset size')
plt.ylabel('Classification Error')
plt.title('Decision Tree Performance')
plt.show()
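# added description (not in the original notebook): the helper below builds a toy 2-attribute, 3-class dataset -
# class 0: X=1, Y in [1,5); class 1: X=1, Y in [5,9); class 2: X=2, Y in [1,9) - plus 20 unlabelled test points,
# optionally plots everything, and returns (train_features, train_labels, test_features) as Shogun objects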
def create_toy_classification_dataset(ncat,do_plot):
# create attribute values and labels for class 1
x = np.ones((1,ncat))
y = 1+np.random.rand(1,ncat)*4
lab = np.zeros(ncat)
# add attribute values and labels for class 2
x = np.concatenate((x,np.ones((1,ncat))),1)
y = np.concatenate((y,5+np.random.rand(1,ncat)*4),1)
lab = np.concatenate((lab,np.ones(ncat)))
# add attribute values and labels for class 3
x = np.concatenate((x,2*np.ones((1,ncat))),1)
y = np.concatenate((y,1+np.random.rand(1,ncat)*8),1)
lab = np.concatenate((lab,2*np.ones(ncat)))
# create test data
ntest = 20
x_t = np.concatenate((np.ones((1,int(3*ntest/4))),2*np.ones((1,int(ntest/4)))),1)
y_t = 1+np.random.rand(1,ntest)*8
if do_plot:
# plot training data
c = ['r','g','b']
for i in range(3):
plt.scatter(x[0,lab==i],y[0,lab==i],color=c[i],marker='x',s=50)
# plot test data
plt.scatter(x_t[0,:],y_t[0,:],color='k',s=10,alpha=0.8)
plt.xlabel('attribute X')
plt.ylabel('attribute Y')
plt.show()
# form training feature matrix
train_feats = sg.create_features(np.concatenate((x,y),0))
# from training labels
train_labels = sg.create_labels(lab)
# from test feature matrix
test_feats = sg.create_features(np.concatenate((x_t,y_t),0))
return (train_feats,train_labels,test_feats);
train_feats,train_labels,test_feats = create_toy_classification_dataset(20,True)
# steps in C4.5 Tree training bundled together in a python method
def train_tree(feats,types,labels):
# C4.5 Tree object
tree = sg.create_machine("C45ClassifierTree", labels=labels, m_nominal=types)
# supply training matrix and train
tree.train(feats)
return tree
# specify attribute types X is categorical hence True, Y is continuous hence False
feat_types = np.array([True,False])
# get back trained tree
C45Tree = train_tree(train_feats,feat_types,train_labels)
def classify_data(tree,data):
# get classification labels
output = tree.apply(data)
# get classification certainty
output_certainty=tree.get('m_certainty')
return output,output_certainty
out_labels,out_certainty = classify_data(C45Tree,test_feats)
# plot results
def plot_toy_classification_results(train_feats,train_labels,test_feats,test_labels):
train = train_feats.get('feature_matrix')
lab = train_labels.get("labels")
test = test_feats.get('feature_matrix')
out_labels = test_labels.get("labels")
c = ['r','g','b']
for i in range(out_labels.size):
plt.scatter(test[0,i],test[1,i],color=c[np.int32(out_labels[i])],s=50)
# plot training dataset for visual comparison
for i in range(3):
plt.scatter(train[0,lab==i],train[1,lab==i],color=c[i],marker='x',s=30,alpha=0.7)
plt.show()
plot_toy_classification_results(train_feats,train_labels,test_feats,out_labels)
import csv
# dictionary to encode class names to class labels
to_label = {'Iris-setosa' : 0.0, 'Iris-versicolor' : 1.0, 'Iris-virginica' : 2.0}
# read csv file and separate out labels and features
lab = []
feat = []
with open( os.path.join(SHOGUN_DATA_DIR, 'uci/iris/iris.data')) as csvfile:
csvread = csv.reader(csvfile,delimiter=',')
for row in csvread:
feat.append([float(i) for i in row[0:4]])
lab.append(to_label[row[4]])
lab = np.array(lab)
feat = np.array(feat).T
# no.of vectors in test dataset
ntest = 25
# no. of vectors in train dataset
ntrain = 150-ntest
# randomize the order of vectors
subset = np.int32(np.random.permutation(150))
# choose 1st ntrain from randomized set as training vectors
feats_train = feat[:,subset[0:ntrain]]
# form training labels correspondingly
train_labels = lab[subset[0:ntrain]]
# form test features and labels (for accuracy evaluations)
feats_test = feat[:,subset[ntrain:ntrain+ntest]]
test_labels = lab[subset[ntrain:ntrain+ntest]]
# plot training features
c = ['r', 'g', 'b']
for i in range(3):
plt.scatter(feats_train[2,train_labels==i],feats_train[3,train_labels==i],color=c[i],marker='x')
# plot test data points in black
plt.scatter(feats_test[2,:],feats_test[3,:],color='k',marker='o')
plt.show()
# training data
feats_train = sg.create_features(feats_train)
train_labels = sg.create_labels(train_labels)
# test data
feats_test = sg.create_features(feats_test)
test_labels = sg.create_labels(test_labels)
# randomize the order of vectors
subset = np.int32(np.random.permutation(ntrain))
nvalidation = 45
# form training subset and validation subset
train_subset = subset[0:ntrain-nvalidation]
validation_subset = subset[ntrain-nvalidation:ntrain]
# set attribute types - all continuous
feature_types = np.array([False, False, False, False])
# remove validation subset before training the tree
feats_train.add_subset(train_subset)
train_labels.add_subset(train_subset)
# train tree
C45Tree = train_tree(feats_train,feature_types,train_labels)
# bring back validation subset
feats_train.remove_subset()
train_labels.remove_subset()
# remove data belonging to training subset
feats_train.add_subset(validation_subset)
train_labels.add_subset(validation_subset)
# FIXME: expose C45ClassifierTree::prune_tree
# prune the tree
# C45Tree.prune_tree(feats_train,train_labels)
# bring back training subset
feats_train.remove_subset()
train_labels.remove_subset()
# get results
output, output_certainty = classify_data(C45Tree,feats_test)
# Shogun object for calculating multiclass accuracy
accuracy = sg.create_evaluation("MulticlassAccuracy")
print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))
# convert MulticlassLabels object to labels vector
output = output.get("labels")
test_labels = test_labels.get("labels")
train_labels = train_labels.get("labels")
# convert features object to matrix
feats_test = feats_test.get('feature_matrix')
feats_train = feats_train.get('feature_matrix')
# plot ground truth
for i in range(3):
plt.scatter(feats_test[2,test_labels==i],feats_test[3,test_labels==i],color=c[i],marker='x',s=100)
# plot predicted labels
for i in range(output.size):
plt.scatter(feats_test[2,i],feats_test[3,i],color=c[np.int32(output[i])],marker='o',s=30*output_certainty[i])
plt.show()
train_feats,train_labels,test_feats=create_toy_classification_dataset(20,True)
def train_carttree(feat_types,problem_type,num_folds,use_cv_pruning,labels,feats):
# create CART tree object
c = sg.create_machine("CARTree", nominal=feat_types,
mode=problem_type,
folds=num_folds,
apply_cv_pruning=use_cv_pruning,
labels=labels)
# train using training features
c.train(feats)
return c
# form feature types True for nominal (attribute X), False for ordinal/continuous (attribute Y)
ft = np.array([True, False])
# get back trained tree
cart = train_carttree(ft, "PT_MULTICLASS", 5, True, train_labels, train_feats)
# get output labels
output_labels = cart.apply(test_feats)
plot_toy_classification_results(train_feats,train_labels,test_feats,output_labels)
def create_toy_regression_dataset(nsamples,noise_var):
# randomly choose positions in X axis between 0 to 16
samples_x = np.random.rand(1,nsamples)*16
# find out y (=sin(x)) values for the sampled x positions and add noise to it
samples_y = np.sin(samples_x)+(np.random.rand(1,nsamples)-0.5)*noise_var
# plot the samples
plt.scatter(samples_x,samples_y,color='b',marker='x')
# create training features
train_feats = sg.create_features(samples_x)
# training labels
train_labels = sg.create_labels(samples_y[0,:])
return (train_feats,train_labels)
# plot the reference sinusoid
def plot_ref_sinusoid():
plot_x = np.linspace(-2,18,100)
plt.plot(plot_x,np.sin(plot_x),color='y',linewidth=1.5)
plt.xlabel('Feature values')
plt.ylabel('Labels')
plt.xlim([-3,19])
plt.ylim([-1.5,1.5])
# number of samples is 300, noise variance is 0.5
train_feats,train_labels = create_toy_regression_dataset(300,0.5)
plot_ref_sinusoid()
plt.show()
# feature type - continuous
feat_type = np.array([False])
# get back trained tree
cart = train_carttree(feat_type, "PT_REGRESSION", 5, True, train_labels, train_feats)
def plot_predicted_sinusoid(cart):
# regression range - 0 to 16
x_test = np.array([np.linspace(0,16,100)])
# form Shogun features
test_feats = sg.create_features(x_test)
# apply regression using our previously trained CART-tree
regression_output = cart.apply(test_feats).get("labels")
# plot the result
plt.plot(x_test[0,:],regression_output,linewidth=2.0)
# plot reference sinusoid
plot_ref_sinusoid()
plt.show()
plot_predicted_sinusoid(cart)
# dictionary to encode class names to class labels
to_label = {'Iris-setosa' : 0.0, 'Iris-versicolor' : 1.0, 'Iris-virginica' : 2.0}
# read csv file and separate out labels and features
lab = []
feat = []
with open( os.path.join(SHOGUN_DATA_DIR, 'uci/iris/iris.data')) as csvfile:
csvread = csv.reader(csvfile,delimiter=',')
for row in csvread:
feat.append([float(i) for i in row[0:4]])
lab.append(to_label[row[4]])
lab = np.array(lab)
feat = np.array(feat).T
# plot the dataset using two highly correlated attributes
c = ['r', 'g', 'b']
for i in range(3):
plt.scatter(feat[2,lab==i],feat[3,lab==i],color=c[i],marker='x')
plt.show()
# set attribute types - all continuous
feature_types = np.array([False, False, False, False])
# setup CART-tree with cross validation pruning switched off
cart = sg.create_machine("CARTree", nominal=feature_types,
mode="PT_MULTICLASS",
folds=5,
apply_cv_pruning=False)
# training features
feats_train = sg.create_features(feat)
# training labels
labels_train = sg.create_labels(lab)
# set evaluation criteria - multiclass accuracy
accuracy = sg.create_evaluation("MulticlassAccuracy")
# set splitting criteria - 10 fold cross-validation
split = sg.create_splitting_strategy("CrossValidationSplitting", labels=labels_train, num_subsets=10)
# set cross-validation parameters
cross_val = sg.create_machine_evaluation("CrossValidation",
machine=cart,
features=feats_train,
labels=labels_train,
splitting_strategy=split,
evaluation_criterion=accuracy,
num_runs=10)
# get cross validation result
# CARTree is not x-validatable
result = cross_val.evaluate()
print(result)
print('Mean Accuracy : ' + str(result.get("mean")))
# dictionary to convert string features to integer values
to_int = {'A' : 1, 'B' : 2, 'C' : 3, 'D' : 4, 'E' : 5}
# read csv file and separate out labels and features
lab = []
feat = []
with open( os.path.join(SHOGUN_DATA_DIR, 'uci/servo/servo.data')) as csvfile:
csvread = csv.reader(csvfile,delimiter=',')
for row in csvread:
feat.append([to_int[row[0]], to_int[row[1]], float(row[2]), float(row[3])])
lab.append(float(row[4]))
lab = np.array(lab)
feat = np.array(feat).T
# form training features
feats_train = sg.create_features(feat)
# form training labels
labels_train = sg.create_labels(lab)
def get_cv_error(max_depth):
    # set attribute types - 2 nominal and 2 ordinal
    feature_types = np.array([True, True, False, False])
    # setup CART-tree with cross validation pruning switched off
    cart = sg.create_machine("CARTree", nominal=feature_types,
                             mode="PT_REGRESSION",
                             folds=5,
                             apply_cv_pruning=False,
                             max_depth=max_depth)
    # set evaluation criteria - mean squared error
    accuracy = sg.create_evaluation("MeanSquaredError")
    # set splitting criteria - 10 fold cross-validation
    split = sg.create_splitting_strategy("CrossValidationSplitting", labels=labels_train, num_subsets=10)
    # set cross-validation parameters
    cross_val = sg.create_machine_evaluation("CrossValidation",
                                             machine=cart,
                                             features=feats_train,
                                             labels=labels_train,
                                             splitting_strategy=split,
                                             evaluation_criterion=accuracy,
                                             num_runs=10)
    # return cross validation result
    return cross_val.evaluate().get("mean")
import matplotlib.pyplot as plt
cv_errors = [get_cv_error(i) for i in range(1,15)]
plt.plot(range(1,15),cv_errors,'bo',range(1,15),cv_errors,'k')
plt.xlabel('max_allowed_depth')
plt.ylabel('cross-validated error')
plt.ylim(0,1.2)
plt.show()
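# (Added sketch, not part of the original notebook) pick the depth with the lowest
# cross-validated error from the sweep above; np.argmin is zero-based while the
# sweep started at max_depth=1, hence the +1.
best_depth = int(np.argmin(cv_errors)) + 1
print('max_depth with lowest CV error : ' + str(best_depth))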
train_feats,train_labels,test_feats = create_toy_classification_dataset(20,True)
def train_chaidtree(dependent_var_type,feature_types,num_bins,feats,labels):
    # create CHAID tree object
    c = sg.create_machine("CHAIDTree", dependent_vartype=dependent_var_type,
                          feature_types=feature_types,
                          num_breakpoints=num_bins,
                          labels=labels)
    # train using training features
    c.train(feats)
    return c
# form feature types 0 for nominal (attribute X), 2 for continuous (attribute Y)
ft = np.array([0, 2],dtype=np.int32)
# cache training matrix
train_feats_cache=sg.create_features(train_feats.get("feature_matrix"))
# get back trained tree - dependent variable type is nominal (hence 0), number of bins for binning is 10
chaid = train_chaidtree(0,ft,10,train_feats,train_labels)
print('updated_matrix')
print(train_feats.get('feature_matrix'))
print('')
print('original_matrix')
print(train_feats_cache.get('feature_matrix'))
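# (Added check, not part of the original notebook) the updated matrix should hold at
# most num_bins distinct values per continuous dimension; row index 1 was binned above.
print('distinct values in row 1 : ' + str(len(np.unique(train_feats.get('feature_matrix')[1, :]))))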
# get output labels
output_labels = chaid.apply(test_feats)
plot_toy_classification_results(train_feats_cache,train_labels,test_feats,output_labels)
train_feats,train_labels = create_toy_regression_dataset(300,0.5)
plot_ref_sinusoid()
plt.show()
# feature type - continuous
feat_type = np.array([2],dtype=np.int32)
# get back trained tree
chaid = train_chaidtree(2,feat_type, 50, train_feats, train_labels)
plot_predicted_sinusoid(chaid)
train_feats = sg.create_features(sg.read_csv( os.path.join(SHOGUN_DATA_DIR, 'uci/wine/fm_wine.dat')))
train_labels = sg.create_labels(sg.read_csv( os.path.join(SHOGUN_DATA_DIR, 'uci/wine/label_wine.dat')))
# set attribute types - all attributes are continuous(2)
feature_types = np.array([2 for i in range(13)],dtype=np.int32)
# setup CHAID tree - dependent variable is nominal(0), feature types set, number of bins(20)
chaid = sg.create_machine("CHAIDTree", dependent_vartype=0,
feature_types=feature_types,
num_breakpoints=20)
# set evaluation criteria - multiclass accuracy
accuracy = sg.create_evaluation("MulticlassAccuracy")
# set splitting criteria - 10 fold cross-validation
split = sg.create_splitting_strategy("CrossValidationSplitting", labels=train_labels, num_subsets=10)
# set cross-validation parameters
cross_val = sg.create_machine_evaluation("CrossValidation",
machine=chaid,
features=train_feats,
labels=train_labels,
splitting_strategy=split,
evaluation_criterion=accuracy,
num_runs=10)
# CHAIDTree is not x-validatable
print(f"Mean classification accuracy : {cross_val.evaluate().get('mean')*100} %")
train_feats=sg.create_features(sg.read_csv( os.path.join(SHOGUN_DATA_DIR, 'uci/housing/fm_housing.dat')))
train_labels=sg.create_labels(sg.read_csv( os.path.join(SHOGUN_DATA_DIR, 'uci/housing/housing_label.dat')))
# print range of regression labels - this is useful for calculating relative deviation later
print('labels range : '+str(np.ptp(train_labels.get("labels"))))
def get_cv_error(max_depth):
    # set feature types - all continuous(2) except the 4th column, which is nominal(0);
    # the 9th and 10th columns are marked ordinal(1)
    feature_types = np.array([2]*13,dtype=np.int32)
    feature_types[3]=0
    feature_types[8]=1
    feature_types[9]=1
    # setup CHAID-tree with the requested maximum depth
    chaid = sg.create_machine("CHAIDTree", dependent_vartype=2,
                              feature_types=feature_types,
                              num_breakpoints=10,
                              max_tree_depth=max_depth)
    # set evaluation criteria - mean squared error
    accuracy = sg.create_evaluation("MeanSquaredError")
    # set splitting criteria - 5 fold cross-validation
    split = sg.create_splitting_strategy("CrossValidationSplitting",
                                         labels=train_labels,
                                         num_subsets=5)
    # set cross-validation parameters
    cross_val = sg.create_machine_evaluation("CrossValidation",
                                             machine=chaid,
                                             features=train_feats,
                                             labels=train_labels,
                                             splitting_strategy=split,
                                             evaluation_criterion=accuracy,
                                             num_runs=3)
    # return cross validation result
    return cross_val.evaluate().get("mean")
cv_errors = [get_cv_error(i) for i in range(1,10)]
plt.plot(range(1,10),cv_errors,'bo',range(1,10),cv_errors,'k')
plt.xlabel('max_allowed_depth')
plt.ylabel('cross-validated error')
plt.show()
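# (Added sketch, not part of the original notebook) the relative deviation mentioned
# in the accompanying text: square root of the smallest cross-validated MSE divided
# by the label range printed earlier.
label_range = np.ptp(train_labels.get("labels"))
print('approx. relative error : ' + str(np.sqrt(min(cv_errors)) / label_range))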
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want to create a decision tree from the above training dataset. The first step for that is to encode the data into numeric values and bind them to Shogun's features and multiclass labels.
Step2: Next, we learn our decision tree using the features and labels created.
Step3: Our decision tree is ready now and we want to use it to make some predictions over test data. So, let us create some test data examples first.
Step4: Next, as with training data, we encode our test dataset and bind it to Shogun features. Then, we apply our decision tree to the test examples to obtain the predicted labels.
Step5: Finally let us tabulate the results obtained and compare them with our intuitive predictions.
Step6: So, do the predictions made by our decision tree match our inferences from training set? Yes! For example, from the training set we infer that the individual having low income has low usage and also all individuals going to college have medium usage. The decision tree predicts the same for both cases.
Step7: Next, let us read the file and form Shogun features and labels.
Step8: From the entire dataset, let us choose some test vectors to form our test dataset.
Step9: Next step is to train our decision tree using the training features and applying it to our test dataset to get predicted output classes.
Step10: Finally, let us compare our predicted labels with test labels to find out the percentage error of our classification model.
Step11: We see that the accuracy is moderately high. Thus our decision tree can evaluate any car given its features with a high success rate. As a final exercise, let us examine the effect of training dataset size on the accuracy of decision tree.
Step12: NOTE
Step13: In the above plot the training data points are marked with different colours of crosses, where each colour corresponds to a particular label. The test data points are marked by black circles. For us it is a trivial task to assign correct colours (i.e. labels) to the black points. Let us see how accurately C4.5 assigns colours to these test points.
Step14: Now that we have trained the decision tree, we can use it to classify our test vectors.
Step15: Let us use the output labels to colour our test data points to qualitatively judge the performance of the decision tree.
Step16: We see that the decision tree trained using the C4.5 algorithm works almost perfectly in this toy dataset. Now let us try this algorithm on a real world dataset.
Step17: Because there is no separate test dataset, we first divide the given dataset into training and testing subsets.
Step18: Before marching forward with applying C4.5, let us plot the data to get a better understanding. The given data points are 4-D and hence cannot be conveniently plotted. We need to reduce the number of dimensions to 2. This reduction can be achieved using any dimension reduction algorithm like PCA. However for the sake of brevity, let us just choose two highly correlated dimensions, petal width and petal length (see summary statistics), right away for plotting.
Step19: First, let us create Shogun features and labels from the given data.
Step20: We know for a fact that decision trees tend to overfit, so pruning becomes a necessary step. In the case of the toy dataset, we skipped the pruning step because the dataset was simple and noise-free. But for a real dataset like the Iris dataset, pruning cannot be skipped. So we have to partition the training dataset into a training subset and a validation subset.
Step21: Now we train the decision tree first, then prune it and finally use it to get output labels for test vectors.
Step22: Let us calculate the accuracy of the classification made by our tree as well as plot the results for qualitative evaluation.
Step23: From the evaluation of results, we infer that, with the help of a C4.5 trained decision tree, we can predict (with high accuracy) the type of Iris plant given its petal and sepal widths and lengths.
Step24: Next, we supply necessary parameters to the CART algorithm and use it train our decision tree.
Step25: In the above code snippet, we see four parameters being supplied to the CART tree object. feat_types supplies knowledge of the attribute types of the training data to the CART algorithm, and problem_type specifies whether it is a multiclass classification problem (PT_MULTICLASS) or a regression problem (PT_REGRESSION). The boolean parameter use_cv_pruning switches on cross-validation pruning of the trained tree, and num_folds specifies the number of folds of cross-validation to be applied while pruning. At this point, let us divert ourselves briefly towards understanding what kind of pruning strategy is employed by Shogun's CART implementation. The CART algorithm uses the cost-complexity pruning strategy. Cost-complexity pruning yields a list of subtrees of varying depths using the complexity normalized resubstitution error, $R_\alpha(T)$ (a short formula sketch follows this step list). The resubstitution error, R(T), measures how well a decision tree fits the training data, but this measure favours larger trees over smaller ones. Hence the complexity normalized resubstitution error metric is used, which adds a penalty for increased complexity and in turn counters overfitting.
Step26: Regression example using toy data
Step27: Next, we train our CART-tree.
Step28: Now let us use the trained decision tree to regress over the entire range of the previously depicted sinusoid.
Step29: As we can see from the above plot, CART-induced decision tree follows the reference sinusoid quite beautifully!
Step30: Next, we setup the model which is CART-tree in this case.
Step31: Finally we can use Shogun's cross-validation class to get performance.
Step32: We get a mean accuracy of about 0.93-0.94. This number essentially means that a CART-tree trained using this dataset is expected to classify Iris flowers, given their required attributes, with an accuracy of 93-94% in a real world scenario. The parameters required by Shogun's cross-validation class should be noted in the above code snippet. The class requires the model, training features, training labels, splitting strategy and evaluation method to be specified.
Step33: The servo dataset is a small training dataset (contains just 167 training vectors) with no separate test dataset, like the Iris dataset. Hence we will apply the same cross-validation strategy we applied in case of the Iris dataset. However, to make things interesting let us play around with a yet-untouched parameter of CART-induced tree i.e. the maximum allowed tree depth. As the tree depth increases, the tree becomes more complex and hence fits the training data more closely. By setting a maximum allowed tree depth, we restrict the complexity of trained tree and hence avoid over-fitting. But choosing a low value of the maximum allowed tree depth may lead to early stopping i.e. under-fitting. Let us explore how we can decide the appropriate value of the max-allowed-tree-depth parameter. Let us create a method, which takes max-allowed-tree-depth parameter as input and returns the corresponding cross-validated error as output.
Step34: Next, let us supply a range of max_depth values to the above method and plot the returned cross-validated errors.
Step35: The above plot quite clearly gives us the most appropriate value of the maximum allowed depth. We see that the first minimum occurs at a maximum allowed depth of 6-8. Hence, one of these should be the desired value. It is to be noted that the error metric we are discussing here is the mean squared error. Thus, from the above plot, we can also claim that, given the required parameters, our CART-flavoured decision tree can predict the rise time within an average error range of $\pm0.5$ (i.e. the square root of 0.25, which is the approximate minimum cross-validated error). The relative error, i.e. average_error/range_of_labels, comes out to be ~30%.
Step36: Now, we set up our CHAID-tree with appropriate parameters and train over given data.
Step37: An important point to be noted in the above code snippet is that CHAID training modifies the training data. The actual continuous feature values are replaced by the discrete ordinal values obtained during continuous to ordinal conversion. Notice the difference between the original feature matrix and the updated matrix. The updated matrix contains only 10 distinct values denoting all values of the original matrix for feature dimension at row index 1.
Step38: Regression example with toy dataset
Step39: As usual, we start by setting up our decision tree and training it.
Step40: Next, we use the trained decision tree to follow the reference sinusoid.
Step41: A distinguishing feature about the predicted curve is the presence of steps. These steps are essentially an artifact of continuous to ordinal conversion. If we decrease the number of bins for the conversion the step widths will increase.
Step42: Like the case of CART, here we are also interested in finding out the approximate accuracy with which our CHAID tree trained on this dataset will perform in real world. Hence, we will apply the cross validation strategy. But first we specify the parameters of the CHAID tree.
Step43: Next, we set up the cross-validation class and get back the performance estimate we want, i.e. the mean classification accuracy.
Step44: Regression example using real dataset
Step45: Next, we set up the parameters for the CHAID tree as well as the cross-validation class.
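A short sketch of the pruning criterion referenced in Step25 (standard CART notation, not taken verbatim from the Shogun documentation): for a subtree $T$ with $|T|$ terminal nodes, the complexity normalized resubstitution error is
$$R_\alpha(T) = R(T) + \alpha\,|T|,$$
so a larger $\alpha$ penalizes bigger trees more heavily, and cost-complexity pruning selects, for each $\alpha$, the subtree minimizing $R_\alpha(T)$.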
|
996 | <ASSISTANT_TASK:>
Python Code:
import logging # python logging module
# basic format for logging
logFormat = "%(asctime)s - [%(levelname)s] (%(funcName)s:%(lineno)d) %(message)s"
# logs will be stored in tweepy.log
logging.basicConfig(filename='tweepy.log', level=logging.INFO,
format=logFormat, datefmt="%Y-%m-%d %H:%M:%S")
import tweepy # importing all the modules required
import socket # will be used to create sockets
import json # manipulate json
# Keep these tokens secret, as anyone can have full access to your
# twitter account, using these tokens
consumerKey = "#"
consumerSecret = "#"
accessToken = "#"
accessTokenSecret = "#"
# Performing the authentication and authorization, post this step
# we will have full access to twitter api's
def connectToTwitter():
    """Connect to twitter."""
    try:
        auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
        auth.set_access_token(accessToken, accessTokenSecret)
        api = tweepy.API(auth)
        logging.info("Successfully logged in to twitter.")
        return api, auth
    except Exception as e:
        logging.info("Something went wrong in oauth, please check your tokens.")
        logging.error(e)
# Tweet listner class which subclasses from tweepy.StreamListener
class TweetListner(tweepy.StreamListener):
    """Twitter stream listener."""
    def __init__(self, csocket):
        self.clientSocket = csocket

    def dataProcessing(self, data):
        """Process the data before sending it to spark streaming."""
        sendData = {}  # data that is sent to spark streamer
        user = data.get("user", {})
        name = user.get("name", "undefined").encode('utf-8')
        followersCount = user.get("followers_count", 0)
        sendData["name"] = name
        sendData["followersCount"] = followersCount
        #data_string = "{}:{}".format(name, followersCount)
        self.clientSocket.send(json.dumps(sendData) + u"\n")  # append new line character, so that spark recognizes it
        logging.debug(json.dumps(sendData))

    def on_data(self, raw_data):
        """Called when raw data is received from connection.

        Return False to stop the stream and close the connection.
        """
        try:
            data = json.loads(raw_data)
            self.dataProcessing(data)
            return True
        except Exception as e:
            logging.error("An unhandled exception has occurred, check your data processing")
            logging.error(e)
            return False

    def on_error(self, status_code):
        """Called when a non-200 status code is returned."""
        logging.error("A non-200 status code is returned")
        return False

    def on_exception(self, exception):
        """Called when an unhandled exception occurs."""
        logging.error("An unhandled exception has occurred")
        return
# Creating a proxy socket
def createProxySocket(host, port):
    """Returns a socket which can be used to connect
    to spark.
    """
    try:
        s = socket.socket()  # initialize socket instance
        s.bind((host, port))  # bind to the given host and port
        s.listen(5)  # Enable a server to accept connections.
        logging.info("Listening on the port {}".format(port))
        cSocket, address = s.accept()  # waiting for a connection
        logging.info("Received Request from: {}".format(address))
        return cSocket
    except socket.error as e:
        if e.errno == socket.errno.EADDRINUSE:  # Address in use
            logging.error("The given host:port {}:{} is already in use"\
                          .format(host, port))
            logging.info("Trying on port: {}".format(port + 1))
            return createProxySocket(host, port + 1)
def getWOEIDForTrendsAvailable(api, place):
    """Returns the WOEID of the country if the trend is available there."""
    # Iterate through trends
    data = api.trends_available()
    for item in data:
        if item["name"] == place:  # Use place = "Worldwide" to get woeid of world
            woeid = item["woeid"]
            break
    return woeid  # name = India, woeid
# Get the list of trending topics from twitter
def getTrendingTopics(api, woeid):
    """Get the top trending topics from twitter."""
    data = api.trends_place(woeid)
    listOfTrendingTopic = [trend["name"] for trend in data[0]["trends"]]
    return listOfTrendingTopic
if __name__ == "__main__":
    try:
        api, auth = connectToTwitter()  # connecting to twitter
        # Global information is available by using 1 as the WOEID
        # woeid = getWOEIDForTrendsAvailable(api, "Worldwide") # get the woeid of the worldwide
        woeid = 1
        trendingTopics = getTrendingTopics(api, woeid)
        host = "localhost"
        port = 8888
        cSocket = createProxySocket(host, port)  # Creating a socket
        tweetStream = tweepy.Stream(auth, TweetListner(cSocket))  # Stream the twitter data
        tweetStream.filter(track=trendingTopics)  # Filter on trending topics
    except KeyboardInterrupt:  # Keyboard interrupt called
        logging.error("KeyboardInterrupt was hit")
    except Exception as e:
        logging.error("Unhandled exception has occurred")
        logging.error(e)
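# (Added sketch, not part of the original script) a minimal Spark Streaming consumer
# for the socket opened above, assuming pyspark is installed and `sc` is an existing
# SparkContext; the name `ssc` and the 10-second batch interval are illustrative only.
# from pyspark.streaming import StreamingContext
# ssc = StreamingContext(sc, 10)
# tweets = ssc.socketTextStream("localhost", 8888)
# tweets.map(json.loads).pprint()
# ssc.start(); ssc.awaitTermination()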
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication and Authorisation
Step3: Post this step, we will have full access to the Twitter APIs
Step10: Streaming with tweepy
Step13: Drawbacks of twitter streaming API
|
997 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
# The initial set of baby names and birth rates
names = ['Bob','Jessica','Mary','John','Mel']
births = [968, 155, 77, 578, 973]
BabyDataSet = zip(names,births)
BabyDataSet
df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])
df
df.to_csv('births1880.csv',index=False,header=False)
df = pd.read_csv("births1880.csv")
df
df = pd.read_csv("births1880.csv", header=None)
df
df = pd.read_csv("births1880.csv", names=['Names','Births'])
df
df = pd.read_csv("https://raw.githubusercontent.com/bigmlcom/bigmler/master/data/iris.csv")
df.head(10)
# show basic statistics
df.describe()
# Select a column
df["sepal length"].head()
# select columns
df[["sepal length", "petal width"]].head()
# select rows by name
df.loc[5:10]
# select rows by position
df.iloc[5:10]
# select rows by condition
df[df["sepal length"] > 5.0]
df["sepal length"].max()
df.sort("sepal length", ascending=False).head(1)
df.sort("petal length", ascending=True)
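# (Added note, not part of the original notebook) DataFrame.sort was removed in
# newer pandas; on pandas >= 0.20 use sort_values instead, e.g.
# df.sort_values("sepal length", ascending=False).head(1)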
df.iloc[1]
import matplotlib.pyplot as plt
%matplotlib inline
df.hist()
import seaborn as sns
# make the plots to show right below the codes
%matplotlib inline
import numpy as np
x = np.random.normal(size=100)
sns.distplot(x)
import random
x = [random.normalvariate (0, 1) for i in range (0, 1000)]
sns.distplot (x)
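# (Added note, not part of the original notebook) seaborn has deprecated distplot;
# on newer versions an equivalent call is sns.histplot(x, kde=True) or sns.displot(x, kde=True).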
mean, cov = [0, 1], [(1, .5), (.5, 1)]
data = np.random.multivariate_normal(mean, cov, 200)
df = pd.DataFrame(data, columns=["x", "y"])
sns.jointplot(x="x", y="y", data=df)
sns.jointplot(x="x", y="y", data=df, kind="hex")
sns.jointplot(x="x", y="y", data=df, kind="kde")
iris = sns.load_dataset("iris")
sns.pairplot(iris)
# we can add colors to different species
sns.pairplot(iris, hue="species")
tips = sns.load_dataset("tips")
tips.head()
sns.regplot(x="total_bill", y="tip", data=tips)
anscombe = sns.load_dataset("anscombe")
sns.regplot(x="x", y="y", data=anscombe[anscombe["dataset"] == "II"])
sns.regplot(x="x", y="y", data=anscombe[anscombe["dataset"] == "II"], order=2)
sns.stripplot(x="day", y="total_bill", data=tips)
sns.boxplot(x="day", y="total_bill", hue="time", data=tips)
titanic = sns.load_dataset("titanic")
sns.barplot(x="sex", y="survived", hue="class", data=titanic)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Data
Step2: To merge these two lists together we will use the zip function.
Step3: We are basically done creating the data set. We now will use the pandas library to export this data set into a csv file.
Step4: Export the dataframe to a csv file. We can name the file births1880.csv. The function to_csv will be used to export the file. The file will be saved in the same location of the notebook unless specified otherwise.
Step5: Get Data
Step6: This brings us to our first problem of the exercise. The read_csv function treated the first record in the csv file as the header names. This is obviously not correct since the text file did not provide us with header names.
Step7: If we wanted to give the columns specific names, we would have to pass another paramter called names. We can also omit the header parameter.
Step8: It is also possible to read in a csv file by passing an url address
Step9: Analyze Data
Step10: We can get the maximum sepal length by
Step11: If we want to find full information of the flower with maximum sepal length
Step12: Exercise
Step13: Pandas also has some basic plotting functions.
Step14: Part 2
Step15: Plotting univariate distributions
Step16: Plotting bivariate distributions
Step17: Scatter plot
Step18: Hexbin plot
Step19: Kernel density estimation
Step20: Visualizing pairwise relationships in a dataset
Step21: Visualizing linear relationships
Step22: We can use the function regplot to show the linear relationship between total_bill and tip.
Step23: Visualizing higher order relationships
Step24: The plot clearly shows that this is not a good model.
Step25: Strip plots
Step26: Boxplots
Step27: Bar plots
|
998 | <ASSISTANT_TASK:>
Python Code:
import mbuild as mb
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        lj_particle1 = mb.Particle(name='LJ', pos=[0, 0, 0])
        self.add(lj_particle1)
        lj_particle2 = mb.Particle(name='LJ', pos=[1, 0, 0])
        self.add(lj_particle2)
        lj_particle3 = mb.Particle(name='LJ', pos=[0, 1, 0])
        self.add(lj_particle3)
        lj_particle4 = mb.Particle(name='LJ', pos=[0, 0, 1])
        self.add(lj_particle4)
        lj_particle5 = mb.Particle(name='LJ', pos=[1, 0, 1])
        self.add(lj_particle5)
        lj_particle6 = mb.Particle(name='LJ', pos=[1, 1, 0])
        self.add(lj_particle6)
        lj_particle7 = mb.Particle(name='LJ', pos=[0, 1, 1])
        self.add(lj_particle7)
        lj_particle8 = mb.Particle(name='LJ', pos=[1, 1, 1])
        self.add(lj_particle8)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        for i in range(0,2):
            for j in range(0,2):
                for k in range(0,2):
                    lj_particle = mb.clone(lj_proto)
                    pos = [i,j,k]
                    mb.translate(lj_particle, pos)
                    self.add(lj_particle)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern = mb.Grid3DPattern(2, 2, 2)
        pattern.scale(2)
        for pos in pattern:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
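# (Added note, not part of the original tutorial) Grid3DPattern generates fractional
# points in [0, 1), so pattern.scale(2) above spaces the 2x2x2 grid points one unit
# apart along each axis, matching the manually placed particles in the first example.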
import mbuild as mb
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern = mb.Grid2DPattern(5, 5)
        pattern.scale(5)
        for pos in pattern:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern_sphere = mb.SpherePattern(200)
        pattern_sphere.scale(0.5)
        for pos in pattern_sphere:
            lj_particle = mb.clone(lj_proto)
            pos[0]-=1.0
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
        pattern_disk = mb.DiskPattern(200)
        pattern_disk.scale(0.5)
        for pos in pattern_disk:
            lj_particle = mb.clone(lj_proto)
            pos[0]+=1.0
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 1.0}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
class SphereLJ(mb.Compound):
    def __init__(self):
        super(SphereLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern_sphere = mb.SpherePattern(200)
        pattern_sphere.scale(0.5)
        for pos in pattern_sphere:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
class DiskLJ(mb.Compound):
    def __init__(self):
        super(DiskLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern_disk = mb.DiskPattern(200)
        pattern_disk.scale(0.5)
        for pos in pattern_disk:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        sphere = SphereLJ()
        pos = [-1, 0, 0]
        mb.translate(sphere, pos)
        self.add(sphere)
        disk = DiskLJ()
        pos = [1, 0, 0]
        mb.translate(disk, pos)
        self.add(disk)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 1.0}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
class SphereLJ(mb.Compound):
    def __init__(self):
        super(SphereLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern_sphere = mb.SpherePattern(13)
        pattern_sphere.scale(0.5)
        for pos in pattern_sphere:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        sphere = SphereLJ()
        pattern = mb.Grid3DPattern(3, 3, 3)
        pattern.scale(10)
        for pos in pattern:
            lj_sphere = mb.clone(sphere)
            mb.translate_to(lj_sphere, pos)
            # shift the particle so the center of mass
            # of the system is at the origin
            mb.translate(lj_sphere, [-5,-5,-5])
            self.add(lj_sphere)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbf, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
import mbuild as mb
import random
from numpy import pi
class CubeLJ(mb.Compound):
    def __init__(self):
        super(CubeLJ, self).__init__()
        lj_proto = mb.Particle(name='LJ', pos=[0, 0, 0])
        pattern = mb.Grid3DPattern(2, 2, 2)
        pattern.scale(1)
        for pos in pattern:
            lj_particle = mb.clone(lj_proto)
            mb.translate(lj_particle, pos)
            self.add(lj_particle)
class MonoLJ(mb.Compound):
    def __init__(self):
        super(MonoLJ, self).__init__()
        cube_proto = CubeLJ()
        pattern = mb.Grid3DPattern(3, 3, 3)
        pattern.scale(10)
        rnd = random.Random()
        rnd.seed(123)
        for pos in pattern:
            lj_cube = mb.clone(cube_proto)
            mb.translate_to(lj_cube, pos)
            # shift the particle so the center of mass
            # of the system is at the origin
            mb.translate(lj_cube, [-5,-5,-5])
            mb.spin_x(lj_cube, rnd.uniform(0, 2 * pi))
            mb.spin_y(lj_cube, rnd.uniform(0, 2 * pi))
            mb.spin_z(lj_cube, rnd.uniform(0, 2 * pi))
            self.add(lj_cube)
monoLJ = MonoLJ()
colors = {'LJ': {'color': 0xbfbfbb, 'radius': 5}}
monoLJ.visualize(element_properties=colors)
#save as xyz file
monoLJ.save('output.xyz')
#save as mol2
monoLJ.save('output.mol2')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: While this would work for defining a single molecule or very small system, this would not be efficient for large systems. Instead, the clone and translate operator can be used to facilitate automation. Below, we simply define a single prototype particle (lj_proto), which we then copy and translate about the system.
Step2: To simplify this process, mBuild provides several built-in patterning tools; for example, Grid3DPattern can be used to perform this same operation. Grid3DPattern generates a set of points, from 0 to 1, which get stored in the variable "pattern". We need only loop over the points in pattern, cloning, translating, and adding to the system. Note, because Grid3DPattern defines points between 0 and 1, they must be scaled based on the desired system size, i.e., pattern.scale(2).
Step3: Larger systems can therefore be easily generated by toggling the values given to Grid3DPattern. Other patterns can also be generated using the same basic code, such as a 2D grid pattern
Step4: Points on a sphere can be generated using SpherePattern. Points on a disk using DisKPattern, etc.
Step5: We can also take advantage of the hierachical nature of mBuild to accomplish the same task more cleanly. Below we create a component that corresponds to the sphere (class SphereLJ), and one that corresponds to the disk (class DiskLJ), and then instantiate and shift each of these individually in the MonoLJ component.
Step6: Again, since mBuild is hierarchical, the pattern functions can be used to generate large systems of any arbitary component. For example, we can replicate the SphereLJ component on a regular array.
Step7: Several functions exist for rotating compounds. For example, the spin command allows a compound to be rotated, in place, about a specific axis (i.e., it considers the origin for the rotation to lie at the compound's center of mass).
Step8: Configurations can be dumped to file using the save command; this takes advantage of MDTraj and supports a range of file formats (see http
|
999 | <ASSISTANT_TASK:>
Python Code:
# %load sflow.py
# [SublimeLinter pep8-max-line-length:300]
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
#
# MAIN
#
# -------------------------------------------------------------------------
if __name__ == '__main__':
    from src.environment import Environment
    from src.runner import Runner
    # We have to pass in the path of the environment xml as args[0]
    # the name of the environment xml as args[1] here:
    # and the scenario name as args[2] here:
    args = ["configs/environment/", "environment_config", "benchmark"]
    #
    # INITIALIZATION
    #
    environment_directory = str(args[0])
    identifier = str(args[1])
    scenario = str(args[2])
    environment = Environment(environment_directory, identifier)
    runner = Runner(environment, scenario)
    #
    # UPDATE STEP
    #
    for i in range(int(environment.static_parameters['num_simulations'])):
        environment.initialize(environment_directory, identifier)
        runner.initialize(environment, scenario)
        # do the run
        runner.do_run(environment, scenario)
# %load configs/agents/hf/hf.xml
<agent identifier="HF">
<parameter type="stock_variables" name="GB" value="40"></parameter>
<parameter type="stock_variables" name="CB" value="20"></parameter>
<parameter type="stock_variables" name="Cash" value="10"></parameter>
<parameter type="stock_variables" name="does_repo" value="yes"></parameter>
</agent>
<!-- <parameter type="parameters" name="leverage" value="13.0780116888061"></parameter> -->
# %load configs/environment/environment_config.xml
<environment identifier='toy_test'>
<!-- simulation parameters -->
<parameter type='static' name='num_sweeps' value='10'></parameter>
<parameter type='static' name='num_simulations' value='1'></parameter>
<parameter type='static' name='cbank_directory' value='configs/agents/cbank/'></parameter>
<parameter type='static' name='dealer_directory' value='configs/agents/dealer/'></parameter>
<parameter type='static' name='hf_directory' value='configs/agents/hf/'></parameter>
<parameter type='static' name='pf_directory' value='configs/agents/pf/'></parameter>
<parameter type='static' name='ic_directory' value='configs/agents/ic/'></parameter>
<parameter type='static' name='mmf_directory' value='configs/agents/mmf/'></parameter>
<parameter type='static' name='if_directory' value='configs/agents/if/'></parameter>
<parameter type='exogenous' name='price_GB' value="1.0"></parameter>
<parameter type='exogenous' name='price_CB' value="1.0"></parameter>
<parameter type='exogenous' name='haircut' value="0.05"></parameter>
<parameter type='exogenous' name='interest_GB' value="0.02"></parameter>
<parameter type='exogenous' name='interest_CB' value="0.04"></parameter>
<parameter type='exogenous' name='interest_repo' value="0.02"></parameter>
<parameter type='exogenous' name='interest_loans' value="0.02"></parameter>
<parameter type='exogenous' name='interest_deposits' value="0.02"></parameter>
<parameter type='exogenous' name='GB_shock' value="-0.1"></parameter>
<parameter type='exogenous' name='CB_shock' value="-0.1"></parameter>
<parameter type='exogenous' name='Redemption' value="-0.1"></parameter>
</environment>
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# class Updater
# -------------------------------------------------------------------------
import numpy as np
class Updater():
#
#
# METHODS
#
    def get_identifier(self):
        return self.identifier

    # -------------------------------------------------------------------------
    # __init__
    # -------------------------------------------------------------------------
    def __init__(self, environment, runner):
        self.environment = environment
        self.runner = runner
        self.results_df = 0
        self.prices = np.array([])
        self.rates = {}
        self.system_equity = 0
        self.system_assets = 0
        self.delta_pGB = 0
        self.delta_pCB = 0
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # do_update
    # -------------------------------------------------------------------------
    def do_update_benchmark(self, environment, current_step, scenario):
        import pandas as pd
        if current_step < 1:
            self.add_rates(environment)
            self.initialize_prices(environment, current_step)
            print "***In t=:", current_step, " This is the price matrix***\n", self.prices, "\n",
            self.initialize_assets_all_agents(current_step, environment)
            self.profit_all_agents(environment, current_step)
        else:
            self.update_all_agents_balance_sheets(environment, current_step, scenario)
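# (Added sketch, not part of the original module) how an Updater instance might be
# driven from the run loop shown earlier; the call pattern is an assumption based on
# the method signature above, not taken from the sflow runner source.
# updater = Updater(environment, runner)
# for step in range(int(environment.static_parameters['num_sweeps'])):
#     updater.do_update_benchmark(environment, step, scenario)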
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the beginning we need to instantiate two objects in the main file to run the program -
Step2: Those type " " names later become dictionaries for the instantiated agent object. So the hedge fund has
Step3: Here you can change the initial interest rates and prices for the marketable assets and also the shocks. It's possible to assign them to other variables and change them later in the updater. It's also possible to not read them in at all (just delete the lines) and define those in the updater script, but in the current setup they get read into the dictionary in the environment, environment.exogenous variables = {price_GB
|