Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, lengths 67–124k, ⌀) | cleaned_prompt (string, lengths 168–30.3k, ⌀) |
---|---|---|
0 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from astropy.io import fits
short_freq = (hdulist[0].header['STEPSIZE'] / (60. * 60. * 24.))
med_freq = (10. / (60. * 24.))
long_freq = 1. / 24.
max_freq = 10. / 24.
from scipy.signal import lombscargle
short_periods = np.linspace(short_freq, med_freq, len(times))
med_periods = np.linspace(med_freq, long_freq, len(times))
long_periods = np.linspace(long_freq, max_freq, len(times))
short_ang_freqs = 2 * np.pi / short_periods
med_ang_freqs = 2 * np.pi / med_periods
long_ang_freqs = 2 * np.pi / long_periods
short_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), short_ang_freqs)
med_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), med_ang_freqs)
long_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), long_ang_freqs)
short_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
med_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
long_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
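# Added sketch (not part of the original notebook): with the normalized powers
# computed above, the strongest candidate period in each search range can be
# read off as the period of maximum power.
short_peak_period = short_periods[np.argmax(short_power)]
med_peak_period = med_periods[np.argmax(med_power)]
long_peak_period = long_periods[np.argmax(long_power)]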
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
ax1.minorticks_on()
ax1.plot(times, counts, 'b+')
ax2.plot(short_periods, short_power)
ax3.plot(med_periods, med_power)
ax4.plot(long_periods, long_power)
ax2.set(xlim=(short_freq, med_freq))
ax3.set(xlim=(med_freq, long_freq))
ax4.set(xlim=(long_freq, max_freq))
fig.tight_layout()
from astroML.time_series import lomb_scargle
errors = [0.0001 for item in counts]
short_power = lomb_scargle(times, counts, errors, short_ang_freqs)
med_power = lomb_scargle(times, counts, errors, med_ang_freqs)
long_power = lomb_scargle(times, counts, errors, long_ang_freqs)
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
ax1.minorticks_on()
ax1.plot(times, counts, 'b+')
ax2.plot(short_periods, short_power)
ax3.plot(med_periods, med_power)
ax4.plot(long_periods, long_power)
ax2.set(xlim=(short_freq, med_freq))
ax3.set(xlim=(med_freq, long_freq))
ax4.set(xlim=(long_freq, max_freq))
fig.tight_layout()
# Open the data and get a subset
counts, times = [], []
for count, time in zip(all_counts, all_times):
if time > 56500:
counts.append(count)
times.append(time)
fig, ax = plt.subplots()
ax.plot(times, counts, 'b+')
short_periods = np.linspace(short_freq, med_freq, len(times))
med_periods = np.linspace(med_freq, long_freq, len(times))
long_periods = np.linspace(long_freq, max_freq, len(times))
short_ang_freqs = 2 * np.pi / short_periods
med_ang_freqs = 2 * np.pi / med_periods
long_ang_freqs = 2 * np.pi / long_periods
short_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), short_ang_freqs)
med_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), med_ang_freqs)
long_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), long_ang_freqs)
short_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
med_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
long_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
ax1.minorticks_on()
ax1.plot(times, counts, 'b+')
ax2.plot(short_periods, short_power)
ax3.plot(med_periods, med_power)
ax4.plot(long_periods, long_power)
ax2.set(xlim=(short_freq, med_freq))
ax3.set(xlim=(med_freq, long_freq))
ax4.set(xlim=(long_freq, max_freq))
fig.tight_layout()
# Open the data and get a subset
hdulist = fits.open('/grp/hst/hstlc/hst13902/outputs/composite/SDSSJ155304.92+354828.6_FUV_G130M_1309_curve.fits', mode='readonly')
subset = np.where(56853 < hdulist[1].data['mjd'])
counts = hdulist[1].data['net'] # use flux for longer timescales, otherwise use net
times = hdulist[1].data['mjd']
counts = counts.byteswap().newbyteorder()
times = times.byteswap().newbyteorder()
# Plot the sample
fig, ax = plt.subplots()
ax.plot(times, counts, 'b+')
short_periods = np.linspace(short_freq, med_freq, len(times))
med_periods = np.linspace(med_freq, long_freq, len(times))
long_periods = np.linspace(long_freq, max_freq, len(times))
short_ang_freqs = 2 * np.pi / short_periods
med_ang_freqs = 2 * np.pi / med_periods
long_ang_freqs = 2 * np.pi / long_periods
short_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), short_ang_freqs)
med_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), med_ang_freqs)
long_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), long_ang_freqs)
short_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
med_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
long_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
ax1.minorticks_on()
ax1.plot(times, counts, 'b+')
ax2.plot(short_periods, short_power)
ax3.plot(med_periods, med_power)
ax4.plot(long_periods, long_power)
ax2.set(xlim=(short_freq, med_freq))
ax3.set(xlim=(med_freq, long_freq))
ax4.set(xlim=(long_freq, max_freq))
fig.tight_layout()
# Open the data and get a subset
hdulist = fits.open('/grp/hst/hstlc/hst13902/outputs/composite/V-KL-UMA_FUV_G160M_1600_curve.fits', mode='readonly')
subset = np.where(56853 < hdulist[1].data['mjd'])
all_counts = hdulist[1].data['net'] # use flux for longer timescales, otherwise use net
all_times = hdulist[1].data['mjd']
counts, times = [], []
for count, time in zip(all_counts, all_times):
if 56853.930 < time < 56853.955:
counts.append(count)
times.append(time)
short_periods = np.linspace(short_freq, med_freq, len(times))
med_periods = np.linspace(med_freq, long_freq, len(times))
long_periods = np.linspace(long_freq, max_freq, len(times))
short_ang_freqs = 2 * np.pi / short_periods
med_ang_freqs = 2 * np.pi / med_periods
long_ang_freqs = 2 * np.pi / long_periods
short_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), short_ang_freqs)
med_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), med_ang_freqs)
long_power = lombscargle(np.asarray(times), np.asarray(counts) - np.asarray(counts).mean(), long_ang_freqs)
short_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
med_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
long_power *= 2 / (len(times) * np.asarray(counts).std() ** 2)
short_mean = np.mean(short_power)
med_mean = np.mean(med_power)
long_mean = np.mean(long_power)
short_std = np.std(short_power)
med_std = np.std(med_power)
long_std = np.std(long_power)
short_three_sigma = 3 * short_std
med_three_sigma = 3 * med_std
long_three_sigma = 3 * long_std
short_power_three_sigma = np.where(short_power > short_three_sigma)
med_power_three_sigma = np.where(med_power > med_three_sigma)
long_power_three_sigma = np.where(long_power > long_three_sigma)
short_period_three_sigma = np.where(short_power > short_three_sigma)
med_period_three_sigma = np.where(med_power > med_three_sigma)
long_period_three_sigma = np.where(long_power > long_three_sigma)
short_starting_index = short_power_three_sigma[0][0]
med_starting_index = med_power_three_sigma[0][0]
long_starting_index = long_power_three_sigma[0][0]
short_significant_periods = scipy.signal.argrelextrema(short_power[short_power_three_sigma], np.greater)
med_significant_periods = scipy.signal.argrelextrema(med_power[med_power_three_sigma], np.greater)
long_significant_periods = scipy.signal.argrelextrema(long_power[long_power_three_sigma], np.greater)
short_significant_periods_three_sigma = short_periods[short_significant_periods[0] + short_starting_index]
med_significant_periods_three_sigma = med_periods[med_significant_periods[0] + med_starting_index]
long_significant_periods_three_sigma = long_periods[long_significant_periods[0] + long_starting_index]
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
ax1.minorticks_on()
ax1.plot(times, counts, 'b+')
ax2.plot(short_periods, short_power)
ax2.axhline(short_mean, color='r', linestyle='-')
ax2.axhline(short_three_sigma, color='g', linestyle='-')
for period in short_significant_periods_three_sigma:
ax2.axvline(period, color='k', linestyle='--')
ax2.set(xlim=(short_freq, med_freq))
ax3.plot(med_periods, med_power)
ax3.axhline(med_mean, color='r', linestyle='-')
ax3.axhline(med_three_sigma, color='g', linestyle='-')
for period in med_significant_periods_three_sigma:
    ax3.axvline(period, color='k', linestyle='--')
ax3.set(xlim=(med_freq, long_freq))
ax4.plot(long_periods, long_power)
ax4.axhline(long_mean, color='r', linestyle='-')
ax4.axhline(long_three_sigma, color='g', linestyle='-')
for period in long_significant_periods_three_sigma:
    ax4.axvline(period, color='k', linestyle='--')
ax4.set(xlim=(long_freq, max_freq))
fig.tight_layout()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lomb-Scargle Periodogram via scipy
Step2: Lomb-Scargle Periodogram via astroML
Step3: Try with more data!
Step4: Try with noisy data
Step5: Automatically detect significant periods
|
1 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
def checkerboard(size):
    """Return a 2d checkerboard of 0.0 and 1.0 as a NumPy array."""
check = np.zeros((size,size),float)
check.fill(0.0)
n = 0
while n<(size):
if n % 2 == 0: #For even number rows, start filling 1's at position 0
p = 0
else: #For odd number rows, start filling 1's at position 1
p = 1
while p<(size):
check[n,p] = (1.0) #Fill 1's at position n,p
p = p + 2 #Skip one position in row before filling in a row (Key to the checkerboard pattern)
n = n + 1 #Move to next row
return check
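# Added cross-check (not part of the original exercise): an equivalent
# vectorized construction of the same pattern, usable to verify the
# loop-based version above.
def checkerboard_vectorized(size):
    """Hypothetical helper: checkerboard of 1.0/0.0 via index parity."""
    ii, jj = np.indices((size, size))
    return ((ii + jj) % 2 == 0).astype(float)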
#print (checkerboard(7)) #Was used to test output
#raise NotImplementedError()
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
va.set_block_size(10)
va.vizarray(checkerboard(20))
#raise NotImplementedError()
assert True
va.set_block_size(5)
va.vizarray(checkerboard(27))
#raise NotImplementedError()
assert True
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Checkerboard
Step3: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
Step4: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
|
2 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
from corner import corner
sys.path.append('.')
from src.mocklc import MockLC, SimulationSetup
from src.blendlpf import MockLPF
import src.plotting as pl
lc = MockLC(SimulationSetup('M', 0.1, 0.0, 0.0, 'short_transit', cteff=5500, know_orbit=True))
lc.create(wnsigma=[0.001, 0.001, 0.001, 0.001], rnsigma=0.00001, rntscale=0.5, nights=1);
lc.plot();
lpf = MockLPF('Example_1', lc)
lpf.print_parameters(columns=2)
lpf.optimize_global(1000)
lpf.plot_light_curves()
lpf.sample_mcmc(5000, reset=True, repeats=2)
df = lpf.posterior_samples()
pl.joint_radius_ratio_plot(df, fw=13, clim=(0.099, 0.12), htelim=(3570, 3630), ctelim=(2400,3800), blim=(0, 0.5), rlim=(3.8, 5.2));
pl.joint_contamination_plot(df, fw=13, clim=(0, 0.4), htelim=(3570, 3630), ctelim=(2400,3800), blim=(0, 0.5), rlim=(3.8, 5.2));
pl.marginal_radius_ratio_plot(df, bins=60, klim=(0.097, 0.12), figsize=(7,5));
corner(df.iloc[:,2:-3]);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a mock light curve
Step2: Initialize the log posterior function
Step3: Optimize
Step4: Estimate the posterior
Step5: Analysis
Step6: Plot the apparent and true radius ratio posteriors
Step7: Make a corner plot to have a good overview to the posterior space
|
3 | <ASSISTANT_TASK:>
Python Code:
df1 = pd.read_csv('listings/30042015/30042015.csv', sep = ";")
df2 = pd.read_csv('listings/17072015/17072015.csv', sep = ";")
df3 = pd.read_csv('listings/02102015/02102015.csv', sep = ";")
df4 = pd.read_csv('listings/03012016/03012016.csv', sep = ";")
df5 = pd.read_csv('listings/08122016/08122016.csv', sep = ";")
df6 = pd.read_csv('listings/08042017/08042017.csv', sep = ";")
dfs_l = (df1, df2, df3, df4, df5, df6)
# convert to datetime for each df
for df in dfs_l:
df.host_since = pd.to_datetime(df.host_since, format="%Y-%m-%d")
df.last_scraped = pd.to_datetime(df.last_scraped, format="%Y-%m-%d")
l_hosts = [df['host_id'].values for df in dfs_l]
df_hosts = pd.DataFrame(l_hosts)
df_hosts = df_hosts.T
df_hosts.columns = ['2015-04-30','2015-07-17','2015-10-02','2016-01-03','2016-12-08','2017-04-08']
df_hosts = df_hosts.apply(lambda x: x.sort_values().values)
print ([len(x) for x in l_hosts])
df_hosts.head()
uniq_id=np.sort(np.unique(np.hstack(l_hosts)))
id_df = pd.DataFrame(uniq_id)
id_df.set_index(0, inplace=True)
# could be improved a lot
## Ignasi, don't look :/
for date in tqdm_notebook(df_hosts.columns):
id_df[date]=''
for i in tqdm_notebook(id_df.index):
if np.any(df_hosts[date].isin([i])):
id_df[date].loc[i] = i
else:
id_df[date].loc[i] = np.nan
id_df.head()
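# Added sketch (not part of the original notebook): the double loop above can
# be replaced by a vectorized membership test per scrape date; this reproduces
# equivalent id_df contents (assuming numpy is available as np, as elsewhere here).
for date in df_hosts.columns:
    id_df[date] = np.where(np.isin(uniq_id, df_hosts[date].values), uniq_id, np.nan)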
last_seen = id_df.apply(lambda x: x.last_valid_index(), axis=1) #magic function last_valid_index!
last_seen = pd.DataFrame(last_seen, columns=['host_until'])
last_seen.host_until = pd.to_datetime(last_seen.host_until, format="%Y-%m-%d")
last_seen_dict = last_seen['host_until'].to_dict()
# map the last valid entry onto each host_id to obtain "host_until"
listing_tot = pd.concat(dfs_l)
listing_tot['host_until'] = listing_tot.host_id.map(last_seen_dict)
listing_tot.head()
listing_tot.to_csv('listings_host_until.csv',sep=';', index=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build a DataFrame where each column contains the host_ids from one scrape, with the scrape date as the column name
Step2: Build a dataframe indexed by the unique IDs from all the dataframes and add the values from the other lists at the corresponding positions, leaving blanks where the host_id was not found
Step3: The last valid entry of each row tells us the last time that host was seen in a scrape
|
4 | <ASSISTANT_TASK:>
Python Code:
!conda install -y netcdf4
from netCDF4 import Dataset, num2date, date2num
from numpy import *
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
x = linspace(0, 1, 100) # generates a hundred values between 0 and 1
f = 2
a = 3
plt.plot(x, sin(2*pi*x*f))
def pltsin(f):
plt.plot(x, sin(2*pi*x*f))
pltsin(0.5)
interact(pltsin, f=(1, 10, 0.2), x = (1, 10, 0.2))
def pltsina(f, a):
plt.plot(x, a*sin(2*pi*x*f))
plt.ylim(-10.5, 10.5)
interact(pltsina, f=(1, 10, 0.2), a = (1, 10, 0.2))
f=Dataset ('ncep-data/air.sig995.2013.nc') # get individual data set out of the right folder
air = f.variables['air'] # get variable
plt.imshow(air[0,:,:]) # display first timestep
# Create function to browse through the days
def sh(time):
plt.imshow(air[time,:,:])
# Now make it interactive
interact(sh, time=(0, 355, 1))
# Browse variable
def sh(time =0, var='air', year = '2013'):
f=Dataset('ncep-data/'+var+'.sig995.'+year+'.nc')
vv=f.variables[var]
plt.imshow(vv[time,:,:])
#Give a list of variables
variabs =['air', 'uwnd', 'vwnd', 'rhum']
year = ['2013', '2014', '2015']
# Now interact with it
interact(sh, time=(0, 355, 1), year = year, var=variabs)
help(sh)
from mpl_toolkits.basemap import Basemap
# create north polar sterographic projection
m=Basemap(projection='npstere', boundinglat=60, lon_0=0, resolution ='l')
m.fillcontinents(color='gray', lake_color='gray')
m.drawparallels(arange(-80.,81.,20.))
m.drawmeridians(arange(-180.,181.,20.))
m.drawmapboundary(fill_color='white')
# Set up some variables
lon = f.variables['lon'][:]
lat = f.variables['lat'][:]
lon, lat = meshgrid(lon, lat)
x, y = m(lon, lat)
def sh(time =0, var='air', year = '2013'):
f=Dataset('ncep-data/'+var+'.sig995.'+year+'.nc')
vv=f.variables[var]
tt=f.variables['time']
dd=num2date(tt[time], tt.units)
m.fillcontinents(color='gray', lake_color='gray')
m.drawparallels(arange(-80.,81.,20.))
m.drawmeridians(arange(-180.,181.,20.))
m.drawmapboundary(fill_color='white')
cs = m.contourf(x, y, vv[time,:,:]-273.15)
interact(sh, year=year, time=(0,355,1), var=variabs)
my_map = Basemap (projection='merc', lat_0=0, lon_0=30,
resolution='h', area_thresh=1000.0,
llcrnrlon=29, llcrnrlat=-1,
urcrnrlon=31, urcrnrlat=1)
# area threshold states how rivers etc look - scale, resolution sets resolution, llcrnlon etc sets box,
# lat and lon decide where you look
my_map.drawcoastlines()
my_map.drawcountries()
my_map.fillcontinents(color='coral')
my_map.drawmapboundary()
my_map.drawmeridians(arange(0,360,30))
my_map.drawparallels(arange(-90, 90, 30))
lon=30
lat=0
x,y=my_map(lon, lat)
my_map.plot(x, y, 'bo', markersize=7.2)
plt.show() # here the function that decides actually plots
# This just lets the output of the following code samples
# display inline on this page, at an appropirate size
from pylab import rcParams
# Create a simple basemap
my_map = Basemap (projection='ortho', lat_0=50, lon_0=0,
resolution='l', area_thresh=1000.0)
my_map.drawcoastlines()
my_map.drawcountries()
my_map.fillcontinents(color='red', lake_color='gray')
plt.show()
#Check the first few lats and longs
import csv
# Open the earthquake data file.
filename = '1.0_week.csv'
# Create empty lists for the latitudes and longitudes.
lats, lons, mags = [], [], []
# Read through the entire file, skip the first line,
# and pull out just the lats and lons.
with open(filename) as f:
# Create a csv reader object.
reader = csv.reader(f)
# Ignore the header row.
next(reader)
# Store the latitudes and longitudes in the appropriate lists.
for row in reader:
lats.append(float(row[1]))
lons.append(float(row[2]))
mags.append(float(row[4]))
# Display the first 5 lats and lons.
print('lats', lats[0:5])
print('lons', lons[0:5])
print('mags', mags[0:5])
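# Added illustration (not in the original): the magnitude range is what will
# drive marker size (and colour) on the map below.
print('magnitude range:', min(mags), max(mags))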
### And now create a plot of these on a map projection
import csv
# Open the earthquake data file.
filename = '1.0_week.csv'
# Create empty lists for the latitudes and longitudes.
lats, lons, mags = [], [], []
# Read through the entire file, skip the first line,
# and pull out just the lats and lons.
with open(filename) as f:
# Create a csv reader object.
reader = csv.reader(f)
# Ignore the header row.
next(reader)
# Store the latitudes and longitudes in the appropriate lists.
for row in reader:
lats.append(float(row[1]))
lons.append(float(row[2]))
mags.append(float(row[4]))
# --- Build Map ---
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
eq_map = Basemap(projection='robin', resolution = 'l', area_thresh = 1000.0,
lat_0=52, lon_0=0)
eq_map.drawcoastlines()
eq_map.drawcountries()
eq_map.fillcontinents(color = 'coral')
eq_map.drawmapboundary()
eq_map.drawmeridians(np.arange(0, 360, 30))
eq_map.drawparallels(np.arange(-90, 90, 30))
min_marker_size = 1
for lon, lat, mag in zip(lons, lats, mags):
    x, y = eq_map(lon, lat)
    msize = mag * min_marker_size
    # colour-code markers by magnitude
    if mag >= 5.0:
        eqcolor = 'ro'
    elif mag >= 1.0:
        eqcolor = 'go'
    else:
        eqcolor = 'yo'
    eq_map.plot(x, y, eqcolor, markersize=msize)
plt.show()
x,y
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add to the function to allow amplitude to be varied and aadd in an additional slider to vary both f and a
Step2: Climate data
Step3: Plotting some live (ish) earthquake data...
Step4: This is great but one cool enhancement would be to make the size of the point represent the magnitude of the earthquake.
|
5 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import numexpr as ne
ne.set_num_threads(10);
rho = np.empty((512,512,512), dtype=np.float32)
rho[:] = np.random.random(rho.shape)
rho_mean = rho.mean(dtype=np.float64).astype(np.float32) # Use double precision for intermediate accumulations
%%timeit
delta = np.exp((rho/rho_mean - 1.)**2.)
%%timeit
delta = ne.evaluate('exp((rho/rho_mean - 1.)**2.)')
ne.set_num_threads(10)
%timeit ne.evaluate('exp((rho/rho_mean - 1.)**2.)')
ne.set_num_threads(1)
%timeit ne.evaluate('exp((rho/rho_mean - 1.)**2.)')
np_delta = np.exp((rho/rho_mean - 1.)**2.)
ne_delta = ne.evaluate('exp((rho/rho_mean - 1.)**2.)')
print np_delta.dtype
print ne_delta.dtype
ne_delta = ne.evaluate('exp((rho/rho_mean - 1)**2.)')
print ne_delta.dtype
ne.set_num_threads(10)
%timeit ne.evaluate('exp((rho/rho_mean - 1)**2.)')
ne.set_num_threads(1)
%timeit ne.evaluate('exp((rho/rho_mean - 1)**2.)')
rho_double = np.random.random((768,768,768))
rho = rho_double.copy()
print 'NumPy:\n\t',
%timeit np_rho2 = rho**2
rho = rho_double.copy()
print 'NumPy inplace:\n\t',
%timeit global rho; rho **= 2
rho = rho_double.copy()
print 'NumExpr:\n\t',
ne.set_num_threads(1)
%timeit ne_rho2 = ne.evaluate('rho**2')
rho = rho_double.copy()
print 'NumExpr inplace:\n\t',
ne_inplace_rho2 = rho
%timeit ne.evaluate('rho**2', out=ne_inplace_rho2)
ne.set_num_threads(10)
rho = rho_double.copy()
print 'NumExpr inplace, multi-threaded:\n\t',
ne_inplace_rho2 = rho
%timeit ne.evaluate('rho**2', out=ne_inplace_rho2)
rho = np.empty((512,512,512), dtype=np.float32)
rho[:] = np.random.random(rho.shape)
%timeit (np.sin(rho**2) + np.cos(rho**3))**.5
ne.set_num_threads(1)
%timeit ne.evaluate('(sin(rho**2) + cos(rho**3))**.5')
ne.set_num_threads(10)
%timeit ne.evaluate('(sin(rho**2) + cos(rho**3))**.5')
import os
os.environ['NUMBA_NUM_THREADS'] = '10'
import numba as nb
import numpy as np
arr = np.empty((4096,4096), dtype=np.float64)
arr[:] = np.random.random(arr.shape)
def py_sum2d(arr):
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
@nb.jit
def nb_sum2d(arr):
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
%timeit py_sum2d(arr)
%timeit nb_sum2d(arr)
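# Added check (not in the original): the jitted and pure-Python sums should
# agree to floating-point tolerance.
assert np.isclose(py_sum2d(arr), nb_sum2d(arr))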
import scipy.special
@nb.jit(nopython=True)
def nb_sum2d_j1(arr):
M, N = arr.shape
result = 0.
for i in range(M):
for j in range(N):
result += scipy.special.j1(arr[i,j])
return result
nb_sum2d_j1(arr)
@nb.vectorize([nb.float64(nb.float64, nb.float64)], nopython=True, target='cpu')
def vec_op(a, b):
return np.sin(a**b)**2.
@nb.vectorize([nb.float64(nb.float64, nb.float64)], nopython=True, target='parallel')
def parallel_vec_op(a, b):
return np.sin(a**b)**2.
%timeit vec_op(arr, 2*arr)
%timeit parallel_vec_op(arr, 2*arr)
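# Added check (not in the original): both ufunc targets should reproduce the
# plain NumPy expression they wrap.
assert np.allclose(vec_op(arr, 2*arr), np.sin(arr**(2*arr))**2.)
assert np.allclose(parallel_vec_op(arr, 2*arr), np.sin(arr**(2*arr))**2.)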
@nb.guvectorize([(nb.float64[:,:], nb.float64[:,:], nb.float64[:,:])], '(nx,ny),(nx,ny)->(nx,ny)', target='cpu', nopython=True)
def guvec(a, b, c):
M, N = a.shape
for i in range(M):
for j in range(N):
c[i,j] = a[i,j]**b[i,j]
@nb.guvectorize([(nb.float64[:,:], nb.float64[:,:], nb.float64[:,:])], '(nx,ny),(nx,ny)->(nx,ny)', target='parallel', nopython=True)
def parallel_guvec(a, b, c):
M, N = a.shape
for i in range(M):
for j in range(N):
c[i,j] = a[i,j]**b[i,j]
%timeit guvec(arr, 2*arr)
%timeit parallel_guvec(arr, 2*arr)
@nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:])], '(ny),(ny)->(ny)', target='parallel', nopython=True)
def parallel_guvec(a, b, c):
N, = a.shape
for j in range(N):
c[j] = a[j]**b[j]
%timeit parallel_guvec(arr, 2*arr)
@nb.jit(nopython=True, parallel=True)
def nb_prange(a, b):
M, N = a.shape
c = np.empty_like(a)
for i in nb.prange(M):
for j in range(N):
c[i,j] = a[i,j]**b[i,j]
%timeit nb_prange(arr, 2*arr)
def py_radial_hist(values, box, bin_edges):
nx,ny,nz = values.shape
X,Y,Z = np.ogrid[0:box:box/nx, 0:box:box/ny, 0:box:box/nz]
radii = X**2 + Y**2 + Z**2
return np.histogram(radii, bins=bin_edges**2, weights=values)
@nb.jit(nopython=True)
def nb_radial_hist(values, boxsize, bin_edges):
nx,ny,nz = values.shape
histogram = np.zeros(len(bin_edges)-1)
# Do binning with squared distances
bin_edges = bin_edges**2
nbp1 = len(bin_edges)
for i in range(nx):
dx2 = (boxsize/nx*i)**2
for j in range(ny):
dy2 = (boxsize/ny*j)**2
for k in range(nz):
dz2 = (boxsize/nz*k)**2
dist = dx2 + dy2 + dz2
if dist < bin_edges[0] or dist > bin_edges[-1]:
continue
for b in range(1,nbp1):
if dist < bin_edges[b]:
histogram[b-1] += values[i,j,k]
break
else: # last bin is closed
histogram[-1] += values[i,j,k]
return histogram
box = 1.
bin_edges = np.linspace(0,box,100)
values = np.random.random((512,512,512))
%timeit py_radial_hist(values, box, bin_edges)
%timeit nb_radial_hist(values, box, bin_edges)
@nb.jit(nopython=True, parallel=True)
def BAD_nb_parallel_radial_hist(values, boxsize, bin_edges):
nx,ny,nz = values.shape
histogram = np.zeros(len(bin_edges)-1)
# parallel=True quirk: some versions wouldn't compile without squaring in-place
bin_edges = bin_edges**2
nbp1 = len(bin_edges)
for i in range(nx):
for j in range(ny):
# another quirk: prange must be in inner loop
for k in nb.prange(nz):
dx2 = (boxsize/nx*i)**2
dy2 = (boxsize/ny*j)**2
dz2 = (boxsize/nz*k)**2
dist = dx2 + dy2 + dz2
if dist < bin_edges[0] or dist > bin_edges[-1]:
continue
for b in range(1,nbp1):
if dist < bin_edges[b]:
histogram[b-1] += values[i,j,k]
break
else: # last bin is closed
histogram[-1] += values[i,j,k]
# also some versions wouldn't compile without a return!
return histogram
py_answer = py_radial_hist(values, box, bin_edges)[0]
nb_answer = nb_radial_hist(values, box, bin_edges)
BAD_parallel_answer = BAD_nb_parallel_radial_hist(values, box, bin_edges)
print np.allclose(py_answer, nb_answer)
print np.allclose(py_answer, BAD_parallel_answer)
BAD_parallel_answer2 = BAD_nb_parallel_radial_hist(values, box, bin_edges)
print np.allclose(BAD_parallel_answer, BAD_parallel_answer2)
@nb.jit(nopython=True, parallel=True)
def nb_parallel_radial_hist(values, boxsize, bin_edges):
nx,ny,nz = values.shape
histogram = np.zeros((nz, len(bin_edges)-1))
bin_edges = bin_edges**2
nbp1 = len(bin_edges)
for i in range(nx):
for j in range(ny):
# prange only works on inner loop
for k in nb.prange(nz):
dx2 = (boxsize/nx*i)**2
dy2 = (boxsize/ny*j)**2
dz2 = (boxsize/nz*k)**2
dist = dx2 + dy2 + dz2
if dist < bin_edges[0] or dist > bin_edges[-1]:
continue
for b in range(1,nbp1):
if dist < bin_edges[b]:
histogram[k, b-1] += values[i,j,k]
break
else: # last bin is closed
histogram[k, -1] += values[i,j,k]
# Silly! This could be written as
# reduced_hist = histogram.sum(axis=0)
# but Numba auto-parallelization doesn't support axis reductions
reduced_hist = np.zeros(len(bin_edges)-1)
for b in range(len(reduced_hist)):
for k in range(nz):
reduced_hist[b] += histogram[k, b]
return reduced_hist
parallel_answer = nb_parallel_radial_hist(values, box, bin_edges)
print np.allclose(py_answer, parallel_answer)
%timeit nb_radial_hist(values, box, bin_edges)
%timeit BAD_nb_parallel_radial_hist(values, box, bin_edges)
%timeit nb_parallel_radial_hist(values, box, bin_edges)
@nb.jit(nopython=True, parallel=True)
def BAD_nb_parallel_radial_hist_flat(values, boxsize, bin_edges):
nx,ny,nz = values.shape
histogram = np.zeros(len(bin_edges)-1)
bin_edges = bin_edges**2
nbp1 = len(bin_edges)
for x in nb.prange(nx*ny*nz):
i = x / (ny*nz)
j = (x % (ny*nz)) / nz
k = x % nz
dx2 = (boxsize/nx*i)**2
dy2 = (boxsize/ny*j)**2
dz2 = (boxsize/nz*k)**2
dist = dx2 + dy2 + dz2
if dist < bin_edges[0] or dist > bin_edges[-1]:
continue
for b in range(1,nbp1):
if dist < bin_edges[b]:
histogram[b-1] += values[i,j,k]
break
else: # last bin is closed
histogram[-1] += values[i,j,k]
return histogram
%timeit BAD_nb_parallel_radial_hist_flat(values, box, bin_edges)
import Abacus.Analysis.PowerSpectrum.Histogram as AbacusHistogram
#reload(AbacusHistogram.Histogram.)
# Pre-compute the mean radius of the bin and the counts per bin
bin_info = AbacusHistogram.RadialBinGrid(box, values, bin_edges)
%timeit AbacusHistogram.RadialBinGrid(box, values, bin_edges, bin_info=bin_info)
%%writefile build_ffilib.py
if __name__ == '__main__':
    # Compile the FFI lib
    import cffi
    ffibuilder = cffi.FFI()
    ffibuilder.set_source('_gslffilib',
        r'''
        #include <gsl/gsl_sf_bessel.h>  // This gets compiled
        ''', libraries=['gsl', 'gslcblas'])
    ffibuilder.cdef('''
        double gsl_sf_bessel_j1 (double x);  // This can be copied straight from the man page and is parsed by CFFI
    ''')
    ffibuilder.compile(verbose=True)
!python build_ffilib.py
%ls *ffi*
import _gslffilib
_gslffilib.lib.gsl_sf_bessel_j1(1.)
import numpy as np
import numba as nb
import numba.cffi_support
numba.cffi_support.register_module(_gslffilib)
gsl_sf_bessel_j1 = _gslffilib.lib.gsl_sf_bessel_j1
import scipy.special
@nb.jit(nopython=True)
def nb_sum2d_cffi(arr):
M, N = arr.shape
result = 0.
for i in range(M):
for j in range(N):
result += gsl_sf_bessel_j1(arr[i,j])
return result
nb_sum2d_cffi(np.random.random((128,128)))
import ctypes as ct
libgsl = ct.cdll.LoadLibrary("libgsl.so")
# Set the argument and return types
libgsl.gsl_sf_bessel_j1.restype = ct.c_double
libgsl.gsl_sf_bessel_j1.argtypes = [ct.c_double]
libgsl.gsl_sf_bessel_j1(1.)
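# Added check (not in the original): the ctypes and CFFI bindings wrap the same
# GSL routine, so they should return identical values.
assert libgsl.gsl_sf_bessel_j1(1.) == _gslffilib.lib.gsl_sf_bessel_j1(1.)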
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The NumPy way
Step2: The Numexpr way
Step3: We were using 10 cores. Did our speedup come from multi-threading or loop-blocking/vectorization?
Step4: Wait, what happened? Why is single-threaded NumExpr slower than NumPy?
Step5: NumExpr changed the computation to double precision while we weren't looking! Floats like 1. are always interpreted as doubles (in Python and NumExpr), and NumExpr uses the highest precision of any operand (in symmetric binary operators, but not ** for some reason). This is in contrast to NumPy, which respects the dtype of the array operands.
Step6: So even in the single-threaded case, we got a 10% speedup. NumExpr achieves this by doing the division and subtraction on small blocks of the array rather than doing the divison on the whole array then a second pass to do the subtraction.
Step7: NumExpr is actually slower than NumPy in this simple case; it's possible that the NumExpr overhead isn't worth it for this simple operation. But if we were to switch to multi-threaded we would see a speed-up
Step8: Example
Step9: Numba
Step10: Numba has one main function
Step11: The first time we run the timing test, we get a message that one of the Numba timing tests took way longer than the others. That's because the first time we call the jitted function, it has to be compiled for the dtype of the arguments. The compiled function is cached, though, making subsequent calls faster.
Step12: Numba does let you call ctypes and CFFI functions in nopython mode, as we'll see later.
Step13: So we got an excellent speedup, but we also had to specify the input dtypes. There are dynamic ufuncs that give more flexibility, but sometimes they don't behave as expected with the parallel target.
Step14: Wait, why didn't we get any parallel speedup? In Numba, parallelization only happens when broadcasting, so let's write it in a way that broadcasts over the first dimension
Step15: I've hacked a fairly general solution that achieves parallelism by broadcasting over an array of indices (I call it parallel_bcast). I'm expecting it will be superceded by Numba functionality within a few years, though.
Step16: See below for a non-trivial example where prange fails to live up to its promise, however.
Step17: So we cut the runtime of the Numpy implementation by 1/3, and we didn't even consider the memory usage. The Numpy implementation doubles the memory usage of the input array, since it has to construct the radii. This can be a big problem when dealing with $2048^3$ FFT meshes, for example.
Step18: As we expected, the race condition caused the parallel version to give the wrong answer. In fact, it doesn't even give a consistent answer from run to run
Step19: We can write a parallel-safe version by giving each k its own temporary histogram space to write into
Step20: Finally, how do the timings stack up?
Step21: That didn't go as expected. If each thread is only operating on one element at a time, that could explain the inefficiency. Hopefully it's implemented internally to operate on nz/nthreads elements at a time, though.
Step22: That was about as fast as we expected, but it still gives the wrong answer. Maybe when prange supports nested loops it will be possible to write a correct version of this.
Step25: CFFI
Step26: We can see that we just compiled _gslffilib.so. Here's how we use it
Step27: And here's how we use it with Numba
Step28: There are other ways to use CFFI (ABI vs API, in-line vs out-of-line), but the above is the preferred approach for most applications.
|
6 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
import tqdm
import numpy as np
import espressomd.observables
import espressomd.accumulators
espressomd.assert_features(
["ENGINE", "ROTATION", "MASS", "ROTATIONAL_INERTIA", "CUDA"])
ED_PARAMS = {'time_step': 0.01,
'box_l': 3*[10.],
'skin': 0.4,
'active_velocity': 5,
'kT': 1,
'gamma': 1,
'gamma_rotation': 1,
'mass': 0.1,
'rinertia': 3*[1.],
'corr_tmax': 100}
ED_N_SAMPLING_STEPS = 5000000
system = espressomd.System(box_l=ED_PARAMS['box_l'])
system.cell_system.skin = ED_PARAMS['skin']
system.time_step = ED_PARAMS['time_step']
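# Added sketch (NOT part of the original notebook, where particle setup is left
# as an exercise): one possible way to create the active and passive particles
# referenced below. The exact swimming/thermostat keywords depend on the
# ESPResSo version, so treat this as an illustrative assumption rather than the
# tutorial's solution.
part_act = system.part.add(pos=0.5 * system.box_l, mass=ED_PARAMS['mass'],
                           rinertia=ED_PARAMS['rinertia'], rotation=3 * [True],
                           swimming={'v_swim': ED_PARAMS['active_velocity']})
part_pass = system.part.add(pos=0.5 * system.box_l, mass=ED_PARAMS['mass'],
                            rinertia=ED_PARAMS['rinertia'], rotation=3 * [True])
system.thermostat.set_langevin(kT=ED_PARAMS['kT'], gamma=ED_PARAMS['gamma'],
                               gamma_rotation=ED_PARAMS['gamma_rotation'], seed=42)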
pos_obs = espressomd.observables.ParticlePositions(
ids=[part_act.id, part_pass.id])
msd = espressomd.accumulators.Correlator(obs1=pos_obs,
corr_operation="square_distance_componentwise",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(msd)
vel_obs = espressomd.observables.ParticleVelocities(
ids=[part_act.id, part_pass.id])
vacf = espressomd.accumulators.Correlator(obs1=vel_obs,
corr_operation="componentwise_product",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(vacf)
ang_obs = espressomd.observables.ParticleAngularVelocities(
ids=[part_act.id, part_pass.id])
avacf = espressomd.accumulators.Correlator(obs1=ang_obs,
corr_operation="componentwise_product",
delta_N=1,
tau_max=ED_PARAMS['corr_tmax'],
tau_lin=16)
system.auto_update_accumulators.add(avacf)
for i in tqdm.tqdm(range(100)):
system.integrator.run(int(ED_N_SAMPLING_STEPS/100))
system.auto_update_accumulators.remove(msd)
msd.finalize()
system.auto_update_accumulators.remove(vacf)
vacf.finalize()
system.auto_update_accumulators.remove(avacf)
avacf.finalize()
taus_msd = msd.lag_times()
msd_result = msd.result()
msd_result = np.sum(msd_result, axis=2)
taus_vacf = vacf.lag_times()
vacf_result = np.sum(vacf.result(), axis=2)
taus_avacf = avacf.lag_times()
avacf_result = np.sum(avacf.result(), axis=2)
fig_msd = plt.figure(figsize=(10, 6))
plt.plot(taus_msd, msd_result[:, 0], label='active')
plt.plot(taus_msd, msd_result[:, 1], label='passive')
plt.xlim((taus_msd[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('MSD(t)')
plt.legend()
plt.show()
def acf_stable_regime(x, y):
    """Remove the noisy tail in autocorrelation functions of finite time series."""
    cut = np.argmax(y <= 0.) - 2
    assert cut >= 1
    return (x[1:cut], y[1:cut])
fig_vacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 1]), label='passive')
plt.xlim((taus_vacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('VACF(t)')
plt.legend()
plt.show()
fig_avacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 1]), label='passive')
plt.xlim((taus_avacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('AVACF(t)')
plt.legend()
plt.show()
def clear_system(system):
system.part.clear()
system.thermostat.turn_off()
system.constraints.clear()
system.auto_update_accumulators.clear()
system.time = 0.
clear_system(system)
import espressomd.shapes
import espressomd.math
RECT_PARAMS = {'length': 100,
'radius': 20,
'funnel_inner_radius': 3,
'funnel_angle': np.pi / 4.0,
'funnel_thickness': 0.1,
'n_particles': 500,
'active_velocity': 7,
'time_step': 0.01,
'wca_sigma': 0.5,
'wca_epsilon': 0.1,
'skin': 0.4,
'kT': 0.1,
'gamma': 1.,
'gamma_rotation': 1}
RECT_STEPS_PER_SAMPLE = 100
RECT_N_SAMPLES = 500
TYPES = {'particles': 0,
'boundaries': 1}
box_l = np.array(
[RECT_PARAMS['length'], 2*RECT_PARAMS['radius'], 2*RECT_PARAMS['radius']])
system.box_l = box_l
system.cell_system.skin = RECT_PARAMS['skin']
system.time_step = RECT_PARAMS['time_step']
system.thermostat.set_langevin(
kT=RECT_PARAMS['kT'], gamma=RECT_PARAMS['gamma'], gamma_rotation=RECT_PARAMS['gamma_rotation'], seed=42)
cylinder = espressomd.shapes.Cylinder(
center=0.5 * box_l,
axis=[1, 0, 0], radius=RECT_PARAMS['radius'], length=RECT_PARAMS['length'], direction=-1)
system.constraints.add(shape=cylinder, particle_type=TYPES['boundaries'])
# Setup walls
wall = espressomd.shapes.Wall(dist=0, normal=[1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
wall = espressomd.shapes.Wall(dist=-RECT_PARAMS['length'], normal=[-1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
funnel_length = (RECT_PARAMS['radius']-RECT_PARAMS['funnel_inner_radius']
)/np.tan(RECT_PARAMS['funnel_angle'])
com_deviations = list()
times = list()
def moving_average(data, window_size):
return np.convolve(data, np.ones(window_size), 'same') / window_size
smoothing_window = 10
com_smoothed = moving_average(com_deviations, smoothing_window)
fig_rect = plt.figure(figsize=(10, 6))
plt.plot(times[smoothing_window:-smoothing_window],
com_smoothed[smoothing_window:-smoothing_window])
plt.xlabel('t')
plt.ylabel('center of mass deviation')
plt.show()
clear_system(system)
import espressomd.lb
HYDRO_PARAMS = {'box_l': 3*[25],
'time_step': 0.01,
'skin': 1,
'agrid': 1,
'dens': 1,
'visc': 1,
'gamma': 1,
'mass': 5,
'dipole_length': 2,
'active_force': 0.1,
'mode': 'pusher'}
HYDRO_N_STEPS = 2000
system.box_l = HYDRO_PARAMS['box_l']
system.cell_system.skin = HYDRO_PARAMS['skin']
system.time_step = HYDRO_PARAMS['time_step']
system.min_global_cut = HYDRO_PARAMS['dipole_length']
box_l = np.array(HYDRO_PARAMS['box_l'])
pos = box_l/2.
pos[2] = -10.
system.integrator.run(HYDRO_N_STEPS)
vels = np.squeeze(lbf[:, int(system.box_l[1]/2), :].velocity)
vel_abs = np.linalg.norm(vels, axis=2)
lb_shape = lbf.shape
xs, zs = np.meshgrid(np.linspace(0.5, box_l[0] - 0.5, num=lb_shape[0]),
np.linspace(0.5, box_l[2] - 0.5, num=lb_shape[2]))
fig_vels, ax_vels = plt.subplots(figsize=(10, 6))
im = plt.pcolormesh(vel_abs.T, cmap='YlOrRd')
plt.quiver(xs, zs, vels[:, :, 0].T, vels[:, :, 2].T, angles='xy', scale=0.005)
circ = plt.Circle(particle.pos_folded[[0, 2]], 0.5, color='blue')
ax_vels.add_patch(circ)
ax_vels.set_aspect('equal')
plt.xlabel('x')
plt.ylabel('z')
cb = plt.colorbar(im, label=r'$|v_{\mathrm{fluid}}|$')
plt.show()
lbf.write_vtk_velocity('./fluid.vtk')
system.part.writevtk('./particle.vtk')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: No more setup needed! We can run the simulation and plot our observables.
Step4: The Mean Square Displacement of an active particle is characterized by a longer ballistic regime and an increased diffusion coefficient for longer lag times. In the overdamped limit it is given by
Step5: Before we go to the second part, it is important to clear the state of the system.
Step6: Rectification
Step7: Exercise
Step8: Exercise
Step9: Even though the potential energy inside the geometry is 0 in every part of the accessible region, the active particles are clearly not Boltzmann distributed (homogenous density). Instead, they get funneled into the right half, showing the inapplicability of equilibrium statistical mechanics.
Step10: Hydrodynamics of self-propelled particles
Step11: Exercise
Step12: Exercise
Step13: We can also export the particle and fluid data to .vtk format to display the results with a visualization software like ParaView.
|
7 | <ASSISTANT_TASK:>
Python Code:
import xarray as xr
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
%matplotlib inline
def read_data(file_name):
    """Read netcdf file and return variables:
    rlat, rlon, var, px and py.
    """
# read the dataset
ds = xr.open_dataset(file_name)
# retrieve the data of variable tas, coordinate variables rlat and rlon, and rotated pole
var = ds.tas[0,:,:]
rlat = ds.rlat[:]
rlon = ds.rlon[:]
pole = ds.rotated_pole
try:
# retrieve attribute grid_north_pole_longitude
if hasattr(pole,'grid_north_pole_longitude'):
px = pole.attrs['grid_north_pole_longitude']
# retrieve attribute grid_north_pole_latitude
if hasattr(pole,'grid_north_pole_latitude'):
py = pole.attrs['grid_north_pole_latitude']
except:
print('Unexpected error:', sys.exc_info()[0])
raise
return rlon, rlat, var, px, py
def main():
    """Draw variable tas on map using the RotatedPole projection.
    The coordinate variables rlat and rlon are required.
    """
dir_name = 'data/'
file_name = 'rotated_curvilinear_data.nc'
fname = os.path.join(dir_name,file_name)
# read file content and return relevant variables
rlon, rlat, var, pole_lon, pole_lat = read_data(fname)
# initialize plot
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([-46, 70, 20, 75], crs=ccrs.PlateCarree())
# set fill colors for ocean and land areas
ax.add_feature(cartopy.feature.OCEAN, color='white', zorder=0)
ax.add_feature(cartopy.feature.LAND, color='lightgray',zorder=0,
linewidth=0.5, edgecolor='black')
# add gridlines
ax.gridlines(draw_labels=True, linewidth=0.5, color='gray', xlocs=range(-180,180,15), ylocs=range(-90,90,15))
# add coastlines
ax.coastlines(resolution='50m', linewidth=0.3, color='black')
# add title
ax.set_title('Python: rotated curvilinear grid', fontsize=10, fontweight='bold')
# set projection
crs = ccrs.RotatedPole(pole_longitude=pole_lon, pole_latitude=pole_lat)
# contour fill plot
ax.contourf(rlon, rlat, var, levels=15, cmap='RdYlBu_r', transform=crs)
# save the plot output to PNG file (and display it in notebook if '%matplotlib inline' is set)
plt.savefig('Py_rotated_curvilinear_grid_1.png', bbox_inches='tight', dpi=200)
if __name__ == '__main__':
main()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Allow to display the output of plotting commands in notebook
Step3: Function read_data
Step5: Function main
Step6: Run main
|
8 | <ASSISTANT_TASK:>
Python Code:
df
df=pd.read_csv(csv_path)
df[(df[u'year'] <= 2016)]
print pd.Timestamp.min
print pd.Timestamp.max
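# Added sketch (not in the original notebook): an alternative to the manual
# string slicing below would be pd.to_datetime with errors='coerce', which
# turns dates outside the Timestamp bounds printed above into NaT.
year_dt = pd.to_datetime(df['year'], errors='coerce')  # illustrative only, not used below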
year2=[]
for i in df['year']:
try: year2.append(int(i[6:10]))
except: year2.append(np.nan)
df['year']=year2
df[(df[u'year'] <= 2016)]
df = df[(df[u'reclat'] != 0.0) & (df[u'reclong'] != 0.0) & (df[u'reclong'] >= -180) & (df[u'reclong'] <= 180) & (df[u'year'] >= 860) & (df[u'year'] <= 2016)]
df.head()
df=df.drop(u'id', axis=1)
df.dropna(inplace=True)
valid=df
valid.index=[i for i in range(len(valid.index))]
valid.rename(columns={'mass (g)': 'mass'}, inplace = True)
tipus=[i for i in range(len(valid.index))]
orszag=[i for i in range(len(valid.index))]
for i in range(len(valid.index)):
recclass=valid.loc[i]['recclass']
if 'Iron' in recclass:
tipus[i]='Iron'
elif 'Mesosiderite' in recclass or 'Pallasite' in recclass:
tipus[i]='Stony-Iron'
else:
tipus[i]='Stony'
# Problem with Antarctica!!
if valid.loc[i]['reclat'] <= -60:
orszag[i]='Antarctica'
else:
coordinate=valid.loc[i]['reclat'], valid.loc[i]['reclong']
orszag[i]=rg.get(coordinate)['country']
valid['type']=tipus
valid['country']=orszag
gf=[]
for i in range(len(valid.index)):
seged={}
for j in range(len(valid.loc[i])):
seged[valid.columns[j]]=str(valid.loc[i][j])
gf.append(seged)
gf
json_path='Meteorite_Landings3.json'
file(json_path,'w').write(json.dumps(gf))
# Distribution by country
plt.rcParams['font.size'] = 20
valid_cgroup=valid.groupby('country')
valid_sum=valid_cgroup.size()
valid_sum.sort_values(ascending=True, inplace=True)
valid_sum.plot.pie(figsize=(20, 20))
# Separating observed falls from finds
#fell=valid[valid['fall']=="Fell"].copy()
#found=valid[valid['fall']=="Found"].copy()
#found.index=[i for i in range(len(found.index))]
#fell.index=[i for i in range(len(fell.index))]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: let's look at it step by step
Step2: it looks like this expression is the one that fails on 2016. That is because this column is not interpreted as an actual date. There are two options: either we convert it to a datetime, or we cut the year out manually. The first approach has a problem
Step3: so the only way is manual brute force...
Step4: now it should work - this does not substantively change the visualization
Step5: back to the original
Step6: Look at the pandas copy and deepcopy functions. Done this way, the dataframe is not copied, only the pointers are changed..
|
9 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.cross_validation import train_test_split
X, y, true_coefficient = make_regression(n_samples=80, n_features=30, n_informative=10, noise=100, coef=True, random_state=5)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)
print(X_train.shape)
print(y_train.shape)
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression().fit(X_train, y_train)
print("R^2 on training set: %f" % linear_regression.score(X_train, y_train))
print("R^2 on test set: %f" % linear_regression.score(X_test, y_test))
from sklearn.metrics import r2_score
print(r2_score(np.dot(X, true_coefficient), y))
plt.figure(figsize=(10, 5))
coefficient_sorting = np.argsort(true_coefficient)[::-1]
plt.plot(true_coefficient[coefficient_sorting], "o", label="true")
plt.plot(linear_regression.coef_[coefficient_sorting], "o", label="linear regression")
plt.legend()
from sklearn.linear_model import Ridge
ridge_models = {}
training_scores = []
test_scores = []
for alpha in [100, 10, 1, .01]:
ridge = Ridge(alpha=alpha).fit(X_train, y_train)
training_scores.append(ridge.score(X_train, y_train))
test_scores.append(ridge.score(X_test, y_test))
ridge_models[alpha] = ridge
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [100, 10, 1, .01])
plt.legend(loc="best")
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([100, 10, 1, .01]):
plt.plot(ridge_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
from sklearn.linear_model import Lasso
lasso_models = {}
training_scores = []
test_scores = []
for alpha in [30, 10, 1, .01]:
lasso = Lasso(alpha=alpha).fit(X_train, y_train)
training_scores.append(lasso.score(X_train, y_train))
test_scores.append(lasso.score(X_test, y_test))
lasso_models[alpha] = lasso
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [30, 10, 1, .01])
plt.legend(loc="best")
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([30, 10, 1, .01]):
plt.plot(lasso_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
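# Added illustration (not in the original): the L1 penalty drives coefficients
# exactly to zero; count the surviving features for each alpha.
for alpha in [30, 10, 1, .01]:
    print("alpha=%.2f: %d non-zero coefficients" % (alpha, np.sum(lasso_models[alpha].coef_ != 0)))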
from figures import plot_linear_svc_regularization
plot_linear_svc_regularization()
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
plt.scatter(X[:, 0], X[:, 1], c=y)
from sklearn.svm import LinearSVC
linear_svm = LinearSVC().fit(X, y)
print(linear_svm.coef_.shape)
print(linear_svm.intercept_.shape)
plt.scatter(X[:, 0], X[:, 1], c=y)
line = np.linspace(-15, 15)
for coef, intercept in zip(linear_svm.coef_, linear_svm.intercept_):
plt.plot(line, -(line * coef[0] + intercept) / coef[1])
plt.ylim(-10, 15)
plt.xlim(-10, 8)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear Regression
Step2: Ridge Regression (L2 penalty)
Step3: Lasso (L1 penalty)
Step4: Linear models for classification
Step5: Multi-Class linear classification
|
10 | <ASSISTANT_TASK:>
Python Code:
# conda install ipyrad -c bioconda
# conda install toytree -c eaton-lab
import pandas as pd
import toytree
# load the tree table from CSV
tree_table = pd.read_csv(
"./analysis-treeslider/test.tree_table.csv",
index_col=0,
)
# examine top of table
tree_table.head()
outfile = open("trees.nwk", "w")
outfile.write("\n".join(tree_table.tree.tolist()))
outfile.close()
%%bash
wget -q https://github.com/smirarab/ASTRAL/raw/master/Astral.5.6.3.zip
unzip -qo Astral.5.6.3
%%bash
java -jar Astral/astral.5.6.3.jar -i trees.nwk > astral.tre 2>astral.err
tre = toytree.rtree.coaltree(10)
tre.rotate_node(names=["r0", "r1", "r2"])
tre.draw(node_labels=True);
tre = toytree.tree("astral.tre").root('reference')
tre.draw(node_labels="support", tip_labels_align=True);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Short Tutorial
Step2: Write the trees column to a file
Step3: Get Astral
Step4: Run Astral
Step5: Plot astral species tree
|
11 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
from sg2lib import *
gamma = 1
Sx = 2
Fs = array([[1, gamma], [0, 1]])
Fp = array([[Sx, 0], [0, 1/Sx]])
n = 10
Fsi = array([[1, gamma/n], [0, 1]])
print('Incremental deformation gradient:')
print(Fsi)
array_equal(matrix_power(Fsi, n), Fs)
Fpi = array([[Sx**(1/n), 0], [0, Sx**(-1/n)]])
print('Incremental deformation gradient:')
print(Fpi)
allclose(matrix_power(Fpi, n), Fp)
u,s,v = svd(Fs @ Fp)
print('Axial ratio of finite strain resulting from simple shear superposed on pure shear: {}'.format(s[0]/s[1]))
u,s,v = svd(Fp @ Fs)
print('Axial ratio of finite strain resulting from pure shear superposed on simple shear: {}'.format(s[0]/s[1]))
Fsi = array([[1, gamma/2], [0, 1]])
Fpi = array([[Sx**(1/2), 0], [0, Sx**(-1/2)]])
u,s,v = svd(Fsi @ Fpi @ Fsi @ Fpi)
print('Axial ratio of finite strain of superposed increments starting with pure shear: {}'.format(s[0]/s[1]))
u,s,v = svd(Fpi @ Fsi @ Fpi @ Fsi)
print('Axial ratio of finite strain of superposed increments starting with simple shear: {}'.format(s[0]/s[1]))
n = 100
Fsi = array([[1, gamma/n], [0, 1]])
Fpi = array([[Sx**(1/n), 0], [0, Sx**(-1/n)]])
u,s,v = svd(matrix_power(Fsi @ Fpi, n))
print('Axial ratio of finite strain of superposed increments starting with pure shear: {}'.format(s[0]/s[1]))
u,s,v = svd(matrix_power(Fpi @ Fsi, n))
print('Axial ratio of finite strain of superposed increments starting with simple shear: {}'.format(s[0]/s[1]))
arp = []
ars = []
ninc = range(1, 201)
for n in ninc:
Fsi = array([[1, gamma/n], [0, 1]])
Fpi = array([[Sx**(1/n), 0], [0, Sx**(-1/n)]])
u,s,v = svd(matrix_power(Fsi @ Fpi, n))
arp.append(s[0]/s[1])
u,s,v = svd(matrix_power(Fpi @ Fsi, n))
ars.append(s[0]/s[1])
figure(figsize=(16, 4))
semilogy(ninc, arp, 'r', label='Pure shear first')
semilogy(ninc, ars, 'g', label='Simple shear first')
legend()
xlim(1, 200)
xlabel('Number of increments')
ylabel('Finite strain axial ratio');
from scipy.linalg import expm, logm
Lp = logm(Fp)
Ls = logm(Fs)
L = Lp + Ls
F = expm(L)
u,s,v = svd(F)
sar = s[0]/s[1]
print('Axial ratio of finite strain of simultaneous pure shear and simple shear: {}'.format(sar))
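# Added check (not in the original notebook): expm and logm are mutual inverses
# here, so the deformation gradient is recovered from its velocity gradient.
print('expm(logm(Fs)) recovers Fs: {}'.format(allclose(expm(logm(Fs)), Fs)))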
arp = []
ars = []
ninc = range(1, 201)
for n in ninc:
Fsi = array([[1, gamma/n], [0, 1]])
Fpi = array([[Sx**(1/n), 0], [0, Sx**(-1/n)]])
u,s,v = svd(matrix_power(Fsi @ Fpi, n))
arp.append(s[0]/s[1])
u,s,v = svd(matrix_power(Fpi @ Fsi, n))
ars.append(s[0]/s[1])
figure(figsize=(16, 4))
semilogy(ninc, arp, 'r', label='Pure shear first')
semilogy(ninc, ars, 'g', label='Simple shear first')
legend()
xlim(1, 200)
axhline(sar)
xlabel('Number of increments')
ylabel('Finite strain axial ratio');
L = logm(Fs)
D = (L + L.T)/2
W = (L - L.T)/2
allclose(D + W, L)
vel_field(D)
vel_field(W)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Naive concept of simultaneous deformation
Step2: To divide simple shear deformation with $\gamma$=1 to n incremental steps
Step3: To check that supperposition of those increments give as total deformation, we can use allclose numpy function
Step4: Knowing that deformation superposition is not cimmutative, we can check that axial ratio of finite strain resulting from simple shear superposed on pure shear and vice-versa is really different
Step5: Lets try to split those deformation to two increments and mutually mix them
Step6: It is now close to each other, but still quite different. So let's split it to much more increments....
Step7: Now it is very close. Let's visualize how finite strain converge with increasing number of increments
Step8: Using spatial velocity gradient
Step9: Spatial velocity gradient could be obtained as matrix logarithm of deformation gradient
Step10: Total spatial velocity gradient of simulatanous deformation could be calculated by summation of individual ones
Step11: Resulting deformation gradient could be calculated as matrix exponential of total spatial velocity gradient
Step12: Lets overlay it on previous diagram
Step13: Decomposition of spatial velocity gradient
Step14: Check that decomposition give total spatial velocity gradient
Step15: Visualize spatial velocity gradients for rate of deformation tensor
Step16: Visualize spatial velocity gradients for spin tensor
|
12 | <ASSISTANT_TASK:>
Python Code:
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
filt_raw = raw.copy()
filt_raw.load_data().filter(l_freq=1., h_freq=None)
ica = ICA(n_components=15, random_state=97)
ica.fit(filt_raw)
raw.load_data()
ica.plot_sources(raw)
ica.plot_components()
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
ica.plot_properties(raw, picks=[0, 1])
ica.exclude = [0, 1] # indices chosen based on various plots above
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
del reconst_raw
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
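# Added illustration (not in the original tutorial): list the component indices
# that matched the EOG pattern.
print('ICs flagged as EOG-related:', eog_indices)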
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
# clean up memory before moving on
del raw, filt_raw, ica, new_ica
mapping = {
'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname)
# remove trailing `.` from channel names so we can set montage
raw.rename_channels(mapping)
raw.set_montage('standard_1005')
# fit ICA
ica = ICA(n_components=30, random_state=97)
ica.fit(raw)
raws.append(raw)
icas.append(ica)
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw)
fig.suptitle('Subject {}'.format(index))
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3])
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div class="alert alert-info"><h4>Note</h4><p>Before applying ICA (or any artifact repair strategy), be sure to observe
Step2: We can get a summary of how the ocular artifact manifests across each channel
Step3: Now we'll do the same for the heartbeat artifacts, using
Step4: Filtering to remove slow drifts
Step5: Fitting and plotting the ICA solution
Step6: Some optional parameters that we could have passed to the
Step7: Here we can pretty clearly see that the first component (ICA000) captures
Step8: <div class="alert alert-info"><h4>Note</h4><p>
Step9: We can also plot some diagnostics of each IC using
Step10: In the remaining sections, we'll look at different ways of choosing which ICs
Step11: Now that the exclusions have been set, we can reconstruct the sensor signals
Step12: Using an EOG channel to select ICA components
Step13: Note that above we used
Step14: The last of these plots is especially useful
Step15: Much better! Now we've captured both ICs that are reflecting the heartbeat
Step16: Selecting ICA components using template matching
Step17: Now let's run
Step18: The first figure shows the template map, while the second figure shows all
Step19: Notice that subject 1 does seem to have an IC that looks like it reflects
Step20: Now we get the message At least 1 IC detected for each subject (which is
Step21: Notice that the first subject has 3 different labels for the IC at index 0
Step22: As a final note, it is possible to extract ICs numerically using the
|
13 | <ASSISTANT_TASK:>
Python Code:
# import variable setting dictionaries from dkrz data ingest tool chain
# and remove __doc__ strings from dictionary (would clutter PROV graph visualizations)
from provtemplates import workflow_steps
from collections.abc import MutableMapping  # moved to collections.abc in Python 3.3+
from contextlib import suppress
def delete_keys_from_dict(dictionary, keys):
for key in keys:
with suppress(KeyError):
del dictionary[key]
for value in dictionary.values():
if isinstance(value, MutableMapping):
delete_keys_from_dict(value, keys)
workflow_dict = workflow_steps.WORKFLOW_DICT
from provtemplates import provconv
import prov.model as prov
import six
import itertools
from provtemplates import workflow_steps
ns_dict = {
'prov':'http://www.w3.org/ns/prov#',
'var':'http://openprovenance.org/var#',
'vargen':'http://openprovenance.org/vargen#',
'tmpl':'http://openprovenance.org/tmpl#',
'foaf':'http://xmlns.com/foaf/0.1/',
'ex': 'http://example.org/',
'orcid':'http://orcid.org/',
#document.set_default_namespace('http://example.org/0/')
'rdf':'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs':'http://www.w3.org/2000/01/rdf-schema#',
'xsd':'http://www.w3.org/2001/XMLSchema#',
'ex1': 'http://example.org/1/',
'ex2': 'http://example.org/2/'
}
prov_doc01 = provconv.set_namespaces(ns_dict,prov.ProvDocument())
prov_doc02 = provconv.set_namespaces(ns_dict,prov.ProvDocument())
prov_doc03 = provconv.set_namespaces(ns_dict,prov.ProvDocument())
prov_doc1 = prov_doc01.bundle("var:data-ingest-wflow")
prov_doc2 = prov_doc02.bundle("var:data-ingest-wflow")
prov_doc3 = prov_doc03.bundle("var:data-ingest-wflow")
prov_doc01.set_default_namespace('http://enes.org/ns/ingest#')
prov_doc02.set_default_namespace('http://enes.org/ns/ingest#')
prov_doc03.set_default_namespace('http://enes.org/ns/ingest#')
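# Variant 1: build one PROV bundle per workflow step, each with its own in/out entities,
# agent and activity, all linked to a shared 'var:wf_doc' entity.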
def gen_bundles(workflow_dict,prov_doc):
global_in_out = prov_doc.entity('var:wf_doc')
for wflow_step, wflow_stepdict in workflow_dict.items():
nbundle = prov_doc.bundle('var:'+wflow_step)
out_node = nbundle.entity('var:'+wflow_step+'_out')
agent = nbundle.agent('var:'+wflow_step+'_agent')
activity = nbundle.activity('var:'+wflow_step+'_activity')
in_node = nbundle.entity('var:'+wflow_step+'_in')
nbundle.wasGeneratedBy(out_node,activity)
nbundle.used(activity,in_node)
nbundle.wasAssociatedWith(activity,agent)
nbundle.wasDerivedFrom(in_node,out_node)
nbundle.used(activity,global_in_out)
nbundle.wasGeneratedBy(global_in_out,activity)
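# Variant 2: create the per-step nodes directly in the document (no separate bundles) and
# return (bundle, out_node, agent, activity) tuples so the steps can be chained afterwards.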
def in_bundles(workflow_dict,prov_doc):
first = True
out_nodes = []
nbundle = prov_doc
for wflow_step, wflow_stepdict in workflow_dict.items():
#nbundle = prov_doc.bundle('var:'+wflow_step)
out_node = nbundle.entity('var:'+wflow_step+'_out')
agent = nbundle.agent('var:'+wflow_step+'_agent')
activity = nbundle.activity('var:'+wflow_step+'_activity')
if first:
in_node = nbundle.entity('var:'+wflow_step+'_in')
nbundle.used(activity,in_node)
first = False
out_nodes.append((nbundle,out_node,agent,activity))
return out_nodes
def chain_bundles(nodes):
'''
chaining based on "used" activity relationship
'''
i = 1
for (nbundle,out_node,agent,activity) in nodes[1:]:
(prev_bundle,prev_out,prev_agent,prev_activity) = nodes[i-1]
nbundle.used(activity,prev_out)
i += 1
for (nbundle,out_node,agent,activity) in nodes:
nbundle.wasGeneratedBy(out_node,activity)
nbundle.wasAssociatedWith(activity,agent)
def chain_hist_bundles(nodes,prov_doc):
'''
chaining based on "used" activity relationship
add an explicit end_result composing all the generated
intermediate results
'''
i = 1
for (nbundle,out_node,agent,activity) in nodes[1:]:
(prev_bundle,prev_out,prev_agent,prev_activity) = nodes[i-1]
nbundle.used(activity,prev_out)
i += 1
for (nbundle,out_node,agent,activity) in nodes:
nbundle.wasGeneratedBy(out_node,activity)
nbundle.wasAssociatedWith(activity,agent)
wf_out = prov_doc.entity("ex:wf_result")
wf_agent = prov_doc.agent("ex:workflow_handler")
wf_activity = prov_doc.activity("ex:wf_trace_composition")
prov_doc.wasGeneratedBy(wf_out,wf_activity)
prov_doc.wasAssociatedWith(wf_activity,wf_agent)
for (nbundle,out_node,agent,activity) in nodes:
prov_doc.used(wf_activity,out_node)
# generate prov_template options and print provn representation
gen_bundles(workflow_dict,prov_doc01)
print(prov_doc01.get_provn())
%matplotlib inline
prov_doc01.plot()
prov_doc01.serialize('data-ingest1.rdf',format='rdf')
nodes = in_bundles(workflow_dict,prov_doc2)
chain_bundles(nodes)
print(prov_doc02.get_provn())
%matplotlib inline
prov_doc02.plot()
from prov.dot import prov_to_dot
dot = prov_to_dot(prov_doc02)
prov_doc02.serialize('ingest-prov-version2.rdf',format='rdf')
dot.write_png('ingest-prov-version2.png')
gnodes = in_bundles(workflow_dict,prov_doc3)
chain_hist_bundles(gnodes,prov_doc3)
print(prov_doc03.get_provn())
dot = prov_to_dot(prov_doc03)
dot.write_png('ingest-prov-version3.png')
%matplotlib inline
prov_doc03.plot()
prov_doc03.serialize('data-ingest3.rdf',format='rdf')
# ------------------ to be removed --------------------------------------
# generate prov_template options and print provn representation
gen_bundles(workflow_dict,prov_doc1)
print(prov_doc1.get_provn())
nodes = in_bundles(workflow_dict,prov_doc2)
chain_bundles(nodes)
print(prov_doc2.get_provn())
gnodes = in_bundles(workflow_dict,prov_doc3)
chain_hist_bundles(gnodes,prov_doc3)
print(prov_doc3.get_provn())
%matplotlib inline
prov_doc1.plot()
prov_doc2.plot()
prov_doc3.plot()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Template representation variant 1
Step2: Template representation variant 2
Step3: Template representation variant 3
|
14 | <ASSISTANT_TASK:>
Python Code:
training = sqlContext.read.parquet("s3://zoltanctoth-flights/training.parquet")
test = sqlContext.read.parquet("s3://zoltanctoth-flights/training.parquet")
test.printSchema()
test.first()
training.cache()
test.cache()
from pyspark.sql.types import DoubleType
from pyspark.sql.functions import udf
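# UDF that labels a flight as late (1.0) when the arrival delay is positive, otherwise 0.0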
is_late = udf(lambda delay: 1.0 if delay > 0 else 0.0, DoubleType())
training = training.withColumn("is_late",is_late(training.arrdelay))
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import VectorAssembler
from pyspark.ml import Pipeline
# Create feature vectors. Ignore arrdelay and its derivative, is_late
feature_assembler = VectorAssembler(
inputCols=[x for x in training.columns if x not in ["is_late","arrdelay"]],
outputCol="features")
reg = LogisticRegression().setParams(
maxIter = 100,
labelCol="is_late",
predictionCol="prediction")
model = Pipeline(stages=[feature_assembler, reg]).fit(training)
predicted = model.transform(test)
predicted.take(1)
predicted = predicted.withColumn("is_late",is_late(predicted.arrdelay))
predicted.crosstab("is_late","prediction").show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate label column for the training data
Step2: Create and fit Spark ML model
Step3: Predict whether the aircraft will be late
Step4: Check model performance
|
15 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
from scipy.optimize import bisect
import json
from functools import partial
class Foo: pass
from chest import Chest
from slict import CachedSlict
from glopen import glopen, glopen_many
config = Foo()
config.name0 = "HighAspect/HA_base/HA_base"
config.name1 = "HighAspect/HA_visc/HA_visc"
#config.arch_end = "maxhutch#alpha-admin/~/pub/"
config.arch_end = "alcf#dtn_mira/projects/alpha-nek/"
c0 = Chest(path = "{:s}-results".format(config.name0),
open = partial(glopen, endpoint=config.arch_end),
open_many = partial(glopen_many, endpoint=config.arch_end))
sc0 = CachedSlict(c0)
with glopen(
"{:s}.json".format(config.name0), mode='r',
endpoint = config.arch_end,
) as f:
params0 = json.load(f)
c1 = Chest(path = "{:s}-results".format(config.name1),
open = partial(glopen, endpoint=config.arch_end),
open_many = partial(glopen_many, endpoint=config.arch_end))
sc1 = CachedSlict(c1)
with glopen(
"{:s}.json".format(config.name1), mode='r',
endpoint = config.arch_end,
) as f:
params1 = json.load(f)
c0.prefetch(sc0[:,'H'].full_keys())
c0.prefetch(sc0[:,'w_max_z'].full_keys())
c1.prefetch(sc1[:,'H'].full_keys())
c1.prefetch(sc1[:,'w_max_z'].full_keys())
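# Fit cubic splines to H(t) and differentiate to get the bubble velocity, then compare
# against the maximum and centerline vertical velocities and the theoretical estimate.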
spl0 = InterpolatedUnivariateSpline(sc0[:,'H'].keys(), sc0[:,'H'].values(), k=3)
dHdt0 = spl0.derivative()
Ts0 = np.linspace(sc0[:,'H'].keys()[0], sc0[:,'H'].keys()[-1], 1000)
Vb0 = -dHdt0(Ts0)
Vm0 = np.array([np.max(sc0[t,'w_max_z']) for t in sc0[:,'w_max_z'].keys()])
Vc0 = np.array([sc0[t,'w_max_z'][sc0[t,'w_max_z'].shape[0]/2] for t in sc0[:,'w_max_z'].keys()])
theory0 = np.sqrt(
params0["atwood"] * params0["g"] * params0["extent_mesh"][0] / np.pi
+ (2.*np.pi*params0["viscosity"] / params0["extent_mesh"][0])**2
) - (2.*np.pi*params0["viscosity"] / params0["extent_mesh"][0])
spl1 = InterpolatedUnivariateSpline(sc1[:,'H'].keys(), sc1[:,'H'].values(), k=3)
dHdt1 = spl1.derivative()
Ts1 = np.linspace(sc1[:,'H'].keys()[0], sc1[:,'H'].keys()[-1], 1000)
Vb1 = -dHdt1(Ts1)
Vm1 = np.array([np.max(sc1[t,'w_max_z']) for t in sc1[:,'w_max_z'].keys()])
Vc1 = np.array([sc1[t,'w_max_z'][sc1[t,'w_max_z'].shape[0]/2] for t in sc1[:,'w_max_z'].keys()])
theory1 = np.sqrt(
params1["atwood"] * params1["g"] * params1["extent_mesh"][0] / np.pi
+ (2.*np.pi*params1["viscosity"] / params1["extent_mesh"][0])**2
) - (2.*np.pi*params1["viscosity"] / params1["extent_mesh"][0])
fig, axs = plt.subplots(2,1, sharex=True)
axs[0].plot(Ts0, -spl0(Ts0));
axs[0].plot(Ts1, -spl1(Ts1));
#axs[0].plot(sc[:,'H'].keys(), -np.array(sc[:,'H'].values()), 'yo');
axs[0].set_ylabel('Depth (m)')
axs[1].plot(Ts0, Vb0, 'b-');
axs[1].plot(Ts1, Vb1, 'b-');
axs[1].plot(sc0[:,'w_max_z'].keys(), Vm0, 'g-');
axs[1].plot(sc1[:,'w_max_z'].keys(), Vm1, 'g-');
axs[1].plot(sc0[:,'w_max_z'].keys(), Vc0, 'r-');
axs[1].plot(sc1[:,'w_max_z'].keys(), Vc1, 'r-');
axs[1].plot([Ts0[0],Ts0[-1]], [theory0, theory0], 'k--');
axs[1].plot([Ts1[0],Ts1[-1]], [theory1, theory1], 'k--');
axs[1].set_ylabel('Velocity (scaled)')
axs[1].set_xlabel('Time (s)');
plt.savefig('Figure1.eps')
%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
%load_ext version_information
%version_information numpy, matplotlib, slict, chest, glopen, globussh
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And some more specialized dependencies
Step2: Configuration for this figure.
Step3: Open a chest located on a remote globus endpoint and load a remote json configuration file.
Step4: We want to plot the spike depth, which is the 'H' field in the chest.
Step5: Plot the bubble height, the 'H' keys, vs. time.
|
16 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
def find_peaks(a):
    '''Find the indices of the local maxima in a sequence.'''
# YOUR CODE HERE
#raise NotImplementedError()
ind=[]
    # the next two ifs check the end points
if a[0]> a[1]:
ind.append(0)
if a[len(a)-1]>a[len(a)-2]:
ind.append(len(a)-1)
    # find interior local maxima by comparing adjacent elements
for i in range(1,len(a)-1):
if a[i]>a[i-1] and a[i]>a[i+1]:
ind.append(i)
#sorts by increasing order
return sorted(ind)
find_peaks([2,0,1,0,2,0,1])
p1 = find_peaks([2,0,1,0,2,0,1])
assert np.allclose(p1, np.array([0,2,4,6]))
p2 = find_peaks(np.array([0,1,2,3]))
assert np.allclose(p2, np.array([3]))
p3 = find_peaks([3,2,1,0])
assert np.allclose(p3, np.array([0]))
from sympy import pi, N
pi_digits_str = str(N(pi, 10001))[2:]
# YOUR CODE HERE
#raise NotImplementedError()
def pimax(x):
'''uses find_peaks to find the local maxima then finds the space between the maxima and
plots the distribution of space between local maxima'''
pi=np.ones(10000)
for i in range(len(x)):
pi[i]=int(x[i])
m = find_peaks(pi)
dist = np.diff(m)
p = plt.hist(dist,bins=17)
    plt.title('Distances Between Local Maxima in First 10000 digits of $\pi$')
plt.xlabel('Distance Between Local Maxima')
plt.ylabel('Number of Times Occured')
plt.grid(False)
plt.xlim([1,19])
a=range(2,19)
plt.xticks(a[::2])
plt.ylim(0,1100)
plt.show()
pimax(pi_digits_str)
assert True # use this for grading the pi digits histogram
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Peak finding
Step3: Here is a string with the first 10000 digits of $\pi$ (after the decimal). Write code to perform the following
|
17 | <ASSISTANT_TASK:>
Python Code:
def g(x, alpha, beta):
assert alpha >= 0 and beta >= 0
return (alpha*x)/(1 + (beta * x))
def plot_cobg(x, alpha, beta):
y = np.linspace(x[0],x[1],300)
g_y = g(y, alpha, beta)
cobweb(lambda x: g(x, alpha, beta), y, g_y)
# set up the interactive plot
interact(plot_cobg,
x=widgets.FloatRangeSlider(min=0.01, max=3, step=0.01,
value=[0.02, 3],
continuous_update=False),
alpha=widgets.FloatSlider(min=0.001, max=30,step=0.01,
value=12, continuous_update=False),
beta=widgets.FloatSlider(min=0.001, max=30,step=0.01,
value=7, continuous_update=False))
# first iterate
f0 = (alpha*x)/(1+beta*x)
Eq(f(x),f0)
# second iterate
# substitute f0 for x in f0 to generate f1
f1 = simplify(f0.subs(x, f0))
Eq(f(f(x)), f1)
# third iterate
f2 = simplify(f1.subs(x, f1))
Eq(f(f(f(x))), f2)
# fourth iterate
f3 = simplify(f2.subs(x, f2))
Eq(f(f(f(f(x)))), f3)
# fixed points by solving the first iterate equation
solveset(Eq(f1,x),x)
(alpha-1)/beta
def solve_g(a, b):
y = list(np.linspace(0,float(list(solveset(Eq(f1.subs(alpha, a).subs(beta, b), x), x)).pop()),2))
for t in range(30):
y.append(g(y[t], a, b))
zoom = plt.plot(y)
    print("last 15 of the series:")
    pprint(y[-15:])
    print("\nfixed points:")
return solveset(Eq(f1.subs(alpha, a).subs(beta, b), x), x)
# interactive plot
interact(solve_g,
a=widgets.IntSlider(min=0, max=30, step=1,
value=11, continuous_update=False,
description='alpha'),
b=widgets.IntSlider(min=0, max=30, step=1,
value=5, continuous_update=False,
description='beta'))
# with alpha=1 and beta=1
Eq(collect(f3, x), x/(x+1))
def plot_g(x, alpha, beta):
pprint(x)
y = np.linspace(x[0],x[1],300)
g_y = g(y, alpha, beta)
fig1 = plt.plot(y, g_y)
fig1 = plt.plot(y, y, color='red')
plt.axis('equal')
interact(plot_g,
x=widgets.FloatRangeSlider(min=0, max=30, step=0.01, value=[0,1], continuous_update=False),
alpha=widgets.IntSlider(min=0,max=30,step=1,value=1, continuous_update=False),
beta=widgets.IntSlider(min=0,max=30,step=1,value=1, continuous_update=False))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Algebraic search for fixed points
Step2: Oscillatory fixed point
Step3: What will happen with infinitely many iterations?
|
18 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
root_folder = os.path.dirname(os.getcwd())
sys.path.append(root_folder)
import ResoFit
from ResoFit.calibration import Calibration
from ResoFit.fitresonance import FitResonance
from ResoFit.experiment import Experiment
from ResoFit._utilities import Layer
import numpy as np
import matplotlib.pyplot as plt
energy_min = 7
energy_max = 150
energy_step = 0.01
folder = 'data/_data_for_tutorial'
data_file = 'raw_data.csv'
spectra_file = 'spectra.txt'
experiment = Experiment(data_file=data_file,
spectra_file=spectra_file,
folder=folder)
experiment.data
experiment.spectra
experiment.slice(slice_start=3, slice_end=2801, reset_index=False)
experiment.x_raw(offset_us=0., source_to_detector_m=16)
experiment.x_raw(angstrom=True, offset_us=0., source_to_detector_m=16)
experiment.y_raw()
experiment.y_raw(transmission=True)
experiment.y_raw(baseline=True)
experiment.xy_scaled(energy_min=energy_min, energy_max=energy_max, energy_step=energy_step,
angstrom=False, transmission=False,
offset_us=0, source_to_detector_m=15,
baseline=True)
experiment.plot_raw(x_axis='number')
plt.show()
experiment.plot_raw(x_axis='time')
plt.show()
experiment.plot_raw(x_axis='time', time_unit='s')
plt.show()
experiment.plot_raw(x_axis='lambda', lambda_xmax=0.12,
offset_us=0, source_to_detector_m=16)
plt.show()
experiment.plot_raw(x_axis='energy', energy_xmax=150,
offset_us=0, source_to_detector_m=16)
plt.show()
experiment.plot_raw(offset_us=0, source_to_detector_m=16,
x_axis='energy', baseline=True,
energy_xmax=150)
plt.show()
layer_1 = 'U'
thickness_1 = 0.05 # mm
density_1 = None # g/cm^3 (if None or omitted, pure solid density will be used in fitting step)
layer_2 = 'Gd'
thickness_2 = 0.05 # mm
density_2 = None # g/cm^3 (if None or omitted, pure solid density will be used in fitting step)
layer = Layer()
layer.add_layer(layer=layer_1, thickness_mm=thickness_1, density_gcm3=density_1)
layer.add_layer(layer=layer_2, thickness_mm=thickness_2, density_gcm3=density_2)
source_to_detector_m = 16.
offset_us = 0
calibration = Calibration(data_file=data_file,
spectra_file=spectra_file,
raw_layer=layer,
energy_min=energy_min,
energy_max=energy_max,
energy_step=energy_step,
folder=folder,
baseline=True)
calibration.calibrate(source_to_detector_m=source_to_detector_m,
offset_us=offset_us,
vary='all')
calibration.calibrated_offset_us
calibration.calibrated_source_to_detector_m
calibration.plot()
calibration.plot(table=False)
calibration.plot(table=False, grid=False)
calibration.plot(table=False, grid=False, before=True)
calibration.plot(table=False, grid=False, all_elements=True)
calibration.plot(table=False, grid=False, all_isotopes=True)
calibration.plot(table=False, grid=False, items_to_plot=['U', 'U-238', 'Gd-156'])
calibration.plot(table=False, grid=False, items_to_plot=['U', 'U*', 'Gd-156'])
fit = FitResonance(spectra_file=spectra_file,
data_file=data_file,
folder=folder,
energy_min=energy_min,
energy_max=energy_max,
energy_step=energy_step,
calibrated_offset_us=calibration.calibrated_offset_us,
calibrated_source_to_detector_m=calibration.calibrated_source_to_detector_m,
baseline=True)
fit_result = fit.fit(layer, vary='density')
fit.molar_conc()
fit.plot()
fit.plot(error=False)
fit.plot(table=False)
fit.plot(table=False, grid=False, before=True)
fit.plot(table=False, grid=False, all_elements=True)
fit.plot(table=False, grid=False, all_isotopes=True)
fit.plot(table=False, grid=False, items_to_plot=['U', 'U-238', 'Gd-156'])
fit.plot(table=False, grid=False, items_to_plot=['U', 'U*', 'Gd-156'])
fit.fit_iso(layer='U')
fit.molar_conc()
fit.plot()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Global paramters
Step2: File locations
Step3: Preview data using Experiment()
Step4: Data
Step5: Spectra
Step6: Remove unwanted data points
Step7: Retrieve values for X-axis
Step8: In 'neutron wavelength'
Step9: Retrieve values for Y-axis
Step10: In 'neutron transmission'
Step11: In 'neutron attenuation' & remove baseline
Step12: Retrieve interpolated values for both X-axis & Y-axis
Step13: Plot raw in various ways
Step14: Attenuation vs. recorded time
Step15: Attenuation vs. neutron wavelength
Step16: Attenuation vs. neutron energy
Step17: Remove baseline for plot
Step18: Operation to experiment data
Step19: Create sample layer
Step20: Calibration
Step21: Class initialization
Step22: Equations for (time-wavelength-energy) conversion
Step23: Retrieve calibrated parameters
Step24: Plot calibration result
Step25: Hide table
Step26: Hide grid
Step27: Plot before to compare
Step28: Show all elements
Step29: Show all isotopes
Step30: Show specified items
Step31: Note
Step32: Fit resonances
Step33: Fitting equations
Step34: Output fitted result in molar concentration
Step35: Plot fitting result
Step36: Hide difference plot
Step37: Hide table
Step38: Hide grid & show before
Step39: Show all elements
Step40: Show all isotopes
Step41: Show specified items
Step42: Fit isotopic ratios
|
19 | <ASSISTANT_TASK:>
Python Code:
# Like before, we're going to select the relevant columns from the database:
import psycopg2
import pandas as pd
connection = psycopg2.connect('dbname= threeoneone user=threeoneoneadmin password=threeoneoneadmin')
cursor = connection.cursor()
cursor.execute('''SELECT createddate, closeddate, borough FROM service;''')
data = cursor.fetchall()
data = pd.DataFrame(data)
data.columns = ['createddate','closeddate','borough']
data['cryear'] = [x.year for x in data['createddate']]
data['crmonth'] = [x.month for x in data['createddate']]
# filter out bad cases
import datetime
today = datetime.date(2016,5,29)
janone = datetime.date(2010,1,1)
data = data.loc[(data['closeddate'] > data['createddate']) | (data['closeddate'].isnull() == True)]
databyyear = data.groupby(by='cryear').count()
databyyear
data['timedelta'] = data['closeddate'] - data['createddate']
data['timedeltaint'] = [x.days if pd.isnull(x) == False else None for x in data['timedelta']]
data.groupby(by='cryear').mean()
databyyear['propclosed'] = databyyear['closeddate']/databyyear['createddate']
databyyear
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's extract years and months again
Step2: And now, we're going to filter out bad cases again. However, this time, we're going to be a bit more selective. We're going to include cases where the closed date is missing.
Step3: This table shows exactly what I'm talking about - as we get closer to the current day, the average resolution time falls more and more. If censoring is occurring, we might expect that the proportion of cases closed is also decreasing over time. This is generally the case
|
20 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set() # for plot styling
import numpy as np
import threading
import time
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import KMeans
import sys
sys.path.append("../")
from IoTPy.core.stream import Stream, StreamArray, run
from IoTPy.agent_types.op import map_window, map_element
from IoTPy.agent_types.sink import sink_element
from examples.K_Means.kmeans import normally_distributed_points
from IoTPy.concurrency.multicore import get_processes, get_processes_and_procs
from IoTPy.concurrency.multicore import terminate_stream
from IoTPy.concurrency.multicore import get_proc_that_inputs_source
from IoTPy.concurrency.multicore import extend_stream
# CREATE POINTS IN BLOBS AROUND CENTERS
# centers is the list of centers of the blobs.
CENTERS = np.array([[1.0, 1.0], [1.0, -1.0],
[-1.0, -1.0], [-1.0, 1.0]])
# n_features is the number of features which is also the dimension
# of the space.
n_features=2
# n_centers is the number of centers.
n_centers = len(CENTERS)
# n_samples is the number of points.
n_samples=20
# cluster_std is the standard deviation of points around the centers.
stdev = 0.2
# Create the points
points, blob_cluster_ids = make_blobs(
n_samples, n_features, centers=CENTERS,
cluster_std=stdev, random_state=0)
# random_state=0 starts random number generator from the same
# initial value for every run.
# points is the set of points in the blobs
# centers[blob_cluster_ids[i]] is the center that generated points[i]
# Use kmeans to cluster points into n_cluster clusters.
n_clusters = 4
kmeans = KMeans(n_clusters)
# Function to compute k-means cluster, plot clusters and centers, and
# return centers.
def plot_clusters_and_return_centers(points):
kmeans.fit(points)
# cluster_ids[j] is the id of the cluster associated with points[j].
cluster_ids = kmeans.predict(points)
# Plot the points. The parameter c identifies colors of points.
plt.scatter(points[:, 0], points[:, 1], c=cluster_ids,
s=50, cmap='viridis')
# Plot the centers of the clusters computed by k-means.
# kmeans.cluster_centers_ is the list of computed centers
# of the clusters
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black',
s=50, alpha=0.5);
plt.show()
return centers
# FUNCTION TO CHANGE THE POINT SET BY A FEW POINTS
# Generate more points around the centers. The number of points
# generated is n_more_points
# Create a sliding window in which n_more_points are generated around
# each center, and the previous n_more_points*n_clusters points are
# removed.
n_more_points=2
def change_points(points, n_more_points, n_clusters):
for center in CENTERS:
# points_around_center is list of n_more_points points
# distributed normally around the theoretical centers.
# Create new points
new_points, new_blob_cluster_ids = make_blobs(
n_more_points, n_features, centers=CENTERS,
cluster_std=stdev, random_state=0)
#points_around_center = normally_distributed_points(
#center, stdev=stdev, num_points=n_more_points)
# Append these new points to the previous set of points.
points = np.append(points, new_points, axis=0)
# Remove earlier points from the set of points.
# Since we added n_more_points to each cluster, we remove
# n_more_points*n_clusters.
points = points[n_more_points*n_clusters:]
return points
# Plot points where color of a point is associated with
# the cluster center that generated that point.
plt.scatter(points[:, 0], points[:, 1],
c=blob_cluster_ids, s=50, cmap='viridis');
# Plot the centers around which the blobs were generated.
plt.scatter(CENTERS[:, 0], CENTERS[:, 1], c='black', s=200, alpha=0.5);
# Plot points where color of a point is determined by
# the kmeans cluster associated with that point.
# Note that previously we associated the color of a point with
# the center that generated the point. This is the theoretical center.
# In this plot the color is associated with the cluster center computed
# using kmeans.
# The theoretical centers of the clusters are likely to be
# different from the centers computed by kmeans.
plot_clusters_and_return_centers(points)
points = change_points(points, n_more_points, n_clusters)
plot_clusters_and_return_centers(points)
def test_kmeans(n_steps, n_more_points,
n_samples, n_features, CENTERS, stdev):
# Create streams
stream_of_points = StreamArray(
name='stream_of_points', dimension=n_features)
stream_of_centers = Stream(name='stream_of_centers')
# Create agents
map_window(func=plot_clusters_and_return_centers,
in_stream=stream_of_points,
out_stream=stream_of_centers,
window_size=n_samples,
step_size=n_more_points);
sink_element(func=lambda v: print ('center: \n', v),
in_stream=stream_of_centers);
# Start the computation.
# First extend the stream with points around the true centers.
# Compute points around true centers.
points, blob_cluster_ids = make_blobs(
n_samples, n_features,
centers= CENTERS, cluster_std=stdev, random_state=0)
stream_of_points.extend(points)
# Sleep to mimic a stream coming from an external source.
time.sleep(0.001)
run()
# Extend the stream with n_more_points at each step.
for i in range(n_steps-1):
# Compute points to be added to the stream.
new_points, new_blob_cluster_ids = make_blobs(
n_more_points, n_features, centers=CENTERS,
cluster_std=stdev, random_state=0)
# Extend the stream with the new points and run.
stream_of_points.extend(new_points)
# Sleep to mimic a stream coming from an external source.
time.sleep(0.001)
run()
test_kmeans(n_steps=3, n_more_points=2, n_samples=20, n_features=2,
CENTERS= np.array(
[[1.0, 1.0], [1.0, -1.0], [-1.0, 1.0], [-1.0, -1.0]]),
stdev=0.1)
# Step 0: Define agent functions, source threads
# and actuator threads (if any).
# Step 0.0: Define agent functions.
# kmeans_agent is the agent for the process called ComputeKmeansProcess.
def kmeans_agent(in_streams, out_streams):
def h(window):
print ('plot')
return plot_clusters_and_return_centers(np.array(window))
map_window(func=h,
in_stream=in_streams[0],
out_stream=out_streams[0],
window_size=n_samples,
step_size=n_more_points);
# print_centers_agent is the agent for the process called
# PrintCentersProcess.
def print_centers_agent(in_streams, out_streams):
sink_element(func=lambda v: print ('center: \n', v),
in_stream=in_streams[0]);
# Step 0.1: Define source thread target.
n_steps = 2
def generate_points_thread_target(procs):
# Create a set of random points around CENTERS
points, blob_cluster_ids = make_blobs(
n_samples, n_features, centers=CENTERS,
cluster_std=stdev, random_state=0)
# Add points incrementally.
for _ in range(n_steps):
points_as_list_of_lists = list([
list(v) for v in points
])
# Extend stream with these additional points
#extend_stream(procs, data=points_as_list_of_lists,
#stream_name='stream_of_points')
extend_stream(procs, data=points,
stream_name='stream_of_points')
time.sleep(0.001)
# Create additional random points around CENTERS
points, blob_cluster_ids = make_blobs(
n_more_points, n_features, centers=CENTERS,
cluster_std=stdev, random_state=0)
# Terminate stream because the stream will not be extended
# further.
terminate_stream(procs, stream_name='stream_of_points')
# Step 1: multicore_specification of streams and processes.
# Specify Streams: list of pairs (stream_name, stream_type).
# Specify Processes: name, agent function,
# lists of inputs and outputs and sources, additional arguments.
multicore_specification = [
# Streams
[('stream_of_points', 'x'), ('stream_of_centers', 'x')],
# Processes
[{'name': 'ComputeKmeansProcess', 'agent': kmeans_agent,
'inputs':['stream_of_points'], 'sources':['stream_of_points'],
'outputs': ['stream_of_centers']},
{'name': 'PrintCentersProcess', 'agent': print_centers_agent,
'inputs':['stream_of_centers']},
]]
# Step 2: Create processes.
processes, procs = get_processes_and_procs(multicore_specification)
# Step 3: Create threads (if any)
generate_points_thread = threading.Thread(
target=generate_points_thread_target, args=(procs,))
# Step 4: Specify which process each thread runs in.
# thread_0 runs in the process called 'coordinator'
procs['Compute_kmeans'].threads = [generate_points_thread]
# Step 5: Start, join and terminate processes.
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: Sklearn function to generate random points
Step3: Function to compute kmeans and plot clusters.
Step4: Function to change the point set by a few points
Step5: Plot points and theoretical cluster centers
Step6: Plot points and k-means cluster centers
Step7: Change points and recompute cluster centers.
Step8: Part 2
Step9: A Multiprocessor Implementation
|
21 | <ASSISTANT_TASK:>
Python Code:
# Importing libraries
import csv
import matplotlib.pyplot as plt
from math import sqrt
from random import randrange
# Function that converts a whole data set from int to float
def str_column_to_float(data):
    newData = []
    for lines in data:
        aux = [float(x) for x in lines]
        newData.append(aux)
    return newData
# Function that loads the .csv file
def load_csv(filename):
lines = csv.reader(open(filename + ".csv", "r"))
data = list(lines)
return str_column_to_float(data)
# Function responsible for splitting the data into a training set and a test set
def split_train_test(data, test_size=0.4):
dataTrain = list(data)
dataTest = []
size = int(len(dataTrain)*test_size)
for _ in range(size):
idx = randrange(len(dataTrain))
dataTest.append(dataTrain.pop(idx))
return [dataTrain, dataTest]
# Function that computes the mean of a set of values
def mean(values):
    return sum(values)/len(values)
# Function that computes the variance of a set of values
def variance(values, mean):
    return sum([(x-mean)**2 for x in values])
# Computes the covariance between two variables X and Y
def covariance(x, mean_x, y, mean_y):
covar = 0.0
for i in range(len(x)):
covar += (x[i] - mean_x) * (y[i] - mean_y)
return covar
# Computes the regression coefficients for a given data set
def coefficients(dataset):
x = [row[0] for row in dataset]
y = [row[1] for row in dataset]
x_mean, y_mean = mean(x), mean(y)
b1 = covariance(x, x_mean, y, y_mean) / variance(x, x_mean)
b0 = y_mean - b1 * x_mean
return [b0, b1]
# Performs a simple linear regression, estimating the parameters from a training set
# and making predictions on a test set
def simple_linear_regression(train, test):
predictions = list()
b0, b1 = coefficients(train)
for row in test:
ypred = b0 + b1 * row[0]
predictions.append(ypred)
return predictions
# Computes the RMSE (Root Mean Squared Error) metric
def rmse_metric(actual, predicted):
sum_error = 0.0
for i in range(len(actual)):
prediction_error = predicted[i] - actual[i]
sum_error += (prediction_error ** 2)
mean_error = sum_error / float(len(actual))
return sqrt(mean_error)
# Evaluates the quality of a given algorithm on a dataset using
# RMSE as the metric
def evaluate_algorithm(train_set, test_set, algorithm):
predicted = algorithm(train_set, test_set)
actual = [row[-1] for row in test_set]
rmse = rmse_metric(actual, predicted)
return rmse
# Baseline: assumes the response value is always the mean of the payments
def baseline(train_set, test_set):
pays = [row[-1] for row in train_set]
meanValue = mean(pays)
predictions = [meanValue for i in range(len(test_set))]
return predictions
data = load_csv("insurance")
[trainData, testData] = split_train_test(data, test_size=0.4)
coef = coefficients(trainData)
predRegre = simple_linear_regression(trainData, testData)
predBase = baseline(trainData, testData)
avalRegre = evaluate_algorithm(trainData, testData, simple_linear_regression)
avalBase = evaluate_algorithm(trainData, testData, baseline)
print("Simple Linear Regression obtained the coefficients: alpha = {:.2f} and beta = {:.2f}".format(coef[0], coef[1]))
print("The RMSE error of this algorithm is {:.2f}".format(avalRegre))
print("The RMSE error for the baseline (mean of the payments) is {:.2f}".format(avalBase))
print("\nFirst 10 predictions:")
for i in range(10):
    print("Actual Y: {:.2f} \t| Predicted Y: {:.2f} \t | Baseline Y: {:.2f}".format(testData[i][-1], predRegre[i], predBase[i]))
# Separating inputs from outputs
X_train = [row[0] for row in trainData]; X_test = [row[0] for row in testData]
Y_train = [row[1] for row in trainData]; Y_test = [row[1] for row in testData]
predTrain = simple_linear_regression(trainData, trainData)
predTest = predRegre
plt.figure(figsize=[10, 5])
plt.subplot(1,2,1); plt.title("Training Data")
plt.scatter(X_train, Y_train, label="Train Data")
plt.plot(X_train, predTrain, 'r-', label="Regression Line")
plt.grid()
plt.legend()
plt.subplot(1,2,2); plt.title("Test Data")
plt.scatter(X_test, Y_test, c="green", label="Test Data")
plt.plot(X_test, predTest, 'r-', label="Regression Line")
plt.grid()
plt.legend()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using Simple Linear Regression and evaluating the algorithm
Step2: Visualizing the Linear Regression
|
22 | <ASSISTANT_TASK:>
Python Code:
!rm -rf hello setup.py && mkdir hello
%%file hello/hello.py
#pythran export hello()
def hello():
    '''Wave hello.'''
print("Hello from Pythran o/")
%%file hello/__init__.py
'''Hello package, featuring a Pythran kernel.'''
from hello import hello
%%file setup.py
from distutils.core import setup
try:
from pythran.dist import PythranExtension, PythranBuildExt
setup_args = {
'cmdclass': {"build_ext": PythranBuildExt},
'ext_modules': [PythranExtension('hello.hello', sources = ['hello/hello.py'])],
}
except ImportError:
print("Not building Pythran extension")
setup_args = {}
setup(name = 'hello',
version = '1.0',
description = 'Yet another demo package',
packages = ['hello'],
**setup_args)
%%sh
rm -rf build dist
python setup.py sdist 2>/dev/null 1>/dev/null
tar tf dist/hello-1.0.tar.gz | grep -E 'hello/hello.(py|cpp)' -o | sort
%%sh
rm -rf build dist
PYTHONPATH= python setup.py sdist 2>/dev/null 1>/dev/null
tar tf dist/hello-1.0.tar.gz | grep -E 'hello/hello.py' -o
%%sh
rm -rf build dist
python setup.py bdist 2>/dev/null 1>/dev/null
tar tf dist/hello-1.0.linux-x86_64.tar.gz | grep -E 'hello/hello.(py|cpp)' -o
%%sh
rm -rf build dist
PYTHONPATH= python setup.py bdist 2>/dev/null 1>/dev/null
tar tf dist/hello-1.0.linux-x86_64.tar.gz | grep -E 'hello/hello.py' -o
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Project layout
Step4: And so is the __init__.py file.
Step5: The setup.py file contains the classical metadata, plus a special header. This header basically states: if Pythran is available, use it; otherwise fall back to the plain Python file.
Step6: Running setup.py
Step7: But if pythran is no longer in the PYTHONPATH, the installation does not fail
Step8: In case of binary distribution, the native module is generated alongside the original source.
Step9: And if pythran is not in the PYTHONPATH, this still works \o/
|
23 | <ASSISTANT_TASK:>
Python Code:
from pylab import *
t = arange(0.0, 2.0,0.01)
y = sin(2*pi*t)
plot(t, y)
xlabel('Time (s)')
ylabel('Voltage (mV)')
title('The simplest one, buddies')
grid(True)
show()
from pylab import *
t = arange(0.0, 2.0,0.01)
y = sin(2*pi*t)
plot(t, y, color='red')
xlabel('Time (s)')
ylabel('Voltage (mV)')
title('The simplest one, buddies')
grid(True)
show()
from pylab import *
t = arange(0.0, 2.0,0.01)
y = sin(2*pi*t)
plot(t, y, color='green', linestyle='-.', linewidth=3)
xlabel('Time (s)', fontweight='bold', fontsize=14)
ylabel('Voltage (mV)', fontweight='bold', fontsize=14)
title('The simplest one, buddies')
grid(True)
show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Custom plot line
Step2: A custom 2D plot, based on our first example.
|
24 | <ASSISTANT_TASK:>
Python Code:
#Example conditional statements
x = 1
y = 2
x<y #x is less than y
#x is greater than y
x>y
#x is less-than or equal to y
x<=y
#x is greater-than or equal to y
x>=y
#Example of and operator
(1<2)and(2<3)
#Example of or operator
(1<2)or(2>3)
#Example of not operator
not(1>2)
x = 1
y = 2
if (x < y):
print("Yup, totally true!")
else:
print("Nope, completely wrong!")
x = 1
y = 2
if (x>y):
print("The condition is True!")
x+y
x = 1
while (x <= 10):
print(x)
x = x+1
x = 2
i = 0 #dummy variable
while (i<10):
x = 2*x
print(x)
i = i+1
#another way to write this is i+=1, but it's idiosyncratic and we won't use it here
#Defining a square root function
def sqrt(x):
if (x < 0):
print("Your input is not positive!")
else:
return x**(1/2)
sqrt(4)
sqrt(-4)
import math
print(math.sqrt(25))
print(math.sin(math.pi/2))
print(math.exp(math.pi)-math.pi)
def gcd(a, b):
    '''Calculate the Greatest Common Divisor of a and b.
    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    '''
while b > 0:
a, b = b, a%b
return a
print(gcd(120,16))
#Your code goes here
#Your code goes here
import numpy as np
x = np.array([1,2,3,4,5])
print(x)
y = x**2
print(y)
data = np.zeros(10)
print(data)
data[0] = fib(0)
print(data[0])
#Your code goes here
#Your code goes here
x = np.linspace(0,10,100)
x[0:3]
#Your code goes here
#Your code here
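# intV advances the velocity vector by one time step dt: gravity g changes only the y-component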
def intV(v,g,dt):
deltaVy = g*dt
vXnew = v[0]
vYnew = v[1]+deltaVy
return np.array([vXnew,vYnew])
dt = 0.1
intV(v,g,dt)
#Your code goes here
#Your code goes here
x = np.array([]) #defining an empty array that will store x position
y = np.array([]) #defining an empty array that will store y position
#Your code goes here
%matplotlib inline
import matplotlib.pyplot as plt
timeseriesData = np.loadtxt("./lecture2_data/timeseries_data.txt")
timeseriesData.shape
t = timeseriesData[0,:]
signal = timeseriesData[1,:]
#Your code goes here
cutOff = 15.
signalFix = signal[signal < cutOff]
tFix = t[signal < cutOff]
#Your code goes here
dataFix = np.array([tFix,signalFix])
np.save('./lecture2_data/dataFix.npy',dataFix)
np.savetxt('./lecture2_data/dataFix.txt',dataFix)
data = np.load('./lecture2_data/dataFix.npy')
t = data[0,:]
signal = data[1,:]
plt.plot(t,signal)
plt.show()
#Your code goes here
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you let a and b be conditional statements (like the above statements, e.g. a = x < y), then you can combine the two together using logical operators, which can be thought of as functions for conditional statements.
Step2: Now, these might not seem especially useful at first, but they're the bread and butter of programming. Even more importantly, they are used when we are doing if/else statements or loops, which we will now cover.
Step3: The idea here is that Python checks to see if the statement (in this case "x < y") is True. If it is, then it will do what is below the if statement. The else statement tells Python what to do if the condition is False.
Step4: While-loops are similar to if statements, in the sense that they also have a conditional statement that is built into it and it executes when the conditional is True. However, the only difference is, it will KEEP executing that segment of code until the conditional statement becomes False.
Step5: Note here that we tell Python to print the number x (x starts at 1) and then redefining x as itself +1 (so, x=1 gets redefined to x = x+1 = 1+1 = 2). Python then executes the loop again, but now x has been incremented by 1. We continue this process from x = 1 to x = 10, printing out x every time. Thus, with a fairly compact bit of code, you get 10 lines of output.
Step6: But...what if the conditional statement is always true?
Step7: So the outline for a function is
Step9: When defining your own functions, you can also use multiple input variables. For example, if we want to find the greatest common divisor (gcd) of two numbers, we could apply something called Euclid's algorithm.
Step10: Challenge 1 - Fibonacci Sequence and the Golden Ratio
Step11: The ratio of successive elements in the Fibonacci sequence converges to
Step12: C. Numpy Arrays - Review of Basics and Some More Advanced Topics
Step13: Here we are calling in the contents of numpy and giving it the shorthand name 'np' for convenience.
Step14: As we learned in Lecture 1, numpy arrays are convenient because they allow us to do math across the whole array and not just individual numbers.
Step15: The documentation of possible functions that can be applied to integers and floats (i.e. single numbers), as well as numpy arrays, can be found here
Step16: Now how do we assign a new value to an element of the array? We use the following "square bracket" notation
Step17: Now you try it. Store the second Fibonacci number in the second position of your array and use a print statement to verify that you have done so.
Step18: Python array indexing is fairly straightforward once you get the hang of it.
Step19: Now, sometimes its useful to access more than one element of an array. Let's say that we have an array with 100 elements in the range [0,10] (including endpoints). If you recall, this can be done via the np.linspace() function.
Step20: Now then, in order to get a range of elements rather than simply a single one, we use the notation
Step21: If you want everything passed a certain point of the array (including that point), then you would just eliminate the right number, for example
Step22: Finally, simply using the "
Step23: Now then, remember that,
Step24: Now that we've defined intV (short for "integrate v"), let's use it real quick, just to test it out. Let dt = 0.1 (meaning, your taking a step forward in time by 0.1 seconds).
Step25: As you can see, $V_{x}$ hasn't changed, but $V_{y}$ has decreased, representing the projectile slowing down as it's going upward.
Step26: Now we have the functions that calculate the changes in the position and velocity vectors. We're almost there!
Step27: Now, note that we've defined the while-loop such that it doesn't stop exactly at 0. Firstly, this was strategic, since the initial y = 0, and thus the while-loop wouldn't initialize to begin with (you can try to change it). One way you can overcome this issue is to decrease dt, meaning that you're letting less time pass between each step. Ideally, you'd want dt to be infinitely small, but we don't have that convenience in reality. Re-run the cells, but with dt = 0.01 and we will get much closer to the correct answer.
Step28: Now, all you have to do, is each time the while-loop executes, you use np.append() for the x and y arrays, adding the new values to the end of them.
Step29: If everything turns out alright, you should get the characteristic parabola.
Step30: Now then, let's say we are doing a timing experiment, where we look at the brightness of an object as a function of time. This is actually a very common type of measurement that you may do in research, such as looking for dips in the brightness of stars as a way to detect planets.
Step31: Now we have the data loaded into Python as a numpy array, and one handy thing you can do is to use Python to find the dimensions of the array. This is done by using ".shape" as so.
Step32: In this format, we know that this is a 2x1000 array (two rows, 1000 columns). Another way you can think about this is that you have two 1000-element arrays contained within another array, where each of those arrays are elements (think of it as an array of arrays).
Step33: Here, you have 2 dimensions with the array timeseriesData, and as such much specify the row first and then the column. So,
Step34: Looking at our data, you see clear spikes that jump well above most of the signal. (I've added this to the data to represent outliers that may sometimes appear when you're messing with raw data, and those must be dealt with). In astronomy, you sometimes have relativistic charged particles, not from your source, that hit the detector known as cosmic rays, and we often have to remove these.
Step35: In this case, the conditional statement that we have used is signal < cutOff.
Step36: Now let's plot it. You try.
Step37: Now that you have your data all cleaned up, it would be nice if we could save it for later and not have to go through the process of cleaning it up every time. Fear not! Python has you covered.
Step38: Then, we can use either the np.save() function or the np.savetxt function, the first saving the array into a '.npy' file and the other, into a '.txt' file. The syntax is pretty much the same for each.
Step39: Now that your data files are saved, you can load them up again, using np.loadtxt() and np.load() for .txt and .npy files respectively. We used np.loadtxt() above, and np.load works the same way. So, let's load in the .npy file and see if our data was saved correctly.
Step40: Now, let's see if you can do the same thing, but with the .txt file that we saved.
|
25 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([[ 0, 1, 2, 3, 4, 5],
[ 5, 6, 7, 8, 9, 10],
[10, 11, 12, 13, 14, 15],
[15, 16, 17, 18, 19, 20],
[20, 21, 22, 23, 24, 25]])
result = np.diag(np.fliplr(a))
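# For the array above this picks the anti-diagonal starting at the top-right corner: [ 5,  9, 13, 17, 21]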
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
26 | <ASSISTANT_TASK:>
Python Code:
# load the data from files
reviews_test = pd.read_csv('data/reviews_test.csv', header=0, encoding='utf-8')
reviews_train = pd.read_csv('data/reviews_train.csv', header=0, encoding='utf-8')
reviews_internet = pd.read_csv('data/internet_reviews.csv', header=0, encoding='utf-8')
# training set
X_train_raw = reviews_train.comment
y_train_raw = reviews_train.reting
# validation set
X_test_raw = reviews_test.comment
y_test_raw = reviews_test.reting
# additional training set
X_internet = reviews_internet.comment
y_internet = reviews_internet.rating
DIR = 'data/w2v_models/'
MODEL_NAME = 'tenth.norm-sz500-w7-cb0-it5-min5.w2v'
#MODEL_NAME = 'all.norm-sz100-w10-cb0-it1-min100.w2v'
# initializing the comment preprocessor
sp = SentenceProcessor(DIR + MODEL_NAME)
sp.stop_list = stop_words
sp.sample_len = 100
WORD_VECTOR_LENGTH = sp.w2v.vector_size # 500 each word
SENTENCE_LENGTH = sp.sample_len # 100 words
print ('Comment dimensionality: {}x{}'.format(SENTENCE_LENGTH, WORD_VECTOR_LENGTH))
# bring both dataframes to the same format
del reviews_internet['Unnamed: 0']
reviews_internet = reviews_internet.rename(columns={'rating':'reting'})
# concatenate the data
new_reviews = reviews_train.append(reviews_internet, ignore_index=True)
# add a new column
new_reviews['bag_of_words'] = ' '
# initial data preprocessing (tokenization, normalization, spelling correction)
bag_of_bags = []
for i in tqdm_notebook(range(len(new_reviews.comment))):
bag_of_bags.append(' '.join(sp.process(new_reviews.comment[i])))
# add the processed comments to the new column
new_reviews['bag_of_words'] = bag_of_bags
# save our data in .csv format
new_reviews.to_csv('data/new_reviews.csv')
# just in case, also save the processed comments as .pkl
with open('bag_{}.pkl'.format(len(bag_of_bags)), 'wb') as f:
    pickle.dump(bag_of_bags, f)
# read from file if the previous steps have already been completed
new_reviews = pd.read_csv('data/new_reviews.csv', encoding='cp1251')
# full training set
X_train_all = new_reviews.bag_of_words
y_train_all = new_reviews.reting
# training set from M.Video only (without the additional internet-provider reviews)
X_train = new_reviews.bag_of_words[:12410]
y_train = new_reviews.reting[:12410]
# test set
X_val = reviews_test.comment
y_val = reviews_test.reting
from utils.generators import train_generator, test_generator
from keras.regularizers import l2 as L2
l2 = L2(0.01)
MODELS_DIR = 'saved_models/'
# build the neural network model
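# (a dense branch plus three parallel 1D convolutions with filter lengths 2/3/4,
#  concatenated, global-max-pooled over time, with dropout before the single regression output)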
input = keras.layers.Input(shape=(SENTENCE_LENGTH, WORD_VECTOR_LENGTH))
l_0 = keras.layers.Dense(256)(input)
l_1 = keras.layers.Conv1D(nb_filter=256, filter_length=2, activation='relu', W_regularizer=l2)(input)
l_2 = keras.layers.Conv1D(nb_filter=256, filter_length=3, activation='relu', W_regularizer=l2)(input)
l_3 = keras.layers.Conv1D(nb_filter=256, filter_length=4, activation='relu', W_regularizer=l2)(input)
merge = keras.layers.Merge(mode='concat', concat_axis=1) ([l_0, l_1, l_2, l_3])
l_5 = keras.layers.GlobalMaxPooling1D()(merge)
l_6 = keras.layers.Dropout(0.5)(l_5)
output = keras.layers.Dense(1, activation=None, W_regularizer=l2)(l_6)
model = keras.models.Model(input, output, name='model_5')
model.summary()
# load a previously trained model
model = keras.models.load_model('saved_models/model_5_mse_0.9056_extra')
# training
optimizer = keras.optimizers.SGD(lr=0.01)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mse'])
model.fit_generator(
train_generator(X_train, y_train, sp, shuffle=True),
nb_epoch=10,
samples_per_epoch=len(y_train),
validation_data=train_generator(X_val, y_val, sp, shuffle=False),
nb_val_samples=len(y_val),
verbose=2,
)
# save the trained model
model.save('saved_models/trained_model_1(keras==1.2.2)')
# metrics
errors = get_score(model, X_val_vec, y_val.values, sparse=40)
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
def to_bool(y):
y_bool = []
for a in y:
if a == 5.0:
y_bool.append(1)
else:
y_bool.append(0)
return y_bool
y_train_bool = to_bool(y_train)
y_test_bool = to_bool(y_test)
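# NB: the X_train/X_test arrays used below are assumed to be the word2vec-encoded
# comment matrices (e.g. produced with the SentenceProcessor earlier), and y_test the
# matching labels -- not the raw text Series -- otherwise the .reshape() calls would fail.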
p = PCA(n_components=100)
p.fit(X_train.reshape((X_train.shape[0], -1)))
X_train_transformed = p.transform(X_train.reshape((X_train.shape[0], -1)))
X_test_transformed = p.transform(X_test.reshape((X_test.shape[0], -1)))
clf = SVC(C=100.0)
clf.fit(X_train_transformed, y_train_bool)
y_pred = clf.predict(X_test_transformed)
accuracy_score(y_test_bool, y_pred)
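# Optionally, a fuller look at the 5 / not-5 classifier using standard scikit-learn
# metrics (assumes y_test_bool and y_pred from the cell above):
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test_bool, y_pred, target_names=['not 5', '5']))
print(confusion_matrix(y_test_bool, y_pred))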
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the comment processor
Step2: Data processing
Step3: Training the model
Step4: Results
Step5: Classifier: 5 / not 5
Step6: Reducing the dimensionality
Step7: Training the classifier
|
27 | <ASSISTANT_TASK:>
Python Code:
# Python
// JavaScript
# No output
# Plain text output
"Hello world"
True
False
42
import math
math.pi
dict(a=1,b=2)
list(range(10))
dict(a='string', b=1, c=3.14, d=[1, 2, 3], e=dict(f=1))
# Stream output
print("Just a string")
# Matplotlib
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x))
plt.show()
# Plotly
import plotly.express as px
fig = px.scatter(px.data.iris(), x="sepal_width", y="sepal_length")
fig.show()
# Altair
import altair as alt
import pandas as pd
alt.renderers.enable('mimetype')
source = pd.DataFrame({
'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]
})
alt.Chart(source).mark_bar().encode(
x='a',
y='b'
)
import pandas as pd
import numpy as np
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])
# By default a `text/html` representation of the data frame is generated
df
# Print the data.frame so that is output as `text/plain`
print(df)
A syntax error
foo()
import sys
print("Whooaah!", file=sys.stderr)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following code cells illustrate how different types of cell outputs are decoded.
Step2: Primitive outputs
Step3: Image outputs
Step4: HTML outputs
Step5: Preformatted text outputs
Step6: Errors
|
28 | <ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')
import pandas as pd
print(pd.__version__)
assert StrictVersion(pd.__version__) >= StrictVersion('0.20.0')
# graph definition
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.],[2.]])
product = tf.matmul(matrix1, matrix2)
# launching the graph in a session
with tf.Session() as sess:
result = sess.run([product])
print(result)
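# For reference, the same product computed eagerly with NumPy -- the graph above
# should return the identical value, [[12.]]:
import numpy as np
print(np.matmul([[3., 3.]], [[2.], [2.]]))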
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()
# Add an op to subtract 'a' from 'x'. Run it and print the result
sub = tf.subtract(x, a)
print(sub.eval())
# ==> [-2. -1.]
# Close the Session when we're done.
sess.close()
!cat sample_iris.json
# Example for iris, model exported as Tensorflow
# gsutil cp -R 1 gs://irisnn
# create model and version at https://console.cloud.google.com/mlengine
# in a DOS shell on local machine in this folder
# gcloud ml-engine predict --model=irisnn --json-instances=./sample_iris.json
# SCORES
# [0.9954029321670532, 0.004596732556819916, 3.3544753819114703e-07]
!curl -O https://raw.githubusercontent.com/DJCordhose/speed-limit-signs/master/data/speed-limit-signs.zip
from zipfile import ZipFile
zip = ZipFile(r'speed-limit-signs.zip')
zip.extractall('.')
# Calling a Keras based model to classify speed limit signs
!curl -H "Content-Type: application/json" -X GET -d '{"url": "https://github.com/DJCordhose/speed-limit-signs/raw/master/data/real-world/4/100-sky-cutoff-detail.jpg", "model": "default" }' http://ec2-52-43-39-37.us-west-2.compute.amazonaws.com:8888
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How does Tensorflow Low Level API look like?
Step2: Interactive usage of Low Level API
Step3: Calling a TensorFlow Model deployed on Google Cloud ML Service
Step4: What can Keras do for us?
|
29 | <ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot
# We have this here to trigger matplotlib's font cache stuff.
# This cell is hidden from the output
import pandas as pd
import numpy as np
np.random.seed(24)
df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))],
axis=1)
df.iloc[3, 3] = np.nan
df.iloc[0, 2] = np.nan
df.style
df.style.highlight_null().render().split('\n')[:10]
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    strings, black otherwise.
    """
color = 'red' if val < 0 else 'black'
return 'color: %s' % color
s = df.style.applymap(color_negative_red)
s
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
df.style.apply(highlight_max)
df.style.\
applymap(color_negative_red).\
apply(highlight_max)
def highlight_max(data, color='yellow'):
'''
highlight the maximum in a Series or DataFrame
'''
attr = 'background-color: {}'.format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else '' for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(np.where(is_max, attr, ''),
index=data.index, columns=data.columns)
df.style.apply(highlight_max, color='darkorange', axis=None)
df.style.apply(highlight_max, subset=['B', 'C', 'D'])
df.style.applymap(color_negative_red,
subset=pd.IndexSlice[2:5, ['B', 'D']])
df.style.format("{:.2%}")
df.style.format({'B': "{:0<4.0f}", 'D': '{:+.2f}'})
df.style.format({"B": lambda x: "±{:.2f}".format(abs(x))})
df.style.format("{:.2%}", na_rep="-")
df.style.highlight_max().format(None, na_rep="-")
df.style.highlight_null(null_color='red')
import seaborn as sns
cm = sns.light_palette("green", as_cmap=True)
s = df.style.background_gradient(cmap=cm)
s
# Uses the full color range
df.loc[:4].style.background_gradient(cmap='viridis')
# Compress the color range
(df.loc[:4]
.style
.background_gradient(cmap='viridis', low=.5, high=0)
.highlight_null('red'))
df.style.highlight_max(axis=0)
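# There is a companion highlight_min as well, for example:
df.style.highlight_min(axis=0)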
df.style.set_properties(**{'background-color': 'black',
'color': 'lawngreen',
'border-color': 'white'})
df.style.bar(subset=['A', 'B'], color='#d65f5f')
df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])
import pandas as pd
from IPython.display import HTML
# Test series
test1 = pd.Series([-100,-60,-30,-20], name='All Negative')
test2 = pd.Series([10,20,50,100], name='All Positive')
test3 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')
head = """
<table>
    <thead>
        <th>Align</th>
        <th>All Negative</th>
        <th>All Positive</th>
        <th>Both Neg and Pos</th>
    </thead>
    </tbody>
"""
aligns = ['left','zero','mid']
for align in aligns:
row = "<tr><th>{}</th>".format(align)
for series in [test1,test2,test3]:
s = series.copy()
s.name=''
row += "<td>{}</td>".format(s.to_frame().style.bar(align=align,
color=['#d65f5f', '#5fba7d'],
width=100).render()) #testn['width']
row += '</tr>'
head += row
head += """
</tbody>
</table>
"""
HTML(head)
df2 = -df
style1 = df.style.applymap(color_negative_red)
style1
style2 = df2.style
style2.use(style1.export())
style2
with pd.option_context('display.precision', 2):
html = (df.style
.applymap(color_negative_red)
.apply(highlight_max))
html
df.style\
.applymap(color_negative_red)\
.apply(highlight_max)\
.set_precision(2)
df.style.set_caption('Colormaps, with a caption.')\
.background_gradient(cmap=cm)
from IPython.display import HTML
def hover(hover_color="#ffff99"):
return dict(selector="tr:hover",
props=[("background-color", "%s" % hover_color)])
styles = [
hover(),
dict(selector="th", props=[("font-size", "150%"),
("text-align", "center")]),
dict(selector="caption", props=[("caption-side", "bottom")])
]
html = (df.style.set_table_styles(styles)
.set_caption("Hover to highlight."))
html
html = html.set_table_styles({
'B': [dict(selector='', props=[('color', 'green')])],
'C': [dict(selector='td', props=[('color', 'red')])],
}, overwrite=False)
html
(df.style
.set_na_rep("FAIL")
.format(None, na_rep="PASS", subset=["D"])
.highlight_null("yellow"))
df.style.hide_index()
df.style.hide_columns(['C','D'])
from IPython.html import widgets
@widgets.interact
def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):
return df.style.background_gradient(
cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,
as_cmap=True)
)
def magnify():
return [dict(selector="th",
props=[("font-size", "4pt")]),
dict(selector="td",
props=[('padding', "0em 0em")]),
dict(selector="th:hover",
props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",
props=[('max-width', '200px'),
('font-size', '12pt')])
]
np.random.seed(25)
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()
bigdf.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '1pt'})\
.set_caption("Hover to magnify")\
.set_precision(2)\
.set_table_styles(magnify())
df.style.\
applymap(color_negative_red).\
apply(highlight_max).\
to_excel('styled.xlsx', engine='openpyxl')
from jinja2 import Environment, ChoiceLoader, FileSystemLoader
from IPython.display import HTML
from pandas.io.formats.style import Styler
with open("templates/myhtml.tpl") as f:
print(f.read())
class MyStyler(Styler):
env = Environment(
loader=ChoiceLoader([
FileSystemLoader("templates"), # contains ours
Styler.loader, # the default
])
)
template = env.get_template("myhtml.tpl")
MyStyler(df)
HTML(MyStyler(df).render(table_title="Extending Example"))
EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
EasyStyler(df)
with open("templates/template_structure.html") as f:
structure = f.read()
HTML(structure)
# Hack to get the same style in the notebook as the
# main site. This is hidden in the docs.
from IPython.display import HTML
with open("themes/nature_with_gtoc/static/nature.css_t") as f:
css = f.read()
HTML('<style>{}</style>'.format(css))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here's a boring example of rendering a DataFrame, without any (visible) styles
Step2: Note
Step4: The row0_col2 is the identifier for that particular cell. We've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page (you can set the uuid if you'd like to tie together the styling of two DataFrames).
Step5: In this case, the cell's style depends only on its own value.
Step6: Notice the similarity with the standard df.applymap, which operates on DataFrames elementwise. We want you to be able to reuse your existing knowledge of how to interact with DataFrames.
Step7: In this case the input is a Series, one column at a time.
Step8: Above we used Styler.apply to pass in each column one at a time.
Step9: When using Styler.apply(func, axis=None), the function must return a DataFrame with the same index and column labels.
Step10: Building Styles Summary
Step11: For row and column slicing, any valid indexer to .loc will work.
Step12: Only label-based slicing is supported right now, not positional.
Step13: Use a dictionary to format specific columns.
Step14: Or pass in a callable (or dictionary of callables) for more flexible handling.
Step15: You can format the text displayed for missing values by na_rep.
Step16: These formatting techniques can be used in combination with styling.
Step17: Builtin styles
Step18: You can create "heatmaps" with the background_gradient method. These require matplotlib, and we'll use Seaborn to get a nice colormap.
Step19: Styler.background_gradient takes the keyword arguments low and high. Roughly speaking these extend the range of your data by low and high percent so that when we convert the colors, the colormap's entire range isn't used. This is useful so that you can actually read the text still.
Step20: There's also .highlight_min and .highlight_max.
Step21: Use Styler.set_properties when the style doesn't actually depend on the values.
Step22: Bar charts
Step23: New in version 0.20.0 is the ability to customize further the bar chart
Step26: The following example aims to give a highlight of the behavior of the new align options
Step27: Sharing styles
Step28: Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been used upon.
Step29: Or through a set_precision method.
Step30: Setting the precision only affects the printed number; the full-precision values are always passed to your style functions. You can always use df.round(2).style if you'd prefer to round from the start.
Step31: Table styles
Step32: table_styles should be a list of dictionaries.
Step33: Missing values
Step34: Hiding the Index or Columns
Step35: CSS classes
Step36: Export to Excel
Step37: A screenshot of the output
Step38: We'll use the following template
Step39: Now that we've created a template, we need to set up a subclass of Styler that
Step40: Notice that we include the original loader in our environment's loader.
Step41: Our custom template accepts a table_title keyword. We can provide the value in the .render method.
Step42: For convenience, we provide the Styler.from_custom_template method that does the same as the custom subclass.
Step43: Here's the template structure
Step44: See the template in the GitHub repo for more details.
|
30 | <ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Show event related fields images
|
31 | <ASSISTANT_TASK:>
Python Code:
#export
from exp.nb_01 import *
def get_data():
path = datasets.download_data(MNIST_URL, ext='.gz')
with gzip.open(path, 'rb') as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
return map(tensor, (x_train,y_train,x_valid,y_valid))
def normalize(x, m, s): return (x-m)/s
x_train,y_train,x_valid,y_valid = get_data()
train_mean,train_std = x_train.mean(),x_train.std()
train_mean,train_std
x_train = normalize(x_train, train_mean, train_std)
# NB: Use training, not validation mean for validation set
x_valid = normalize(x_valid, train_mean, train_std)
train_mean,train_std = x_train.mean(),x_train.std()
train_mean,train_std
#export
def test_near_zero(a,tol=1e-3): assert a.abs()<tol, f"Near zero: {a}"
test_near_zero(x_train.mean())
test_near_zero(1-x_train.std())
n,m = x_train.shape
c = y_train.max()+1
n,m,c
# num hidden
nh = 50
# standard xavier init
w1 = torch.randn(m,nh)/math.sqrt(m)
b1 = torch.zeros(nh)
w2 = torch.randn(nh,1)/math.sqrt(nh)
b2 = torch.zeros(1)
test_near_zero(w1.mean())
test_near_zero(w1.std()-1/math.sqrt(m))
# This should be ~ (0,1) (mean,std)...
x_valid.mean(),x_valid.std()
def lin(x, w, b): return x@w + b
t = lin(x_valid, w1, b1)
#...so should this, because we used xavier init, which is designed to do this
t.mean(),t.std()
def relu(x): return x.clamp_min(0.)
t = relu(lin(x_valid, w1, b1))
#...actually it really should be this!
t.mean(),t.std()
# kaiming init / he init for relu
w1 = torch.randn(m,nh)*math.sqrt(2/m)
w1.mean(),w1.std()
t = relu(lin(x_valid, w1, b1))
t.mean(),t.std()
#export
from torch.nn import init
w1 = torch.zeros(m,nh)
init.kaiming_normal_(w1, mode='fan_out')
t = relu(lin(x_valid, w1, b1))
init.kaiming_normal_??
w1.mean(),w1.std()
t.mean(),t.std()
w1.shape
import torch.nn
torch.nn.Linear(m,nh).weight.shape
torch.nn.Linear.forward??
torch.nn.functional.linear??
torch.nn.Conv2d??
torch.nn.modules.conv._ConvNd.reset_parameters??
# what if...?
def relu(x): return x.clamp_min(0.) - 0.5
# kaiming init / he init for relu
w1 = torch.randn(m,nh)*math.sqrt(2./m )
t1 = relu(lin(x_valid, w1, b1))
t1.mean(),t1.std()
def model(xb):
l1 = lin(xb, w1, b1)
l2 = relu(l1)
l3 = lin(l2, w2, b2)
return l3
%timeit -n 10 _=model(x_valid)
assert model(x_valid).shape==torch.Size([x_valid.shape[0],1])
model(x_valid).shape
#export
def mse(output, targ): return (output.squeeze(-1) - targ).pow(2).mean()
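# Quick illustration of why the squeeze(-1) matters: without it, an (n,1) output
# broadcast against an (n,) target silently produces an (n,n) matrix before the mean.
print((torch.zeros(4, 1) - torch.zeros(4)).shape)              # torch.Size([4, 4])
print((torch.zeros(4, 1).squeeze(-1) - torch.zeros(4)).shape)  # torch.Size([4])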
y_train,y_valid = y_train.float(),y_valid.float()
preds = model(x_train)
preds.shape
mse(preds, y_train)
def mse_grad(inp, targ):
# grad of loss with respect to output of previous layer
inp.g = 2. * (inp.squeeze() - targ).unsqueeze(-1) / inp.shape[0]
def relu_grad(inp, out):
# grad of relu with respect to input activations
inp.g = (inp>0).float() * out.g
def lin_grad(inp, out, w, b):
# grad of matmul with respect to input
inp.g = out.g @ w.t()
w.g = (inp.unsqueeze(-1) * out.g.unsqueeze(1)).sum(0)
b.g = out.g.sum(0)
def forward_and_backward(inp, targ):
# forward pass:
l1 = inp @ w1 + b1
l2 = relu(l1)
out = l2 @ w2 + b2
# we don't actually need the loss in backward!
loss = mse(out, targ)
# backward pass:
mse_grad(out, targ)
lin_grad(l2, out, w2, b2)
relu_grad(l1, l2)
lin_grad(inp, l1, w1, b1)
forward_and_backward(x_train, y_train)
# Save for testing against later
w1g = w1.g.clone()
w2g = w2.g.clone()
b1g = b1.g.clone()
b2g = b2.g.clone()
ig = x_train.g.clone()
xt2 = x_train.clone().requires_grad_(True)
w12 = w1.clone().requires_grad_(True)
w22 = w2.clone().requires_grad_(True)
b12 = b1.clone().requires_grad_(True)
b22 = b2.clone().requires_grad_(True)
def forward(inp, targ):
# forward pass:
l1 = inp @ w12 + b12
l2 = relu(l1)
out = l2 @ w22 + b22
# we don't actually need the loss in backward!
return mse(out, targ)
loss = forward(xt2, y_train)
loss.backward()
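# test_near is imported from exp.nb_01 (built in the previous notebook); as a reminder,
# a minimal sketch of what it checks might be (not necessarily the exact definition):
# def test_near(a, b): assert torch.allclose(a, b, rtol=1e-3, atol=1e-5), f"Not near: {a} != {b}"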
test_near(w22.grad, w2g)
test_near(b22.grad, b2g)
test_near(w12.grad, w1g)
test_near(b12.grad, b1g)
test_near(xt2.grad, ig )
class Relu():
def __call__(self, inp):
self.inp = inp
self.out = inp.clamp_min(0.)-0.5
return self.out
def backward(self): self.inp.g = (self.inp>0).float() * self.out.g
class Lin():
def __init__(self, w, b): self.w,self.b = w,b
def __call__(self, inp):
self.inp = inp
self.out = inp@self.w + self.b
return self.out
def backward(self):
self.inp.g = self.out.g @ self.w.t()
# Creating a giant outer product, just to sum it, is inefficient!
self.w.g = (self.inp.unsqueeze(-1) * self.out.g.unsqueeze(1)).sum(0)
self.b.g = self.out.g.sum(0)
class Mse():
def __call__(self, inp, targ):
self.inp = inp
self.targ = targ
self.out = (inp.squeeze() - targ).pow(2).mean()
return self.out
def backward(self):
self.inp.g = 2. * (self.inp.squeeze() - self.targ).unsqueeze(-1) / self.targ.shape[0]
class Model():
def __init__(self, w1, b1, w2, b2):
self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)]
self.loss = Mse()
def __call__(self, x, targ):
for l in self.layers: x = l(x)
return self.loss(x, targ)
def backward(self):
self.loss.backward()
for l in reversed(self.layers): l.backward()
w1.g,b1.g,w2.g,b2.g = [None]*4
model = Model(w1, b1, w2, b2)
%time loss = model(x_train, y_train)
%time model.backward()
test_near(w2g, w2.g)
test_near(b2g, b2.g)
test_near(w1g, w1.g)
test_near(b1g, b1.g)
test_near(ig, x_train.g)
class Module():
def __call__(self, *args):
self.args = args
self.out = self.forward(*args)
return self.out
def forward(self): raise Exception('not implemented')
def backward(self): self.bwd(self.out, *self.args)
class Relu(Module):
def forward(self, inp): return inp.clamp_min(0.)-0.5
def bwd(self, out, inp): inp.g = (inp>0).float() * out.g
class Lin(Module):
def __init__(self, w, b): self.w,self.b = w,b
def forward(self, inp): return inp@self.w + self.b
def bwd(self, out, inp):
inp.g = out.g @ self.w.t()
self.w.g = torch.einsum("bi,bj->ij", inp, out.g)
self.b.g = out.g.sum(0)
class Mse(Module):
def forward (self, inp, targ): return (inp.squeeze() - targ).pow(2).mean()
def bwd(self, out, inp, targ): inp.g = 2*(inp.squeeze()-targ).unsqueeze(-1) / targ.shape[0]
class Model():
def __init__(self):
self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)]
self.loss = Mse()
def __call__(self, x, targ):
for l in self.layers: x = l(x)
return self.loss(x, targ)
def backward(self):
self.loss.backward()
for l in reversed(self.layers): l.backward()
w1.g,b1.g,w2.g,b2.g = [None]*4
model = Model()
%time loss = model(x_train, y_train)
%time model.backward()
test_near(w2g, w2.g)
test_near(b2g, b2.g)
test_near(w1g, w1.g)
test_near(b1g, b1.g)
test_near(ig, x_train.g)
class Lin(Module):
def __init__(self, w, b): self.w,self.b = w,b
def forward(self, inp): return inp@self.w + self.b
def bwd(self, out, inp):
inp.g = out.g @ self.w.t()
self.w.g = inp.t() @ out.g
self.b.g = out.g.sum(0)
w1.g,b1.g,w2.g,b2.g = [None]*4
model = Model()
%time loss = model(x_train, y_train)
%time model.backward()
test_near(w2g, w2.g)
test_near(b2g, b2.g)
test_near(w1g, w1.g)
test_near(b1g, b1.g)
test_near(ig, x_train.g)
#export
from torch import nn
class Model(nn.Module):
def __init__(self, n_in, nh, n_out):
super().__init__()
self.layers = [nn.Linear(n_in,nh), nn.ReLU(), nn.Linear(nh,n_out)]
self.loss = mse
def __call__(self, x, targ):
for l in self.layers: x = l(x)
return self.loss(x.squeeze(), targ)
model = Model(m, nh, 1)
%time loss = model(x_train, y_train)
%time loss.backward()
!./notebook2script.py 02_fully_connected.ipynb
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Foundations version
Step2: Tinker practice
Step3: From pytorch docs
Step4: Loss function
Step5: We need squeeze() to get rid of that trailing (,1), in order to use mse. (Of course, mse is not a suitable loss function for multi-class classification; we'll use a better loss function soon. We'll use mse for now to keep things simple.)
Step6: Gradients and backward pass
Step7: We cheat a little bit and use PyTorch autograd to check our results.
Step8: Refactor model
Step9: Module.forward()
Step10: Without einsum
Step11: nn.Linear and nn.Module
Step12: Export
|
32 | <ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
# sphinx_gallery_thumbnail_number = 9
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evoked)
evoked_l_aud = evoked[0]
evoked_r_aud = evoked[1]
evoked_l_vis = evoked[2]
evoked_r_vis = evoked[3]
fig = evoked_l_aud.plot(exclude=(), time_unit='s')
fig.tight_layout()
picks = mne.pick_types(evoked_l_aud.info, meg=True, eeg=False, eog=False)
evoked_l_aud.plot(spatial_colors=True, gfp=True, picks=picks, time_unit='s')
evoked_l_aud.plot_topomap(time_unit='s')
times = np.arange(0.05, 0.151, 0.05)
evoked_r_aud.plot_topomap(times=times, ch_type='mag', time_unit='s')
evoked_r_aud.plot_topomap(times='peaks', ch_type='mag', time_unit='s')
fig, ax = plt.subplots(1, 5, figsize=(8, 2))
kwargs = dict(times=0.1, show=False, vmin=-300, vmax=300, time_unit='s')
evoked_l_aud.plot_topomap(axes=ax[0], colorbar=True, **kwargs)
evoked_r_aud.plot_topomap(axes=ax[1], colorbar=False, **kwargs)
evoked_l_vis.plot_topomap(axes=ax[2], colorbar=False, **kwargs)
evoked_r_vis.plot_topomap(axes=ax[3], colorbar=False, **kwargs)
for ax, title in zip(ax[:4], ['Aud/L', 'Aud/R', 'Vis/L', 'Vis/R']):
ax.set_title(title)
plt.show()
ts_args = dict(gfp=True, time_unit='s')
topomap_args = dict(sensors=False, time_unit='s')
evoked_r_aud.plot_joint(title='right auditory', times=[.09, .20],
ts_args=ts_args, topomap_args=topomap_args)
conditions = ["Left Auditory", "Right Auditory", "Left visual", "Right visual"]
evoked_dict = dict()
for condition in conditions:
evoked_dict[condition.replace(" ", "/")] = mne.read_evokeds(
fname, baseline=(None, 0), proj=True, condition=condition)
print(evoked_dict)
colors = dict(Left="Crimson", Right="CornFlowerBlue")
linestyles = dict(Auditory='-', visual='--')
pick = evoked_dict["Left/Auditory"].ch_names.index('MEG 1811')
mne.viz.plot_compare_evokeds(evoked_dict, picks=pick, colors=colors,
linestyles=linestyles, split_legend=True)
evoked_r_aud.plot_image(picks=picks, time_unit='s')
title = 'MNE sample data\n(condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment,
background_color='k', color=['white'])
mne.viz.plot_evoked_topo(evoked, title=title % 'Left/Right Auditory/Visual',
background_color='w')
subjects_dir = data_path + '/subjects'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
maps = mne.make_field_map(evoked_l_aud, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1)
# Finally, explore several points in time
field_map = evoked_l_aud.plot_field(maps, time=.1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we read the evoked object from a file. Check out
Step2: Notice that evoked is a list of
Step3: Let's start with a simple one. We plot event related potentials / fields
Step4: All plotting functions of MNE-python return a handle to the figure instance.
Step5: Now we will make it a bit fancier and only use MEG channels. Many of the
Step6: Notice the legend on the left. The colors would suggest that there may be two
Step7: By default the topomaps are drawn from evenly spread out points of time over
Step8: Or we can automatically select the peaks.
Step9: You can take a look at the documentation of
Step10: Notice that we created five axes, but had only four categories. The fifth
Step11: Sometimes, you may want to compare two or more conditions at a selection of
Step12: We can also plot the activations as images. The time runs along the x-axis
Step13: Finally we plot the sensor data as a topographical view. In the simple case
Step14: Visualizing field lines in 3D
|
33 | <ASSISTANT_TASK:>
Python Code:
# run this cell first!
fruits = {"apple":"red", "banana":"yellow", "grape":"purple"}
print fruits["banana"]
query = "apple"
print fruits[query]
print fruits[0]
print fruits.keys()
print fruits.values()
for key in fruits:
print fruits[key]
del fruits["banana"]
print fruits
print fruits["pear"]
fruits["pear"] = "green"
print fruits["pear"]
fruits["apple"] = fruits["apple"] + " or green"
print fruits["apple"]
favFoods = {"Wilfred":"Steak", "Manfred":"French fries", "Wadsworth":"Spaghetti", "Jeeves":"Ice cream"}
print favFoods
for name in favFoods:
print name + "'s favorite food is " + favFoods[name]
favFoods["Wilfred"] = "Pizza"
favFoods["Mitsworth"] = "Tuna"
print favFoods
name = raw_input("Enter a name: ")
if name in favFoods:
print favFoods[name]
else:
print "That name is not in our database"
for name in sorted(favFoods):
print name
for name in sorted(favFoods, key=favFoods.get):
print name
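# (optional) the same ordering, but printing each name together with the food:
for name in sorted(favFoods, key=favFoods.get):
    print name + " likes " + favFoods[name]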
outs = open("hello.txt", 'w')
outs.write("Hello, world")
outs.close()
outs = open("meow.txt", 'w')
outs.write("Dear Mitsworth,\n\n\tMeow, meow meow meow.\n\nSincerely,\nA friend")
outs.close()
inFile = "genes.txt"
outFile = "genes_unique.txt"
unique = []
ins = open(inFile, 'r')
outs = open(outFile, 'w')
for line in ins:
line = line.rstrip('\n')
if line not in unique:
outs.write(line + "\n")
unique.append(line)
outs.close()
ins.close()
# hint code:
tallyDict = {}
seq = "ATGCTGATCGATATA"
length = len(seq)
if length not in tallyDict:
tallyDict[length] = 1 #initialize to 1 if this is the first occurrence of the length...
else:
tallyDict[length] = tallyDict[length] + 1 #...otherwise just increment the count.
inFile = "sequences3.txt"
lenDict = {}
ins = open(inFile, 'r')
for line in ins:
line = line.rstrip('\n') #important to do here, since '\n' counts as a character and thus increases the length of the sequence by 1
seqLen = len(line)
if seqLen not in lenDict:
lenDict[seqLen] = 1
else:
lenDict[seqLen] += 1
ins.close()
# print all tallies for informational purposes
for name in sorted(lenDict.keys()):
print name, ":", lenDict[name]
# now to get the max length, we can sort the keys by value
sortedLens = sorted(lenDict, key=lenDict.get) #this returns a list of the keys sorted by value
# this sorts from smallest to largest, so we take the last element
mostCommon = sortedLens[-1]
print "Most common length is", mostCommon, "nts"
inFile = "codon_table.txt"
codon2aa = {}
ins = open(inFile, 'r')
ins.readline() #skip header
for line in ins:
line = line.rstrip('\n')
lineParts = line.split()
codon = lineParts[0]
aa = lineParts[1]
if codon not in codon2aa:
codon2aa[codon] = aa
else:
print "Warning! Multiple entries found for the same codon (" + codon + "). Skipping."
ins.close()
request = raw_input("Codon to translate: ").upper() #covert to uppercase, just in case
if request in codon2aa:
print request, "-->", codon2aa[request]
else:
print "Did not recognize that codon."
inFile = "codon_table.txt"
codon2aa = {}
ins = open(inFile, 'r')
ins.readline() #skip header
for line in ins:
line = line.rstrip('\n')
lineParts = line.split()
codon = lineParts[0]
aa = lineParts[1]
if codon not in codon2aa:
codon2aa[codon] = aa
else:
print "Warning! Multiple entries found for the same codon (" + codon + "). Skipping."
ins.close()
# get user input
request = raw_input("Sequence to translate (multiple of 3): ").upper()
protSeq = ""
if (len(request) % 3) == 0:
# this indexing/slicing is tricky! definitely test this sort of thing to make sure you get it right.
for i in range(0,len(request),3):
codon = request[i:i+3]
if codon in codon2aa:
protSeq = protSeq + codon2aa[codon]
else:
print "Warning! Unrecognized codon ("+codon+"). Using X as a place holder."
protSeq = protSeq + "X"
print "Your protein sequence is:", protSeq
else:
print "Please enter a sequence length that is a multiple of 3."
inFile = "codon_table.txt"
codon2aa = {}
ins = open(inFile, 'r')
ins.readline() #skip header
for line in ins:
line = line.rstrip('\n')
lineParts = line.split()
codon = lineParts[0]
aa = lineParts[1]
if codon not in codon2aa:
codon2aa[codon] = aa
else:
print "Warning! Multiple entries found for the same codon (" + codon + "). Skipping."
ins.close()
# read file of sequences
inFile = "sequences3.txt"
outFile = "proteins.txt"
ins = open(inFile, 'r')
outs = open(outFile, 'w')
for line in ins:
line = line.rstrip('\n')
protSeq = "" #best to define this with the loop so it's re-created for each separate sequence.
if (len(line) % 3) == 0:
for i in range(0,len(line),3):
codon = line[i:i+3]
if codon in codon2aa:
protSeq += codon2aa[codon]
else:
print "Warning! Unrecognized codon ("+codon+"). Using X as a place holder."
protSeq += "X"
outs.write(protSeq + '\n') # write to output file
else:
print "Encountered sequence length that is not a multiple of 3. Skipping it. Sequence:", line
outs.close()
ins.close()
ins = open("horrible.fasta", 'r')
seqDict = {}
activeID = ""
for line in ins:
line = line.rstrip('\n')
# if the first character is >, this line is a header.
if line[0] == ">":
activeID = line[1:]
if activeID in seqDict:
print ">>> Warning: repeat id:", activeID, "-- overwriting previous ID."
seqDict[activeID] = ""
# otherwise, this is a sequence line--add it to the activeID entry
else:
seqDict[activeID] = seqDict[activeID] + line
ins.close()
error = False
if ">varlen2_uc001pmn.3_3476" in seqDict:
print "Remove > chars from headers!"
error = True
elif "varlen2_uc001pmn.3_3476" not in seqDict:
print "Something's wrong with your dictionary: missing keys"
error = True
if "varlen2_uc021qfk.1>2_1472" not in seqDict:
print "Only remove the > chars from the beginning of the header!"
error = True
if len(seqDict["varlen2_uc009wph.3_423"]) > 85:
if "\n" in seqDict["varlen2_uc009wph.3_423"]:
print "Remove newline chars from sequences"
error = True
else:
print "Length of sequences longer than expected for some reason"
error = True
elif len(seqDict["varlen2_uc009wph.3_423"]) < 85:
print "Length of sequences shorter than expected for some reason"
error = True
if error == False:
print "Congrats, you passed all my tests!"
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There's no concept of "first element" in a dictionary, since it's unordered. (Of course, if you happened to have a key in your dictionary that was literally 0, this code would work.)
Step2: Since fruits["apple"] contains a string, we can concatenate to that string just like we would normally. And if instead the value was a number, we could add to it. Basically, you can treat the "dict[key]" as a variable that holds whatever the value is.
Step3: (B) Using a for loop, go through the dictionary you created above and print each name and food combination in the format
Step4: (C) (1) Change the dictionary so that Wilfred's favorite food is Pizza. (2) Add a new entry for Mitsworth, whose favorite food is Tuna.
Step5: (D) Prompt the user to input a name. Check if the name they entered is a valid key in the dictionary using an if statement. If the name is in the dictionary, print out the corresponding favorite food. If not, print a message saying "That name is not in our database".
Step6: (E) Print just the names in the dictionary in alphabetical order. Use the sorting example from the slides.
Step7: (F) Print just the names in sorted order based on their favorite food. Use the value-sorting example from the slides.
Step8: 3. File writing (3pts)
Step9: (B) Write code that prints the following text to a file called meow.txt. It must be formatted exactly as it here (you will need to use \n and \t)
Step10: (C) Write code that reads in the gene IDs from genes.txt and prints the unique gene IDs to a new file called genes_unique.txt. (You can re-use your code or the answer sheet from lab4 for getting the unique IDs.)
Step11: 4. The "many counters" problem (4pts)
Step12: (B) Using the tally dictionary you created above, figure out which sequence length was the most common, and print it to the screen.
Step13: 5. Codon table (6pts)
Step14: (C) Now we will adapt the code in (B) to translate a longer sequence. Instead of prompting the user for a single codon, allow them to enter a longer sequence. First, check that the sequence they entered has a length that is a multiple of 3 (Hint
Step15: (D) Now, instead of taking user input, you will apply your translator to a set of sequences stored in a file. Read in the sequences from sequences3.txt (assume each line is a separate sequence), translate it to amino acids, and print it to a new file called proteins.txt.
Step16: Bonus question
Step17: After you've written your code above and you think it works, run it and then run the following code to to spot-check whether you did everything correctly. If you didn't name your dictionary seqDict, you'll need to change it below to whatever you named your dictionary.
|
34 | <ASSISTANT_TASK:>
Python Code:
# download sample files
!wget -P data -nc ftp://ftp.nersc.no/nansat/test_data/obpg_l2/A2015121113500.L2_LAC.NorthNorwegianSeas.hdf
!wget -P data -nc ftp://ftp.nersc.no/nansat/test_data/obpg_l2/A2015122122000.L2_LAC.NorthNorwegianSeas.hdf
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
%matplotlib inline
from nansat import *
n1 = Nansat('data/A2015121113500.L2_LAC.NorthNorwegianSeas.hdf')
chlor_a1 = n1['chlor_a']
n2 = Nansat('data/A2015122122000.L2_LAC.NorthNorwegianSeas.hdf')
chlor_a2 = n2['chlor_a']
plt.figure(figsize=(5,5))
plt.subplot(121)
plt.imshow(chlor_a1, vmin=0, vmax=3)
plt.subplot(122)
plt.imshow(chlor_a2, vmin=0, vmax=3)
plt.show()
# define domain in longlat projection
d = Domain('+proj=stere +lat_0=58 +lon_0=5 +no_defs',
'-te -300000 -300000 300000 300000 -tr 3000 3000')
# reproject first image and get matrix with reprojected chlorophyll
n1.reproject(d)
chlor_a1 = n1['chlor_a']
# reproject second image and get matrix with reprojected chlorophyll
n2.reproject(d)
chlor_a2 = n2['chlor_a']
# get mask of land and set values of land pixels to NAN (not-a-number)
mask1 = n1.watermask()[1]
chlor_a1[mask1 == 2] = np.nan
chlor_a2[mask1 == 2] = np.nan
# prepare landmask for plotting: land pixels=1, water pixels=NaN
landmask = 1 - mask1.astype(float)
landmask[landmask == 0] = np.nan
plt.figure(figsize=(10,10))
plt.subplot(121)
plt.imshow(chlor_a1, vmin=0, vmax=5)
plt.imshow(landmask, cmap='gray')
plt.subplot(122)
plt.imshow(chlor_a2, vmin=0, vmax=5)
plt.imshow(landmask, cmap='gray')
plt.show()
# replace negative values (clouds) by NAN
chlor_a1[chlor_a1 < 0] = np.nan
chlor_a2[chlor_a2 < 0] = np.nan
# find difference
chlor_diff = chlor_a2 - chlor_a1
# plot
plt.figure(figsize=(5,5))
plt.imshow(chlor_diff, vmin=-0.1, vmax=2);plt.colorbar()
plt.imshow(landmask, cmap='gray')
plt.show()
# get transect - vector of data from 2D matrix from known locations
points = [[200, 75], [150, 150]]
t1 = n1.get_transect(points, ['chlor_a'], lonlat=False)
chl1 = t1['chlor_a']
lon1 = t1['lon']
lat1 = t1['lat']
t2 = n2.get_transect(points, ['chlor_a'], lonlat=False)
chl2 = t2['chlor_a']
# replace negative values with NAN
chl1 = np.array(chl1)
chl2 = np.array(chl2)
chl1[(chl1 < 0) + (chl1 > 5)] = np.nan
chl2[(chl2 < 0) + (chl2 > 5)] = np.nan
print (n1.time_coverage_start)
# plot
plt.plot(lon1, chl1, '-', label=n1.time_coverage_start)
plt.plot(lon1, chl2, '-', label=n2.time_coverage_start)
plt.legend()
plt.xlabel('longitude')
plt.ylabel('chlorphyll-a')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Open MODIS/Aqua files with chlorophyll in the North Sea and fetch data
Step2: Plot chlorophyll-a maps in swath projection
Step3: Colocate data. Reproject both images onto the same Domain.
|
35 | <ASSISTANT_TASK:>
Python Code:
import itertools
# heads = True
# tails = False
# Initialize coins to all heads
coins = [True]*100
for factor in range(100):
# This will generate N zeros, then a 1. This repeats forever
flip_generator = itertools.cycle([0]*factor+[1])
# This will take the first 100 items from the generator
flips = itertools.islice(flip_generator,100)
for index, flip in enumerate(flips):
if flip:
coins[index] = not coins[index]
# 1 has to be added to account for python 0-indexing
coins_tails = [index+1 for index,state in enumerate(coins) if state == False]
print(coins_tails)
import numpy as np
import itertools
# Alternative approach which counts the amount of flips. If even, the coin remains heads up.
# If odd, the coin would end up tails up.
total_flips = [0]*100
for factor in range(100):
# This will generate N zeros, then a 1. This repeats forever
flip_generator = itertools.cycle([0]*factor+[1])
# This will take the first 100 items from the generator
flips = list(itertools.islice(flip_generator,100))
total_flips = np.sum((total_flips,flips),axis=0)
# 1 has to be added to account for python 0-indexing
odd_flips = [index+1 for index,num_flips in enumerate(total_flips) if num_flips % 2 == 1]
print(odd_flips)
%matplotlib inline
import numpy as np
NUM_SPACES = 1000
probs = np.zeros((1000,1000))
# Seed first column of probabilities
# The first 6 values should be 1/6
probs[0:6,0] = np.array([1/6]*6)
for col in np.arange(1,NUM_SPACES):
for row in np.arange(NUM_SPACES):
target_col = col-1
start_row = max(0,row-6)
end_row = max(0,row)
new_val = sum(probs[start_row:end_row,target_col])/6
probs[row,col] = new_val
from matplotlib import pyplot as plt
sum_probs = np.sum(probs,axis=1)
x1 = np.arange(1,31)
y1 = sum_probs[:30]
plt.plot(x1,y1,marker='.',color='b')
plt.ylim(0)
plt.draw()
print(np.argmax(sum_probs)+1)
second_probs = np.zeros((1000,1000))
# Seed first column of probabilities
# The first 5 values should be 1/6
second_probs[0:5,0] = np.array([1/6]*5)
for col in np.arange(1,NUM_SPACES):
for row in np.arange(NUM_SPACES):
target_col = col-1
start_row = max(0,row-6)
end_row = max(0,row)
new_val = sum(second_probs[start_row:end_row,target_col])/6
if row == 5:
second_probs[row,col] = 0
else:
second_probs[row,col] = new_val
from matplotlib import pyplot as plt
sum_second_probs = np.sum(second_probs,axis=1)
x2 = np.arange(1,31)
y2 = sum_second_probs[:30]
plt.plot(x2[:5],y2[:5],marker='.',color='b')
plt.plot(x2[6:31],y2[6:31],marker='.',color='b')
plt.ylim(0)
plt.draw()
print(np.argmax(sum_second_probs)+1)
third_probs = np.zeros((1000,1000))
# Seed first column of probabilities
# The first 4 values should be 1/6
third_probs[0:4,0] = np.array([1/6]*4)
for col in np.arange(1,NUM_SPACES):
for row in np.arange(NUM_SPACES):
target_col = col-1
start_row = max(0,row-6)
end_row = max(0,row)
new_val = sum(third_probs[start_row:end_row,target_col])/6
if row == 5 or row == 4:
third_probs[row,col] = 0
else:
third_probs[row,col] = new_val
from matplotlib import pyplot as plt
sum_third_probs = np.sum(third_probs,axis=1)
x3 = np.arange(1,31)
y3 = sum_third_probs[:30]
plt.plot(x3[:4],y3[:4],marker='.',color='b')
plt.plot(x3[6:31],y3[6:31],marker='.',color='b')
plt.ylim(0)
plt.draw()
print(np.argmax(sum_third_probs)+1)
plt.plot(x1,y1,marker='.',color='k')
plt.plot(x2[:5],y2[:5],marker='.',color='b')
plt.plot(x2[6:31],y2[6:31],marker='.',color='b')
plt.plot(x3[:4],y3[:4],marker='.',color='r')
plt.plot(x3[6:31],y3[6:31],marker='.',color='r')
plt.ylim(0)
plt.draw()
print([np.argmax(sum_probs)+1,
np.argmax(sum_second_probs)+1,
np.argmax(sum_third_probs)+1])
# Implementing the recursive solution from
# http://www.laurentlessard.com/bookproofs/the-deadly-board-game/
p_cache = dict()
def p(k):
try:
return p_cache[k]
except KeyError:
if k == 0:
answer = float(1)
elif k < 0:
answer = float(0)
else:
answer = float((p(k-1)+p(k-2)+p(k-3)+p(k-4)+p(k-5)+p(k-6))/6)
p_cache[k] = answer
return answer
def q(k,m):
return p(k)+p(m)-p(k)*p(m-k)
def r(k,m,n):
return p(k)+p(m)+p(n)-p(k)*p(m-k)-p(k)*p(n-k)-p(m)*p(n-m)+p(k)*p(m-k)*p(n-m)
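# Sanity check (optional): the recursive p(k) should agree with the transition-matrix
# result computed earlier; e.g. p(6) and sum_probs[5] should both be roughly 0.36.
print(p(6), sum_probs[5])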
v = range(1,20)
#single = [p(k) for k in v]
#double = [[q(k,m) for k in v] for m in v]
p_vec = np.vectorize(p)
q_vec = np.vectorize(q)
r_vec = np.vectorize(r)
single = np.fromfunction(p_vec,(20,))
double = np.fromfunction(q_vec,(20,20))
triple = np.fromfunction(r_vec,(20,20,20))
np.argmax(triple[1:20,1:20,1:20])
plt.plot(v,single[1:],marker='.')
plt.show()
plt.imshow(double[1:,1:], cmap='viridis',interpolation ='nearest',extent = (0.5,19.5,19.5,0.5))
plt.show()
import itertools
from matplotlib import animation
from IPython.display import HTML
fig = plt.figure()
im = plt.imshow(triple[1:20,1:20,1],
cmap='viridis',
interpolation='nearest',
extent = (0.5,19.5,19.5,0.5))
cycler = itertools.cycle(v)
def updatefig(i):
z = next(cycler)
im.set_array(triple[1:20,1:20,z])
return [im]
ani = animation.FuncAnimation(fig, updatefig, interval=200, blit=True)
HTML(ani.to_html5_video())
m = np.max(triple[1:20, 1:20, 1:20])
i = np.argmax(triple[1:20, 1:20, 1:20])
# unravel over the sliced (19,19,19) shape, then add 1 to convert back to board squares
np.array(np.unravel_index(i, (19, 19, 19))) + 1
m = np.max(double[1:20, 1:20])
i = np.argmax(double[1:20, 1:20])
np.array(np.unravel_index(i, (19, 19))) + 1
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classic Riddler
Step2: If I would not have seen this particular tweet (https
|
36 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
df= pd.read_excel("NHL 2014-15.xls")
!pip install xlrd
df.columns.value_counts()
df.head()
df.columns
df['Ctry'].value_counts().head(10)
df['Nat'].value_counts().head(10)
df['Birth City'].value_counts().head(10)
df[(df['Birth City'] == 'Toronto') & (df['Draft'] < 2006.0)].head()
df[(df['Birth City'] == 'Edmonton') & (df['Draft'] < 2006.0)].head()
df[(df['Birth City'] == 'Minneapolis') & (df['Draft'] < 2006.0)].head()
usa_concussion = df[(df['Ctry'] == 'USA') & (df['Injury'] == 'Concussion')]
usa_concussion[["First Name", "Last Name"]]
usa_mystery_injury = df[(df['Ctry'] == 'USA') & (df['Injury'] == 'Undisclosed')]
usa_mystery_injury[["First Name", "Last Name"]]
usa_concussion
can_concussion = df[(df['Ctry'] == 'CAN') & (df['Injury'] == 'Concussion')]
can_concussion[["First Name", "Last Name"]]
can_mystery_injury = df[(df['Ctry'] == 'CAN') & (df['Injury'] == 'Undisclosed')]
can_mystery_injury[["First Name", "Last Name"]]
che_concussion = df[(df['Ctry'] == 'CHE') & (df['Injury'] == 'Concussion')]
che_concussion[["First Name", "Last Name"]]
che_mystery_injury = df[(df['Ctry'] == 'CHE') & (df['Injury'] == 'Undisclosed')]
che_mystery_injury[["First Name", "Last Name"]]
swe_concussion = df[(df['Ctry'] == 'SWE') & (df['Injury'] == 'Concussion')]
swe_concussion[["First Name", "Last Name"]]
swe_mystery_injury = df[(df['Ctry'] == 'SWE') & (df['Injury'] == 'Undisclosed')]
swe_mystery_injury[["First Name", "Last Name"]]
deu_concussion = df[(df['Ctry'] == 'DEU') & (df['Injury'] == 'Concussion')]
deu_concussion[["First Name", "Last Name"]]
deu_mystery_injury = df[(df['Ctry'] == 'DEU') & (df['Injury'] == 'Undisclosed')]
deu_mystery_injury[["First Name", "Last Name"]]
cze_concussion= df[(df['Ctry'] == 'CZE') & (df['Injury'] == 'Concussion')]
cze_concussion[["First Name", "Last Name"]]
cze_mystery_injury = df[(df['Ctry'] == 'CZE') & (df['Injury'] == 'Undisclosed')]
cze_mystery_injury[["First Name", "Last Name"]]
rus_concussion = df[(df['Ctry'] == 'RUS') & (df['Injury'] == 'Concussion')]
rus_concussion[["First Name", "Last Name"]]
rus_mystery_injury = df[(df['Ctry'] == 'RUS') & (df['Injury'] == 'Undisclosed')]
rus_mystery_injury[["First Name", "Last Name"]]
ltu_concussion = df[(df['Ctry'] == 'LTU') & (df['Injury'] == 'Concussion')]
ltu_concussion[["First Name", "Last Name"]]
ltu_mystery_injury = df[(df['Ctry'] == 'LTU') & (df['Injury'] == 'Undisclosed')]
ltu_mystery_injury[["First Name", "Last Name"]]
nor_concussion = df[(df['Ctry'] == 'NOR') & (df['Injury'] == 'Concussion')]
nor_concussion[["First Name", "Last Name"]]
nor_mystery_injury = df[(df['Ctry'] == 'NOR') & (df['Injury'] == 'Undisclosed')]
nor_mystery_injury[["First Name", "Last Name"]]
df
birthdate = df[df['DOB']].replace("DOB", "")
birthdate
df['birthyear'] = df['DOB'].astype(str).str.split("'").str.get(1).astype(int)
df
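# (optional) rough age at the start of the 2014-15 season, assuming all birth years are 19xx:
(2014 - (1900 + df['birthyear'])).describe()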
young_usa_players = df[(df['Ctry'] == 'USA') & (df['birthyear'] >= 94 )]
young_usa_players[["First Name", "Last Name"]]
young_can_players = df[(df['Ctry'] == 'CAN') & (df['birthyear'] >= 94 )]
young_can_players[["First Name", "Last Name"]]
old_usa_players = df[(df['Ctry'] == 'USA') & (df['birthyear'] <= 80 )]
old_usa_players[["First Name", "Last Name"]]
old_can_players = df[(df['Ctry'] == 'CAN') & (df['birthyear'] <= 80 )]
old_can_players[["First Name", "Last Name"]]
df['HT'].describe()
df['Wt'].describe()
plt.style.use('ggplot')
df.plot(kind='scatter', x='Wt', y='HT')
df['S'].value_counts()
df.groupby(['Ctry', 'S']).agg(['count'])
usa_left_shot = df[(df['Ctry'] == 'USA') & (df['S'] == 'L')]
usa_left_shot[["First Name", "Last Name"]]
can_left_shot = df[(df['Ctry'] == 'CAN') & (df['S'] == 'L')]
can_left_shot[["First Name", "Last Name"]]
usa_right_shot = df[(df['Ctry'] == 'USA') & (df['S'] == 'R')]
usa_right_shot[["First Name", "Last Name"]]
can_right_shot = df[(df['Ctry'] == 'CAN') & (df['S'] == 'R')]
can_right_shot[["First Name", "Last Name"]]
plt.style.use('seaborn-deep')
df.head(5).plot(kind='bar', x='Ctry', y='Draft')
df
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here's all of our data
Step2: Here are each of the columns in the data set
Step3: Let's count how many players are from each country
Step4: Let's count how many players are from each country
Step5: And now let's look at the top ten highest birth cities
Step6: Let's look at how many of those Toronto-born players were drafted before 2006
Step7: Let's look at how many of those Edmonton-born players were drafted before 2006
Step8: Let's look at how many of those Minneapolis-born players were drafted before 2006
Step9: Concussions...that's always a fun topic. Let's look at the players from each country that reported a concussion. We'll start with the United States
Step10: Hmmm... only two reported concussions in professional hockey?! highly doubtful...let's look at the injuries that were reported as 'Undisclosed' and call them mystery injuries
Step11: Let's look at Canada's reported concussions
Step12: Hmmm...not a lot either. Let's look at the "undisclosed" injuries that were reported
Step13: Switzerland Concussions
Step14: Switzerland "Undisclosed Injuries"
Step15: Sweden Concussions
Step16: Sweden "Undisclosed" Injuries
Step17: Germany Concussions
Step18: Germany "Undisclosed" Injuries
Step19: Czech Republic Concussions
Step20: Czech Republic "Undisclosed Injuries"
Step21: Russia Concussions
Step22: Russia "Undisclosed Injuries"
Step23: Lithuania Concussions
Step24: Lithuania "Undisclosed Injuries"
Step25: Norway Concussions
Step26: Norway "Undisclosed" Injuries
Step27: Let's look at how old the players are
Step28: Young Players (24 years old or younger) for the United States
Step29: Young Players (24 years old or younger) for Canada
Step30: Old Players (36 years old or older) for the United States
Step31: Old Players (36 years old or younger) for Canada
Step32: Let's examine the correlation between height and weight
Step33: And a visual of the correlation...nice
Step34: Let's examine how many lefty's versus righty's (in shooting) each country has
Step35: Interesting...Canada has significantly more left-handed shooters (280) than right-handed shooters. Meanwhile, the USA is pretty even with 110 lefty's and 107 righty's.
Step36: Correlation between Country and Draft Year
|
37 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
x = np.linspace(0, 10, 50)
dy = 0.8
y = np.sin(x) + dy * np.random.randn(50)
# yerr表示y的误差
plt.errorbar(x, y, yerr=dy, fmt='.k');
plt.errorbar(x, y, yerr=dy, fmt='o', color='black',
ecolor='lightgray', elinewidth=3, capsize=0);
# GaussianProcessRegressor实现高斯回归
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
# define the model and draw some data
model = lambda x: x * np.sin(x)
xdata = np.array([1, 3, 5, 6, 8])
ydata = model(xdata)
#
kernel = 1.0 * RBF(length_scale=10.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0)
# # Compute the Gaussian process fit
# gp = GaussianProcessRegressor(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1E-1,
# random_start=100)
gp.fit(xdata[:, np.newaxis], ydata)
xfit = np.linspace(0, 10, 1000)
# MSE,yfit = gp.predict(xfit[:, np.newaxis], return_cov=True)
y_mean, y_cov= gp.predict(xfit[:, np.newaxis], return_cov=True)
dyfit = 2 * np.sqrt(np.diag(y_cov)) # 2*sigma ~ 95% confidence region
# Visualize the result
plt.plot(xdata, ydata, 'or')
plt.plot(xfit, model(xfit), '-', color='gray')
plt.fill_between(xfit, y_mean - dyfit,
y_mean + dyfit,
alpha=0.5, color='gray')
plt.xlim(0, 10);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here fmt is a format code controlling the appearance of lines and points, with the same syntax as the shorthand used in plt.plot, as outlined in Simple Line Plots and Simple Scatter Plots.
Step2: In addition to these options, you can also specify horizontal errorbars (xerr), one-sided errorbars, and many other variants. For more information on the available options, refer to the docstring of plt.errorbar.
Step3: We now have xfit, yfit, and dyfit, which sample the continuous fit to our data. We could pass these to the plt.errorbar function as above, but we don't really want to plot 1,000 points with 1,000 errorbars. Instead, we can use the plt.fill_between function with a light color to visualize this continuous error:
|
38 | <ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas as pd
import numpy as np
import pkg_resources
import matplotlib.pyplot as plt
import seaborn as sns
import time
import scipy.stats as stats
from sklearn import metrics
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding
from keras.layers import Input
from keras.layers import Conv1D
from keras.layers import MaxPooling1D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from keras.models import Model
%matplotlib inline
# autoreload makes it easier to interactively work on code in imported libraries
%load_ext autoreload
%autoreload 2
# These files will be provided to tutorial participants via Google Cloud Storage
train_v1_df = pd.read_csv('../input/fat-star-tutorial-data/public_train_v1.csv')
validate_df = pd.read_csv('../input/fat-star-tutorial-data/public_validate.csv')
test_df = pd.read_csv('../input/fat-star-tutorial-data/public_test.csv')
train_v1_df[['toxicity', 'male', 'comment_text']].query('male >= 0').head()
# List all identities
identity_columns = [
'male', 'female', 'transgender', 'other_gender', 'heterosexual', 'homosexual_gay_or_lesbian',
'bisexual', 'other_sexual_orientation', 'christian', 'jewish', 'muslim', 'hindu', 'buddhist',
'atheist', 'other_religion', 'black', 'white', 'asian', 'latino', 'other_race_or_ethnicity',
'physical_disability', 'intellectual_or_learning_disability', 'psychiatric_or_mental_illness', 'other_disability']
def convert_to_bool(df, col_name):
df[col_name] = np.where(df[col_name] >= 0.5, True, False)
for df in [train_v1_df, validate_df, test_df]:
for col in ['toxicity'] + identity_columns:
convert_to_bool(df, col)
train_v1_df[['toxicity', 'male', 'comment_text']].head()
MAX_SEQUENCE_LENGTH = 250
MAX_NUM_WORDS = 10000
TOXICITY_COLUMN = 'toxicity'
TEXT_COLUMN = 'comment_text'
EMBEDDINGS_PATH = '../data/glove.6B/glove.6B.100d.txt'
EMBEDDINGS_DIMENSION = 100
DROPOUT_RATE = 0.3
LEARNING_RATE = 0.00005
NUM_EPOCHS = 1 # TODO: increase this
BATCH_SIZE = 128
def pad_text(texts, tokenizer):
return pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=MAX_SEQUENCE_LENGTH)
def train_model(train_df, validate_df, tokenizer):
# Prepare data
train_text = pad_text(train_df[TEXT_COLUMN], tokenizer)
train_labels = to_categorical(train_df[TOXICITY_COLUMN])
validate_text = pad_text(validate_df[TEXT_COLUMN], tokenizer)
validate_labels = to_categorical(validate_df[TOXICITY_COLUMN])
# Load embeddings
embeddings_index = {}
with open(EMBEDDINGS_PATH) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1,
EMBEDDINGS_DIMENSION))
num_words_in_embedding = 0
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
num_words_in_embedding += 1
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# Create model layers.
def get_convolutional_neural_net_layers():
"""Returns (input_layer, output_layer)"""
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedding_layer = Embedding(len(tokenizer.word_index) + 1,
EMBEDDINGS_DIMENSION,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
x = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu', padding='same')(x)
x = MaxPooling1D(5, padding='same')(x)
x = Conv1D(128, 5, activation='relu', padding='same')(x)
x = MaxPooling1D(5, padding='same')(x)
x = Conv1D(128, 5, activation='relu', padding='same')(x)
x = MaxPooling1D(40, padding='same')(x)
x = Flatten()(x)
x = Dropout(DROPOUT_RATE)(x)
x = Dense(128, activation='relu')(x)
preds = Dense(2, activation='softmax')(x)
return sequence_input, preds
# Compile model.
input_layer, output_layer = get_convolutional_neural_net_layers()
model = Model(input_layer, output_layer)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(lr=LEARNING_RATE),
metrics=['acc'])
# Train model.
model.fit(train_text,
train_labels,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
validation_data=(validate_text, validate_labels),
verbose=2)
return model
MODEL_NAME_V1 = 'fat_star_tutorial_v1'
tokenizer_v1 = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer_v1.fit_on_texts(train_v1_df[TEXT_COLUMN])
model_v1 = train_model(train_v1_df, validate_df, tokenizer_v1)
test_comments_padded = pad_text(test_df[TEXT_COLUMN], tokenizer_v1)
test_df[MODEL_NAME_V1] = model_v1.predict(test_comments_padded)[:, 1]
# Print some records to compare our model results with the correct labels
test_df[[TOXICITY_COLUMN, TEXT_COLUMN, MODEL_NAME_V1]].head(10)
# Get a list of identity columns that have >= 100 True records. This will remove groups such
# as "other_disability" which do not have enough records to calculate meaningful metrics.
identities_with_over_100_records = []
for identity in identity_columns:
num_records = len(test_df.query(identity + '==True'))
if num_records >= 100:
identities_with_over_100_records.append(identity)
def compute_normalized_pinned_auc(df, subgroup, model_name):
subgroup_non_toxic = df[df[subgroup] & ~df[TOXICITY_COLUMN]]
subgroup_toxic = df[df[subgroup] & df[TOXICITY_COLUMN]]
background_non_toxic = df[~df[subgroup] & ~df[TOXICITY_COLUMN]]
background_toxic = df[~df[subgroup] & df[TOXICITY_COLUMN]]
within_subgroup_mwu = normalized_mwu(subgroup_non_toxic, subgroup_toxic, model_name)
cross_negative_mwu = normalized_mwu(subgroup_non_toxic, background_toxic, model_name)
cross_positive_mwu = normalized_mwu(background_non_toxic, subgroup_toxic, model_name)
return np.mean([1 - within_subgroup_mwu, 1 - cross_negative_mwu, 1 - cross_positive_mwu])
def normalized_mwu(data1, data2, model_name):
"""Returns the number of pairs where the datapoint in data1 has a greater score than that from data2."""
scores_1 = data1[model_name]
scores_2 = data2[model_name]
n1 = len(scores_1)
n2 = len(scores_2)
u, _ = stats.mannwhitneyu(scores_1, scores_2, alternative = 'less')
return u/(n1*n2)
def compute_pinned_auc(df, identity, model_name):
# Create combined_df, containing an equal number of comments that refer to the identity, and
# that belong to the background distribution.
identity_df = df[df[identity]]
nonidentity_df = df[~df[identity]].sample(len(identity_df), random_state=25)
combined_df = pd.concat([identity_df, nonidentity_df])
# Calculate the Pinned AUC
true_labels = combined_df[TOXICITY_COLUMN]
predicted_labels = combined_df[model_name]
return metrics.roc_auc_score(true_labels, predicted_labels)
def get_bias_metrics(df, model_name):
bias_metrics_df = pd.DataFrame({
'subgroup': identities_with_over_100_records,
'pinned_auc': [compute_pinned_auc(df, identity, model_name)
for identity in identities_with_over_100_records],
'normalized_pinned_auc': [compute_normalized_pinned_auc(df, identity, model_name)
for identity in identities_with_over_100_records]
})
# Re-order columns and sort bias metrics
return bias_metrics_df[['subgroup', 'pinned_auc', 'normalized_pinned_auc']].sort_values('pinned_auc')
def calculate_overall_auc(df, model_name):
true_labels = df[TOXICITY_COLUMN]
predicted_labels = df[model_name]
return metrics.roc_auc_score(true_labels, predicted_labels)
bias_metrics_df = get_bias_metrics(test_df, MODEL_NAME_V1)
bias_metrics_df
calculate_overall_auc(test_df, MODEL_NAME_V1)
# Plot toxicity distributions of different identities to visualize bias.
def plot_histogram(identity):
toxic_scores = test_df.query(identity + ' == True & toxicity == True')[MODEL_NAME_V1]
non_toxic_scores = test_df.query(identity + ' == True & toxicity == False')[MODEL_NAME_V1]
sns.distplot(non_toxic_scores, color="skyblue", axlabel=identity)
sns.distplot(toxic_scores, color="red", axlabel=identity)
plt.figure()
for identity in bias_metrics_df['subgroup']:
plot_histogram(identity)
# Load new training data and convert fields to booleans.
train_v2_df = pd.read_csv('../input/fat-star-tutorial-data/public_train_v2.csv')
for col in ['toxicity'] + identity_columns:
convert_to_bool(train_v2_df, col)
# Create a new model using the same structure as our model_v1.
MODEL_NAME_V2 = 'fat_star_tutorial_v2'
tokenizer_v2 = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer_v2.fit_on_texts(train_v2_df[TEXT_COLUMN])
model_v2 = train_model(train_v2_df, validate_df, tokenizer_v2)
test_comments_padded_v2 = pad_text(test_df[TEXT_COLUMN], tokenizer_v2)
test_df[MODEL_NAME_V2] = model_v2.predict(test_comments_padded_v2)[:, 1]
bias_metrics_v2_df = get_bias_metrics(test_df, MODEL_NAME_V2)
bias_metrics_v2_df
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and pre-process data sets
Step2: Let's examine some rows in these datasets. Note that columns like toxicity and male are percent scores.
Step3: We will need to convert toxicity and identity columns to booleans, in order to work with our neural net and metrics calculations. For this tutorial, we will consider any value >= 0.5 as True (i.e. a comment should be considered toxic if 50% or more crowd raters labeled it as toxic). Note that this code also converts missing identity fields to False.
Step5: Create and Train Models
Step6: Score test set with the new model
Step8: Measure bias
Step9: We can graph a histogram of comment scores in each identity. In the following graphs, the X axis represents the toxicity score given by our new model, and the Y axis represents the comment count. Blue values are comments whose true label is non-toxic, while red values are those whose true label is toxic.
Step10: Retrain model to reduce bias
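A small illustration, on made-up scores, of why the np.where(col >= 0.5, True, False) conversion used above also maps missing identity fields to False: any comparison against NaN evaluates to False.
import numpy as np
import pandas as pd

s = pd.Series([0.8, 0.2, np.nan, 0.5])   # hypothetical crowd-rater scores
print(np.where(s >= 0.5, True, False))   # -> [ True False False  True]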
|
39 | <ASSISTANT_TASK:>
Python Code:
# imports used throughout this analysis
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import (accuracy_score, precision_recall_fscore_support,
                             roc_auc_score, confusion_matrix, f1_score)
# read raw data
raw_data = pd.read_csv('/home/phoenix/Documents/session_1_data_train.csv')
test_data = pd.read_csv('/home/phoenix/Documents/session_1_data_test.csv')
test_data.columns = raw_data.columns
raw_data.head()
raw_data.label.value_counts().keys()
test_data.label.value_counts().keys()
# remove ". " from labels
raw_data['label'] = raw_data['label'].apply(lambda x: x[:-1])
test_data['label'] = test_data['label'].apply(lambda x: x[:-1])
pd.isnull(raw_data).sum()
raw_data = raw_data.drop_duplicates()
raw_data = raw_data.dropna()
# distribution of labels
sns.set_color_codes()
fig, ax1 = plt.subplots(1,1, figsize = (18,6))
sns.countplot('label', data = raw_data,palette="Set2", ax = ax1)
plt.xticks(rotation=30)
# combining labels as normal and attack
# normal is 1 , attack is 0
def get_label_grouping(label):
if label == 'normal':
return 'normal'
else:
return 'attack'
raw_data['label_attack_type']= raw_data['label'].apply(get_label_grouping)
test_data['label_attack_type']= test_data['label'].apply(get_label_grouping)
raw_data['label_attack_type'].value_counts()
test_data['label_attack_type'].value_counts()
# distribution of label_attack_type
fig, ax1 = plt.subplots(1,1, figsize = (18,6))
sns.countplot('label_attack_type', data = raw_data,palette="Set2", ax = ax1)
# plt.xticks(rotation=30)
raw_data.columns
# distribution of categorical variables with 'label_attack_type'
sns.set()
categorical_cols = ['protocol_type','flag','land','logged_in','is_host_login','is_guest_login']
for column in categorical_cols:
plt.figure()
sns.countplot(x=column, hue="label_attack_type",data=raw_data, palette="Set2")
## Checking distributions of continuous variables with default_status by plotting boxplots
for column in raw_data.columns:
if column not in categorical_cols+['index','service','label','label_attack_type']:
plt.figure()
sns.boxplot(x="label_attack_type", y=column,data=raw_data , palette = "Set3")
# converting label_attack_type to 0 and 1
raw_data.loc[raw_data['label_attack_type'] == 'normal', 'final_label'] = 0
raw_data.loc[raw_data['label_attack_type'] == 'attack', 'final_label'] = 1
# converting label_attack_type to 0 and 1
test_data.loc[test_data['label_attack_type'] == 'normal', 'final_label'] = 0
test_data.loc[test_data['label_attack_type'] == 'attack', 'final_label'] = 1
#one hot encoding of categorical variables
flag_encoding_raw = pd.get_dummies(raw_data['flag'],prefix = 'flag')
protocol_encoding_raw = pd.get_dummies(raw_data['protocol_type'],prefix = 'protocol')
# concat with blm dataframe
raw_data = pd.concat([raw_data, flag_encoding_raw,protocol_encoding_raw], axis =1 )
#one hot encoding of categorical variables
flag_encoding_test = pd.get_dummies(test_data['flag'],prefix = 'flag')
protocol_encoding_test = pd.get_dummies(test_data['protocol_type'],prefix = 'protocol')
# concat with blm dataframe
test_data = pd.concat([test_data, flag_encoding_test,protocol_encoding_test], axis =1 )
predictors = [c for c in raw_data.columns if c not in ['label', 'label_attack_type', 'index', 'protocol_type',
'flag','service','is_host_login','final_label']]
X_train = raw_data[predictors]
y_train = raw_data['final_label']
X_test = test_data[predictors]
y_test = test_data['final_label']
# X_train, X_test, y_train, y_test = train_test_split(test_data[predictors], test_data['final_label'],
# test_size=0.30, random_state=3, stratify = test_data['final_label'])
print(y_test.value_counts())
print(y_train.value_counts())
def get_performance_metrics(y_test,model_predictions):
# Accuracy
model_accuracy = accuracy_score(y_test,model_predictions)
print("Accuracy is ", model_accuracy)
# precision, recall, f1 score
model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_test,model_predictions)
print('Precision for each class is ', model_precision)
print('Recall/sensitivity for each class is ', model_recall)
print('F1 Score for each class is ', model_f1)
# roc_auc
model_roc_auc = roc_auc_score(y_test,model_predictions)
print('AUC-ROC score is ', model_roc_auc)
# confusion matrix
model_confusion_matrix = confusion_matrix(y_test,model_predictions)
print('confusion matrix is :-->')
print(model_confusion_matrix)
# create instance of Naive Bayes model
nb_model = GaussianNB()
nb_model.fit(X_train, y_train)
#making predictions
nb_predictions = nb_model.predict(X_test)
get_performance_metrics(y_test,nb_predictions)
# create instance of logistic model
lr_model = LogisticRegression(random_state = 3)
lr_model.fit(X_train, y_train)
# predictions
lr_predictions = lr_model.predict(X_test)
get_performance_metrics(y_test,lr_predictions)
svc_model = SVC()
svc_model.fit(X_train,y_train)
svc_predictions = svc_model.predict(X_test)
get_performance_metrics(y_test,svc_predictions)
# choose set of parameters to tune
params = { 'C' : [0.001, 0.01, 0.1, 1, 10],
'penalty': ['l1', 'l2'],
'fit_intercept': [True, False]}
# create instance of GridSearch and fit the data
grid = GridSearchCV(estimator = lr_model, param_grid = params)
grid.fit(X_train, y_train)
# evaluate the best grid searched model on the testing data
grid_search_accuracy = grid.score(X_test, y_test)
print('Grid Search Accuracy is {0}'.format(grid_search_accuracy))
print("grid search best parameters: {}".format(grid.best_params_))
#create instance of decision tree
dt_model = DecisionTreeClassifier(random_state = 3)
dt_model.fit(X_train, y_train)
# predictions
dt_predictions = dt_model.predict(X_test)
get_performance_metrics(y_test,dt_predictions)
#create instance of random forest model
rf_model = RandomForestClassifier(n_estimators=500,n_jobs = -1, random_state=3)
# fitting data to random forest model
rf_model.fit(X_train,y_train)
# predictions
rf_predictions = rf_model.predict(X_test)
get_performance_metrics(y_test,rf_predictions)
# Class weights can be cutomized in this format: class_weight={0: 100,1: 1}
# Class weights can be given 'balanced': class_weight= 'balanced'
rf_model = RandomForestClassifier(n_estimators=500,n_jobs = -1, random_state=3, class_weight={0: 100,1: 1})
rf_model.fit(X_train,y_train)
rf_predictions = rf_model.predict(X_test)
get_performance_metrics(y_test,rf_predictions)
## Adjust the decision threshold of default value of 0.5
## Finding threshold cutoff probability value
def predict_label_given_cutoff(clf_rf,X_train,cutoff_prob):
return (clf_rf.predict_proba(X_train)[:,1]>cutoff_prob)
scores = []
cutoff_prob_list = []
def f1_for_given_cutoff(cutoff_prob):
    def f1_cutoff(clf, X_train, y_train):
        # score with the custom probability cutoff instead of the default 0.5
        y_predict = predict_label_given_cutoff(clf, X_train, cutoff_prob)
        return f1_score(y_train, y_predict)
    return f1_cutoff
clf_rf = RandomForestClassifier(n_estimators= 50,n_jobs = -1)
for cutoff_prob in np.arange(0.1,0.9,0.1):
lb = LabelBinarizer()
y_train = np.array([number[0] for number in lb.fit_transform(y_train)])
validated = cross_val_score(clf_rf,X_train,y_train, cv=10, scoring =f1_for_given_cutoff(cutoff_prob))
scores.append(validated)
cutoff_prob_list.append(cutoff_prob)
print(cutoff_prob)
sns.boxplot(cutoff_prob_list,scores)
plt.xlabel('cutoff_probability')
plt.ylabel('Classification F1 scores')
plt.title('Classifcation score for number of trees')
plt.show()
## Observed cutoff_prob from above graph
cutoff_prob = 0.1
## Model
rf_model = RandomForestClassifier(n_estimators= 50,n_jobs = -1)
rf_model.fit(X_train,y_train)
# predictions, for given cutoff prob value
rf_predictions = predict_label_given_cutoff(rf_model,X_test,cutoff_prob)
get_performance_metrics(y_test,rf_predictions)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Outcomes of sprint 1
Step2: Train test split
Step3: Evaluation function
Step4: Objective of Sprint 3
Step5: Logistic Regression
Step6: Support Vector Machine
Step7: Hyperparameter tuning - using GridSearchCV for Logistic Regression
Step8: Decision Tree
Step9: Random Forest Model
Step10: Objective of Sprint 4
Step11: Adjust the decision threshold
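A minimal sketch of what adjusting the decision threshold means in general (synthetic data, not the intrusion-detection set used above): instead of the default 0.5 cutoff implied by predict(), a record is flagged whenever its predict_proba score for the positive class exceeds a chosen cutoff.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=3)
clf = RandomForestClassifier(n_estimators=50, random_state=3).fit(X, y)

cutoff = 0.1                                    # a lower cutoff trades precision for recall
proba = clf.predict_proba(X)[:, 1]              # probability of the positive (rare) class
custom_pred = (proba > cutoff).astype(int)
print('flagged at 0.5:', (clf.predict(X) == 1).sum(), ' flagged at 0.1:', custom_pred.sum())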
|
40 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import division
import numpy as np
from numpy import linalg as LA
k_a=0.2
k_b=0.2
k_p = 0.5
P = np.matrix([[1-k_a-k_b, k_a ,k_b, 0, 0, 0],
[k_a, 1-k_a-k_b, 0, k_b, 0, 0],
[k_b, 0, 1-k_a-k_b, k_a, 0, 0],
[0, k_b, k_a, 1-k_a-k_b-k_p, k_p, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0]])
q = [[k_a-k_b,k_a,k_b,0,0],
[k_a,k_a+k_b,0,k_b,0],
[k_b,0,k_a+k_b,0,0],
[0,k_b,k_a,k_a+k_b+k_p,k_p],
[0,0,0,0,0]]
qq = np.array(q)
print(P)
states = ['phi', 'alpha', 'beta', 'ab', 'pol', 'd']
import networkx as nx
G=nx.from_numpy_matrix(P,create_using=nx.MultiDiGraph())
G.edges(data=True)
#nx.draw_graphviz(G)# labels=states)
nx.write_dot(G,'G.dot')
!neato -T png G.dot > multi.png
w, v = LA.eig(P)
for i in range(0,6):
print('Eigen value: {}\n Eigen vector: {}\n'.format(w[i], v[:, i]))
## Solve for (I-Q)^{-1}
iq = np.linalg.inv(np.eye(5)-qq)
iq_phi = iq[0,0]
iq_alpha = iq[1,1]
iq_beta = iq[2,2]
iq_alphabeta = iq[3,3]
iq_pol = iq[4,4]
A = np.eye(6)-P.T
A[-1,:] = [1,1,1,1,1,1]
B = [0,0,0,0,0,1]
X=np.linalg.solve(A,B)
print(X)
#EDIT: I made correction to solve for corrected $\pi$, by acounting for $P^T$ and not $P$
print('\pi*P={}\n'.format(X*P))
print('But \pi={}'.format(X))
## phi
np.random.seed(1)
PP = {}
PP['phi']= [1-k_a-k_b, k_a ,k_b, 0, 0, 0]
PP['alpha'] = [k_a, 1-k_a-k_b, 0, k_b, 0, 0]
PP['beta'] = [k_b, 0, 1-k_a-k_b, k_a, 0, 0]
PP['ab']= [0, k_b, k_a, 1-k_a-k_b-k_p, k_p, 0]
PP['pol']= [0, 0, 0, 0, 0, 1]
PP['d']= [0, 0, 0, 1, 0, 0]
##For $h(\phi)$
x0='phi'
x='phi'
def h(x):
s=0
new_state=x
for i in range(1,1000):
old_state=new_state
probs = PP[old_state]
z=np.random.choice(6, 1, p=probs)
new_state = states[z[0]]
#print('{} --> {}'.format(old_state, new_state))
s+=z[0]
return s/1000
print(r'$h(\phi)$: From simulation: {}; From calculation: {}'.format(h('phi'),iq_phi))
print(r'$h(\alpha)$: From simulation: {}; From calculation: {}'.format(h('alpha'),iq_alpha))
print(r'$h(\beta)$: From simulation: {}; From calculation: {}'.format(h('beta'),iq_beta))
print(r'$h(\alpha+\beta)$: From simulation: {}; From calculation: {}'.format(h('ab'),iq_alphabeta))
print(r'$h(\pol)$: From simulation: {}; From calculation: {}'.format(h('pol'),iq_pol))
old_state = [0.1,0.2,0.3,0.4,0,0]
def perturb(old_state):
new_state = old_state*P
return new_state
new_state = [0,0,0,0,0,1]
while not np.allclose(old_state, new_state):
old_state, new_state = new_state, perturb(old_state)
print(old_state)
# EDIT: I made correction to solve for corrected $\pi$, by acounting for $P^T$ and not $P$
print('From calculation(which is NO LONGER wrong!), stationary distribution:{}'.format(X))
print('From simulation, stationary distribution: {}'.format(old_state))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Markov chain seems to be irreducible
Step2: EDIT
Step3: The stationary state is given by $\pi = (0.1667, 0.1667, 0.1667, 0.1667, 0.1667, 0.1667)$. The mean number of visits per unit time to $\dagger$ is $\frac{1}{\pi_6} = 6$. However, strangely, this does not satisfy $\pi=P\pi$. I was not able to figure out where I went wrong.
Step4: Simulating the chain
Step5: Part (a,b,c)
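As a cross-check on the stationary-distribution discussion above, here is a small self-contained sketch of the standard recipe: take the eigenvector of the transposed transition matrix associated with eigenvalue 1 and normalize it to sum to one. The 2-state matrix below is made up for illustration, not the chain from the notebook.
import numpy as np

def stationary_distribution(P):
    # left eigenvector of P with eigenvalue 1, i.e. right eigenvector of P.T
    w, v = np.linalg.eig(P.T)
    idx = np.argmin(np.abs(w - 1.0))
    pi = np.real(v[:, idx])
    return pi / pi.sum()

P = np.array([[0.9, 0.1],
              [0.5, 0.5]])
pi = stationary_distribution(P)
print(pi, pi @ P)   # pi @ P should reproduce pi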
|
41 | <ASSISTANT_TASK:>
Python Code:
from reprophylo import *
pj = unpickle_pj('outputs/my_project.pkpj', git=False)
genera_with_porocalices = ['Cinachyrella',
'Cinachyra',
'Amphitethya',
'Fangophilina',
'Acanthotetilla',
'Paratetilla']
genera_without_porocalices = ['Craniella',
'Tetilla',
'Astrophorida']
for genus in genera_with_porocalices:
pj.if_this_then_that(genus, 'genus', 'present', 'porocalices')
for genus in genera_without_porocalices:
pj.if_this_then_that(genus, 'genus', 'absent', 'porocalices')
pj.propagate_metadata()
bg_colors = {'present':'red',
'absent': 'white'}
supports = {'black': [100,99],
'gray': [99,80]}
pj.annotate('./images/', # Path to write figs to
'genus', 'Astrophorida', # Set OTUs that have 'Astrophorida'
# in their 'genus' qualifier
# as outgroup
['source_organism', 'record_id'], # leaf labels
node_bg_meta='porocalices', # The qualifier that
# will determine bg colors
node_bg_color=bg_colors, # The colors assigned to
# each qualifier value
node_support_dict=supports,
html='./images/figs.html'
)
pj.clear_tree_annotations()
from IPython.display import Image
Image('./images/example1.png', width=300)
bg_colors = {'Cinachyrella': 'gray',
'Cinachyra': 'silver',
'Amphitethya': 'white',
'Fangophilina':'white',
'Acanthotetilla':'silver',
'Paratetilla':'white',
'Craniella': 'gray',
'Tetilla': 'silver',
'Astrophorida': 'white'}
pj.clear_tree_annotations()
pj.annotate('./images/', # Path to write figs to
'mid', 'mid', # Set midpoint root
['source_organism'], # leaf labels
fsize=13,
node_bg_meta='genus', # The qualifier that
# will determine bg colors
node_bg_color=bg_colors, # The colors assigned to
# each qualifier value
# heatmap columns
heat_map_meta=['porocalyx', 'cortex', 'calthrops'],
heat_map_colour_scheme=0,
branch_color='black',
html='./images/figs.html'
)
from IPython.display import Image
Image('./images/example2.png', width=300)
publish(pj, 'my_report', './images/', size='large')
pickle_pj(pj, 'outputs/my_project.pkpj')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3.10.1 Updating the metadata after the tree has been built
Step2: while others do not
Step3: The following command will add the value 'present' to a new qualifier called 'porocalices' in sequence features of species that belong to genera_with_porocalices
Step4: and the following command will add the value 'absent' to a new qualifier called 'porocalices' to sequence features of species that belong to genera_without_porocalices
Step5: The new qualifier porocalices is now updated in the SeqRecord objects within the pj.records list (more on this in section 3.4). But in order for it to exist in the Tree objects, stored in the pj.trees dictionary, we have to run this command
Step6: Only now the new qualifier is available for tree annotation. Note that qualifiers that existed in the Project when we built the trees will be included in the Tree object by default.
Step7: In the resulting figure (below), clades of species with porocalices have a red background, nodes with maximal relBootstrap support have black bullets, and nodes with branch support > 80 have gray bullets.
Step8: 3.10.2.2 Example 2, the metadata as a heatmap
Step9: And this is what it looks like
Step10: 2.10.3 Archive the analysis as a zip file
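If a second binary character were to be mapped the same way, the same if_this_then_that / propagate_metadata pattern from the code above would apply. This sketch assumes the Project object pj built earlier; the 'spines' qualifier and the genus groupings are purely hypothetical.
# Hypothetical extra character, reusing only ReproPhylo calls already shown above
genera_with_spines = ['Acanthotetilla']           # made-up grouping for illustration
genera_without_spines = ['Tetilla', 'Craniella']  # made-up grouping for illustration

for genus in genera_with_spines:
    pj.if_this_then_that(genus, 'genus', 'present', 'spines')
for genus in genera_without_spines:
    pj.if_this_then_that(genus, 'genus', 'absent', 'spines')

pj.propagate_metadata()  # make the new qualifier visible to the Tree objects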
|
42 | <ASSISTANT_TASK:>
Python Code:
password = input("Please enter the password:")
if password == "Simsim":
print("\t> Welcome to the cave")
x = "Mayank"
y = "TEST"
if y == "TEST":
print(x)
if y:
print("Hello World")
z = None
if z:
print("TEST")
x = 11
if x > 10:
print("Hello")
if x > 10.999999999999:
print("Hello again")
if x % 2 == 0:
print("Bye bye bye ...")
x = 10
y = None
z = "111"
print(id(y))
if x:
print("Hello in x")
if y:
print("Hello in Y")
if z:
print("Hello in Z")
x = "Anuja"
if x == "mayank":
print("Name is mayank")
else:
print("Name is not mayank and its", x)
# temperature value used to test
temp = 31
if temp < 0:
print ('Freezing...')
elif 0 <= temp <= 20:
print ('Cold')
elif 21 <= temp <= 25:
print ('Room Temperature')
elif 26 <= temp <= 35:
print ('Hot')
else:
print ("It's very HOT! Let's stay at home... \nand drink lemonade.")
# temperature value used to test
temp = 60
if temp < 0:
print ('Freezing...')
elif 0 <= temp <= 20:
print ('Cold')
elif 21 <= temp <= 25:
print ('Room Temperature')
elif 26 <= temp <= 35:
print ('Hot')
else:
print ("It's very HOT! Let's stay at home... \nand drink lemonade.")
a = "apple"
b = "banana"
c = "Mango"
if a == "apple":
print("apple")
elif b == "Mango":
print("mango")
elif c == "Mango":
print("My Mango farm")
x = 20
if x > 10: print ("Hello ")
print("-"*30)
val = 1 if x < 10 else 24
print(val)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: if ... else statement
Step2: if ...elif ... else statement
Step3: Imagine that in the above program, 23 is the temperature which was read by some sensor or manually entered by the user and Normal is the response of the program.
Step4: One line if
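A short extra example tying the pieces above together: the conditional expression (Python's ternary form) and a chained comparison, which tests both bounds in one expression.
temp = 18
label = 'Cold' if 0 <= temp <= 20 else 'Not cold'   # conditional expression
print(label)

x = 20
if x > 10: print('Hello')                           # single-statement body on one line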
|
43 | <ASSISTANT_TASK:>
Python Code:
import base64
import pickle
import requests
import pandas as pd
import sqlalchemy as sa
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
Y = iris.target
# print(iris.DESCR)
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(random_state=1960)
clf.fit(X, Y)
#clf.__dict__
def test_ws_sql_gen(pickle_data):
WS_URL="https://sklearn2sql.herokuapp.com/model"
b64_data = base64.b64encode(pickle_data).decode('utf-8')
data={"Name":"model1", "PickleData":b64_data , "SQLDialect":"postgresql"}
r = requests.post(WS_URL, json=data)
#print(r.__dict__)
content = r.json()
# print(content)
lSQL = content["model"]["SQLGenrationResult"][0]["SQL"]
return lSQL;
pickle_data = pickle.dumps(clf)
lSQL = test_ws_sql_gen(pickle_data)
print(lSQL[0:2000])
# save the dataset in a database table
#engine = sa.create_engine('sqlite://' , echo=False)
engine = sa.create_engine("postgresql://db:db@localhost/db?port=5432", echo=False)
conn = engine.connect()
lTable = pd.DataFrame(X);
lTable.columns = ['Feature_0', 'Feature_1', 'Feature_2', 'Feature_3']
lTable['KEY'] = range(lTable.shape[0])
lTable.to_sql("INPUT_DATA" , conn, if_exists='replace', index=False)
sql_output = pd.read_sql(lSQL , conn);
conn.close()
sql_output.sample(12, random_state=1960)
sql_output.Decision.value_counts()
skl_outputs = pd.DataFrame()
skl_output_key = pd.DataFrame(list(range(X.shape[0])), columns=['KEY']);
skl_output_score = pd.DataFrame(columns=['Score_0', 'Score_1', 'Score_2']);
skl_output_proba = pd.DataFrame(clf.predict_proba(X), columns=['Proba_0', 'Proba_1', 'Proba_2'])
skl_output_log_proba = pd.DataFrame(clf.predict_log_proba(X), columns=['LogProba_0', 'LogProba_1', 'LogProba_2'])
skl_output_decision = pd.DataFrame(clf.predict(X), columns=['Decision'])
skl_output = pd.concat([skl_output_key, skl_output_score, skl_output_proba, skl_output_log_proba, skl_output_decision] , axis=1)
skl_output.sample(12, random_state=1960)
sql_skl_join = skl_output.join(sql_output , how='left', on='KEY', lsuffix='_skl', rsuffix='_sql')
sql_skl_join.sample(12, random_state=1960)
condition = (sql_skl_join.Decision_sql != sql_skl_join.Decision_skl)
sql_skl_join[condition]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate SQL Code from the Model
Step2: Execute the SQL Code
Step3: Scikit-learn Prediction
Step4: Comparing the SQL and Scikit-learn Predictions
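One extra check that could follow the Decision comparison above is to compare the probability columns numerically. This assumes the joined frame sql_skl_join from the cell above, with _skl/_sql suffixes on the overlapping Proba_* columns; it is a sketch, not part of the original workflow.
import numpy as np

for k in range(3):
    skl_col, sql_col = f'Proba_{k}_skl', f'Proba_{k}_sql'
    if skl_col in sql_skl_join.columns and sql_col in sql_skl_join.columns:
        close = np.allclose(sql_skl_join[skl_col], sql_skl_join[sql_col], atol=1e-6)
        print(f'class {k}: probabilities agree within 1e-6 -> {close}')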
|
44 | <ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""Split character data into training and validation sets, inputs and targets for each set.

Arguments
---------
chars: character array
batch_size: Size of examples in each batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set

Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the virst split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Build the RNN layers
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
# Run the data through the RNN layers
rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]
outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one row for each cell output
seq_output = tf.concat(outputs, axis=1,name='seq_output')
output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
# Now connect the RNN putputs to a softmax layer and calculate the cost
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
name='softmax_w')
softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
logits = tf.matmul(output, softmax_w) + softmax_b
preds = tf.nn.softmax(logits, name='predictions')
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
cost = tf.reduce_mean(loss, name='cost')
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('./logs/1', sess.graph)
!mkdir -p checkpoints/anna
epochs = 1
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
prime = "Far"
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i178_l512_2.530.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use.
Step3: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Step4: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size x num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window to the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will carry through from one batch to the next.
Step5: Hyperparameters
Step6: Write out the graph for TensorBoard
Step7: Training
Step8: Sampling
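A tiny, self-contained demonstration of the sliding-window batching described above (the same arithmetic as split_data/get_batch, applied to a toy integer array), to make the batch_size x num_steps layout concrete:
import numpy as np

chars = np.arange(20)            # toy "encoded text"
batch_size, num_steps = 2, 3
slice_size = batch_size * num_steps
n_batches = len(chars) // slice_size

x = chars[:n_batches * slice_size]
y = chars[1:n_batches * slice_size + 1]
x = np.stack(np.split(x, batch_size))   # shape: (batch_size, n_batches*num_steps)
y = np.stack(np.split(y, batch_size))

for b in range(n_batches):
    print(x[:, b*num_steps:(b+1)*num_steps], y[:, b*num_steps:(b+1)*num_steps])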
|
45 | <ASSISTANT_TASK:>
Python Code:
import re
import pubchempy as pcp
import logging
logging.getLogger('pubchempy').setLevel(logging.DEBUG)
def get_substructure_cas(smiles):
cas_rns = []
results = pcp.get_synonyms(smiles, 'smiles', searchtype='substructure')
for result in results:
for syn in result.get('Synonym', []):
match = re.match(r'(\d{2,7}-\d\d-\d)', syn)
if match:
cas_rns.append(match.group(1))
return cas_rns
cas_rns = get_substructure_cas('[Pb]')
print(len(cas_rns))
print(cas_rns[:10])
cas_rns = get_substructure_cas('[Se]')
print(len(cas_rns))
print(cas_rns[:10])
cas_rns = get_substructure_cas('[Ti]')
print(len(cas_rns))
print(cas_rns[:10])
cas_rns = get_substructure_cas('[Pd]')
print(len(cas_rns))
print(cas_rns[:10])
cids = pcp.get_cids('[Pd]', 'smiles', searchtype='substructure')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enable debug logging to make it easier to see what is going on
Step2: A function to get the CAS registry numbers for compounds with a particular SMILES substructure
Step3: Test some inputs
Step4: We could potentially get a TimeoutError if there are too many results. In this case, it might be better to perform the substructure search and then get the synonyms separately
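A hedged sketch of that two-step workaround: fetch the CIDs first, then request synonyms in small chunks. The chunk size and the assumption that get_synonyms accepts a list of CIDs reflect typical PubChemPy usage and are not taken from this notebook.
import re
import pubchempy as pcp

def get_substructure_cas_chunked(smiles, chunk_size=100):
    cas_rns = []
    cids = pcp.get_cids(smiles, 'smiles', searchtype='substructure')
    for i in range(0, len(cids), chunk_size):
        # request synonyms for a small batch of CIDs at a time
        results = pcp.get_synonyms(cids[i:i + chunk_size], 'cid')
        for result in results:
            for syn in result.get('Synonym', []):
                match = re.match(r'(\d{2,7}-\d\d-\d)', syn)
                if match:
                    cas_rns.append(match.group(1))
    return cas_rns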
|
46 | <ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
dataset = aip.TabularDataset.create(
display_name="Bank Marketing" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
dag = aip.AutoMLTabularTrainingJob(
display_name="bank_" + TIMESTAMP,
optimization_prediction_type="classification",
optimization_objective="minimize-log-loss",
)
print(dag)
model = dag.run(
dataset=dataset,
model_display_name="bank_" + TIMESTAMP,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=1000,
disable_early_stopping=False,
target_column=label_column,
)
# Get model resource ID
models = aip.Model.list(filter="display_name=bank_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
! gsutil cat $IMPORT_FILE | head -n 1 > tmp.csv
! gsutil cat $IMPORT_FILE | tail -n 10 >> tmp.csv
! cut -d, -f1-16 tmp.csv > batch.csv
gcs_input_uri = BUCKET_NAME + "/test.csv"
! gsutil cp batch.csv $gcs_input_uri
batch_predict_job = model.batch_predict(
job_display_name="bank_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="csv",
sync=False,
)
print(batch_predict_job)
batch_predict_job.wait()
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
endpoint = model.deploy(machine_type="n1-standard-4")
INSTANCE = {
"Age": "58",
"Job": "managment",
"MaritalStatus": "married",
"Education": "teritary",
"Default": "no",
"Balance": "2143",
"Housing": "yes",
"Loan": "no",
"Contact": "unknown",
"Day": "5",
"Month": "may",
"Duration": "261",
"Campaign": "1",
"PDays": "-1",
"Previous": "0",
"POutcome": "unknown",
}
instances_list = [INSTANCE]
prediction = endpoint.predict(instances_list)
print(prediction)
endpoint.undeploy_all()
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage and tensorflow libraries as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Location of Cloud Storage training data.
Step13: Quick peek at your data
Step14: Create a dataset
Step15: Example Output
Step16: Example output
Step17: Example output
Step18: Example output
Step19: Make the batch prediction request
Step20: Example output
Step21: Example Output
Step22: Example Output
Step23: Example output
Step24: Make the prediction
Step25: Example output
Step26: Cleaning up
|
47 | <ASSISTANT_TASK:>
Python Code:
df['Age'].describe()
df.groupby('Gender')['Income'].describe()
df['Income'].describe()
df['SchoolMajor'].value_counts()
df['SchoolDegree'].value_counts()
df.sort_values(by='StudentDebtOwe', ascending=False).head()
df[(df['BootcampFullJobAfter']==1) & (df['BootcampLoanYesNo']==1)].head()
df[(df['IsEthnicMinority']==1)& (df['CountryCitizen']=='United States of America')]
df.groupby('CountryCitizen')['Income'].sum().sort_values(ascending=False)
df['BootcampMonthsAgo'].value_counts()
df[(df['EmploymentStatus']== 'Employed for wages')& (df['BootcampMonthsAgo'] <=4)].head()
df['Age'].hist()
plt.style.available
plt.style.use('fivethirtyeight')
df['Age'].hist()
df['Age'].hist(bins=50, xlabelsize=10, ylabelsize=10, range=(0,50))# number of bins tells different stories
plt.style.use('fivethirtyeight')
df['StudentDebtOwe'].head(10).plot(kind='bar')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. What is the maximum income for female programmers?
Step2: 3. How much does a programmer make on average per year?
Step3: 4. What is the most common major these programmers studied?
Step4: 5. What is the highest degree these programmers have completed?
Step5: 6. Rank programmers by the amount of student debt they owe
Step6: 7. Show programmers who are taking loans to attend bootcamp and have a full-time job after attending bootcamp
Step7: 8. How many programmers are citizens of the United States of America and are an ethnic minority?
Step8: 9. Programmers from which country earn the most?
Step9: 10. How many programmers found jobs within 4 months or less of graduating from bootcamp?
Step10: This is a bar chart of programmers' ages
Step11: Change the style of the previous plot to fivethirtyeight
Step12: Customize the chart by changing the bin size, xlabelsize, ylabelsize and range
Step13: Make a graph that shows the amount of student debt the first 10 programmers in the dataframe owe
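A tiny, self-contained illustration (random data, not the survey) of the point above that the number of bins changes the story a histogram tells:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

ages = pd.Series(np.random.default_rng(0).normal(30, 8, 1000).clip(10, 70))
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
ages.hist(bins=10, ax=axes[0])   # coarse view
ages.hist(bins=50, ax=axes[1])   # finer structure
plt.show()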
|
48 | <ASSISTANT_TASK:>
Python Code:
import torch
import torch.nn as nn
import torch.nn.functional as F # adds some efficiency
from torch.utils.data import DataLoader # lets us load data in batches
from torchvision import datasets, transforms
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix # for evaluating results
import matplotlib.pyplot as plt
%matplotlib inline
transform = transforms.ToTensor()
train_data = datasets.MNIST(root='../Data', train=True, download=True, transform=transform)
train_data
test_data = datasets.MNIST(root='../Data', train=False, download=True, transform=transform)
test_data
train_data[0]
image, label = train_data[0]
print('Shape:', image.shape, '\nLabel:', label)
plt.imshow(train_data[0][0].reshape((28,28)), cmap="gray");
plt.imshow(train_data[0][0].reshape((28,28)), cmap="gist_yarg");
torch.manual_seed(101) # for consistent results
train_loader = DataLoader(train_data, batch_size=100, shuffle=True)
test_loader = DataLoader(test_data, batch_size=500, shuffle=False)
from torchvision.utils import make_grid
np.set_printoptions(formatter=dict(int=lambda x: f'{x:4}')) # to widen the printed array
# Grab the first batch of images
for images,labels in train_loader:
break
# Print the first 12 labels
print('Labels: ', labels[:12].numpy())
# Print the first 12 images
im = make_grid(images[:12], nrow=12) # the default nrow is 8
plt.figure(figsize=(10,4))
# We need to transpose the images from CWH to WHC
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)));
class MultilayerPerceptron(nn.Module):
def __init__(self, in_sz=784, out_sz=10, layers=[120,84]):
super().__init__()
self.fc1 = nn.Linear(in_sz,layers[0])
self.fc2 = nn.Linear(layers[0],layers[1])
self.fc3 = nn.Linear(layers[1],out_sz)
def forward(self,X):
X = F.relu(self.fc1(X))
X = F.relu(self.fc2(X))
X = self.fc3(X)
return F.log_softmax(X, dim=1)
torch.manual_seed(101)
model = MultilayerPerceptron()
model
def count_parameters(model):
params = [p.numel() for p in model.parameters() if p.requires_grad]
for item in params:
print(f'{item:>6}')
print(f'______\n{sum(params):>6}')
count_parameters(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Load the first batch, print its shape
for images, labels in train_loader:
print('Batch shape:', images.size())
break
# EQUIVALENT TO:
# dataiter = iter(train_loader)
# images, labels = dataiter.next()
# print('Batch shape:', images.size())
images.view(100,-1).size()
import time
start_time = time.time()
epochs = 10
train_losses = []
test_losses = []
train_correct = []
test_correct = []
for i in range(epochs):
trn_corr = 0
tst_corr = 0
# Run the training batches
for b, (X_train, y_train) in enumerate(train_loader):
b+=1
# Apply the model
y_pred = model(X_train.view(100, -1)) # Here we flatten X_train
loss = criterion(y_pred, y_train)
# Tally the number of correct predictions
predicted = torch.max(y_pred.data, 1)[1]
batch_corr = (predicted == y_train).sum()
trn_corr += batch_corr
# Update parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Print interim results
if b%200 == 0:
print(f'epoch: {i:2} batch: {b:4} [{100*b:6}/60000] loss: {loss.item():10.8f} \
accuracy: {trn_corr.item()*100/(100*b):7.3f}%')
# Update train loss & accuracy for the epoch
train_losses.append(loss)
train_correct.append(trn_corr)
# Run the testing batches
with torch.no_grad():
for b, (X_test, y_test) in enumerate(test_loader):
# Apply the model
y_val = model(X_test.view(500, -1)) # Here we flatten X_test
# Tally the number of correct predictions
predicted = torch.max(y_val.data, 1)[1]
tst_corr += (predicted == y_test).sum()
# Update test loss & accuracy for the epoch
loss = criterion(y_val, y_test)
test_losses.append(loss)
test_correct.append(tst_corr)
print(f'\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed
plt.plot(train_losses, label='training loss')
plt.plot(test_losses, label='validation loss')
plt.title('Loss at the end of each epoch')
plt.legend();
plt.plot([t/600 for t in train_correct], label='training accuracy')
plt.plot([t/100 for t in test_correct], label='validation accuracy')
plt.title('Accuracy at the end of each epoch')
plt.legend();
print(test_correct) # contains the results of all 10 epochs
print()
print(f'Test accuracy: {test_correct[-1].item()*100/10000:.3f}%') # print the most recent result as a percent
# Extract the data all at once, not in batches
test_load_all = DataLoader(test_data, batch_size=10000, shuffle=False)
with torch.no_grad():
correct = 0
for X_test, y_test in test_load_all:
y_val = model(X_test.view(len(X_test), -1)) # pass in a flattened view of X_test
predicted = torch.max(y_val,1)[1]
correct += (predicted == y_test).sum()
print(f'Test accuracy: {correct.item()}/{len(test_data)} = {correct.item()*100/(len(test_data)):7.3f}%')
# print a row of values for reference
np.set_printoptions(formatter=dict(int=lambda x: f'{x:4}'))
print(np.arange(10).reshape(1,10))
print()
# print the confusion matrix
print(confusion_matrix(predicted.view(-1), y_test.view(-1)))
misses = np.array([])
for i in range(len(predicted.view(-1))):
if predicted[i] != y_test[i]:
misses = np.append(misses,i).astype('int64')
# Display the number of misses
len(misses)
# Display the first 10 index positions
misses[:10]
# Set up an iterator to feed batched rows
r = 12 # row size
row = iter(np.array_split(misses,len(misses)//r+1))
nextrow = next(row)
print("Index:", nextrow)
print("Label:", y_test.index_select(0,torch.tensor(nextrow)).numpy())
print("Guess:", predicted.index_select(0,torch.tensor(nextrow)).numpy())
images = X_test.index_select(0,torch.tensor(nextrow))
im = make_grid(images, nrow=r)
plt.figure(figsize=(10,4))
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)));
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the MNIST dataset
Step2: Load the training set
Step3: Load the test set
Step4: Examine a training record
Step5: Calling the first record from <tt>train_data</tt> returns a two-item tuple. The first item is our 28x28 tensor representing the image. The second is a label, in this case the number "5".
Step6: View the image
Step7: Batch loading with DataLoader
Step8: In the cell above, <tt><strong>train_data</strong></tt> is a PyTorch <a href='https
Step9: Define the model
Step10: <div class="alert alert-info"><strong>NOTE
Step11: Define loss function & optimizer
Step12: Flatten the training data
Step13: We can flatten them using <tt>.view()</tt>
Step14: We'll do this just before applying the model to our data.
Step15: Plot the loss and accuracy comparisons
Step16: This shows some evidence of overfitting the training data.
Step17: Evaluate Test Data
Step18: However, we'd like to compare the predicted values to the ground truth (the y_test labels), so we'll run the test set through the trained model all at once.
Step19: Not bad considering that a random guess gives only 10% accuracy!
Step20: This shows that the model had the greatest success with ones, twos and sevens, and the lowest with fives, sixes and eights.
Step21: Now that everything is set up, run and re-run the cell below to view all of the missed predictions.<br>
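One small follow-up that the confusion matrix above makes easy: per-class accuracy is the diagonal divided by the per-column (true-label) totals. This sketch assumes the predicted and y_test tensors from the evaluation cells above.
import numpy as np
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(predicted.view(-1), y_test.view(-1))
per_class_acc = np.diag(cm) / cm.sum(axis=0)      # columns hold the true labels here
for digit, acc in enumerate(per_class_acc):
    print(f'{digit}: {acc:.3f}')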
|
49 | <ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
#format the book
import book_format
book_format.set_style()
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
N = 5000
a = np.pi/2. + (randn(N) * 0.35)
r = 50.0 + (randn(N) * 0.4)
xs = r * np.cos(a)
ys = r * np.sin(a)
plt.scatter(xs, ys, label='Sensor', color='k',
alpha=0.4, marker='.', s=1)
xmean, ymean = sum(xs) / N, sum(ys) / N
plt.scatter(0, 50, c='k', marker='o', s=200, label='Intuition')
plt.scatter(xmean, ymean, c='r', marker='*', s=200, label='Mean')
plt.axis('equal')
plt.legend();
from numpy.random import normal
data = normal(loc=0., scale=1., size=500000)
plt.hist(2*data + 1, 1000);
from kf_book.book_plots import set_figsize, figsize
from kf_book.nonlinear_plots import plot_nonlinear_func
def g1(x):
return 2*x+1
plot_nonlinear_func(data, g1)
def g2(x):
return (np.cos(3*(x/2 + 0.7))) * np.sin(0.3*x) - 1.6*x
plot_nonlinear_func(data, g2)
N = 30000
plt.subplot(121)
plt.scatter(data[:N], range(N), alpha=.1, s=1.5)
plt.title('Input')
plt.subplot(122)
plt.title('Output')
plt.scatter(g2(data[:N]), range(N), alpha=.1, s=1.5);
y = g2(data)
plot_nonlinear_func(y, g2)
print('input mean, variance: %.4f, %.4f' %
(np.mean(data), np.var(data)))
print('output mean, variance: %.4f, %.4f' %
(np.mean(y), np.var(y)))
def g3(x):
return -1.5 * x
plot_nonlinear_func(data, g3)
out = g3(data)
print('output mean, variance: %.4f, %.4f' %
(np.mean(out), np.var(out)))
out = g3(data)
out2 = g2(data)
for i in range(10):
out = g3(out)
out2 = g2(out2)
print('linear output mean, variance: %.4f, %.4f' %
(np.average(out), np.std(out)**2))
print('nonlinear output mean, variance: %.4f, %.4f' %
(np.average(out2), np.std(out2)**2))
def g3(x):
return -x*x
data = normal(loc=1, scale=1, size=500000)
plot_nonlinear_func(data, g3)
import kf_book.nonlinear_internal as nonlinear_internal
nonlinear_internal.plot1()
nonlinear_internal.plot2()
nonlinear_internal.plot3()
nonlinear_internal.plot4()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: We can see that our intuition failed us because the nonlinearity of the problem forced all of the errors to be biased in one direction. This bias, over many iterations, can cause the Kalman filter to diverge. Even if it doesn't diverge, the solution will not be optimal. Linear approximations applied to nonlinear problems yield inaccurate results.
Step3: This is an unsurprising result. The result of passing the Gaussian through $f(x)=2x+1$ is another Gaussian centered around 1. Let's look at the input, nonlinear function, and output at once.
Step4: I explain how to plot Gaussians, and much more, in the Notebook Computing_and_Plotting_PDFs in the
Step5: This result may be somewhat surprising to you. The function looks "fairly" linear, but the probability distribution of the output is completely different from a Gaussian. Recall the equations for multiplying two univariate Gaussians
Step6: The original data is clearly Gaussian, but the data passed through g2(x) is no longer normally distributed. There is a thick band near -3, and the points are unequally distributed on either side of the band. If you compare this to the pdf labelled 'output' in the previous chart you should be able to see how the pdf shape matches the distribution of g(data).
Step7: As you can see the probability function is further distorted from the original Gaussian. However, the graph is still somewhat symmetric around x=0, let's see what the mean is.
Step8: Let's compare that to the linear function that passes through (-2,3) and (2,-3), which is very close to the nonlinear function we have plotted. Using the equation of a line we have
Step9: Although the shapes of the output are very different, the mean and variance of each are almost the same. This may lead us to reason that perhaps we can ignore this problem if the nonlinear equation is 'close to' linear. To test that, we can iterate several times and then compare the results.
Step10: Unfortunately the nonlinear version is not stable. It drifted significantly from the mean of 0, and the variance is half an order of magnitude larger.
Step11: Despite the curve being smooth and reasonably straight at $x=1$ the probability distribution of the output doesn't look anything like a Gaussian and the computed mean of the output is quite different than the value computed directly. This is not an unusual function - a ballistic object moves in a parabola, and this is the sort of nonlinearity your filter will need to handle. If you recall we've tried to track a ball and failed miserably. This graph should give you insight into why the filter performed so poorly.
Step12: What happens when we try to linearize this problem? The radar gives us a range to the aircraft. Suppose the radar is directly under the aircraft (x=10) and the next measurement states that the aircraft is 3 miles away (y=3). The positions that could match that measurement form a circle with radius 3 miles, like so.
Step13: We can see by inspection that the probable position of the aircraft is somewhere near x=11.4, y=2.7 because that is where the covariance ellipse and range measurement overlap. But the range measurement is nonlinear so we have to linearize it. We haven't covered this material yet, but the Extended Kalman filter will linearize at the last position of the aircraft - (10,2). At x=10 the range measurement has y=3, and so we linearize at that point.
Step14: Now we have a linear representation of the problem (literally a straight line) which we can solve. Unfortunately you can see that the intersection of the line and the covariance ellipse is a long way from the actual aircraft position.
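A minimal sketch of what "linearizing" a range measurement means in practice, assuming a measurement function h(x, y) = sqrt(x^2 + y^2) (radar at the origin) and a first-order Taylor expansion around a chosen linearization point; the function names below are illustrative and are not taken from the kf_book code:
import numpy as np

def h(x, y):
    # true (nonlinear) range from a radar at the origin to the point (x, y)
    return np.sqrt(x**2 + y**2)

def h_linearized(x, y, x0, y0):
    # first-order Taylor expansion of h around the linearization point (x0, y0)
    r0 = h(x0, y0)
    return r0 + (x0 / r0) * (x - x0) + (y0 / r0) * (y - y0)

print(h(3.2, 1.1), h_linearized(3.2, 1.1, 3.0, 1.0))   # good agreement near (3, 1)
print(h(0.5, 2.9), h_linearized(0.5, 2.9, 3.0, 1.0))   # poor agreement far from it
The second comparison illustrates the point made above: the linearization is only trustworthy close to the point it was taken at, which is why the linearized measurement can intersect the covariance ellipse far from the true aircraft position.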
|
50 | <ASSISTANT_TASK:>
Python Code:
#$HIDE_INPUT$
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "nhtsa_traffic_fatalities" dataset
dataset_ref = client.dataset("nhtsa_traffic_fatalities", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# Construct a reference to the "accident_2015" table
table_ref = dataset_ref.table("accident_2015")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "accident_2015" table
client.list_rows(table, max_results=5).to_dataframe()
# Query to find out the number of accidents for each day of the week
query = """
SELECT COUNT(consecutive_number) AS num_accidents,
       EXTRACT(DAYOFWEEK FROM timestamp_of_crash) AS day_of_week
FROM `bigquery-public-data.nhtsa_traffic_fatalities.accident_2015`
GROUP BY day_of_week
ORDER BY num_accidents DESC
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 1 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**9)
query_job = client.query(query, job_config=safe_config)
# API request - run the query, and convert the results to a pandas DataFrame
accidents_by_day = query_job.to_dataframe()
# Print the DataFrame
accidents_by_day
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Let's use the table to determine how the number of accidents varies with the day of the week. Since
Step3: As usual, we run it as follows
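As a small follow-up sketch: in BigQuery standard SQL, EXTRACT(DAYOFWEEK ...) returns an integer from 1 to 7 with 1 = Sunday, so a mapping step like the one below (using the DataFrame and column names from the query above) makes the result easier to read:
day_names = {1: "Sunday", 2: "Monday", 3: "Tuesday", 4: "Wednesday",
             5: "Thursday", 6: "Friday", 7: "Saturday"}
# `accidents_by_day` is the DataFrame returned by query_job.to_dataframe() above
accidents_by_day["day_name"] = accidents_by_day["day_of_week"].map(day_names)
print(accidents_by_day)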
|
51 | <ASSISTANT_TASK:>
Python Code:
import twothirds
import random
N = 2000
guesses = [int(round(random.triangular(0, 100, 44), 0)) for k in range(N)]
g = twothirds.TwoThirdsGame(guesses)
g.two_thirds_of_the_average()
g.find_winner()
import string
def randomword(length):
"""A function to generate a random name: http://stackoverflow.com/questions/2030053/random-strings-in-python"""
return ''.join(random.choice(string.lowercase) for i in range(length))
guesses = {randomword(8):guess for guess in guesses}
g = twothirds.TwoThirdsGame(guesses)
g.two_thirds_of_the_average()
g.find_winner()
import pandas
df = pandas.DataFrame(guesses.items())
df.to_csv('demo.csv', index=False)
data = twothirds.Data('demo.csv')
data.read()
data.df.head()
guesses = data.out()[0]
g = twothirds.TwoThirdsGame(guesses)
g.find_winner()
guesses = [[key, guesses[key], int(random.triangular(0, guesses[key], 1.0 * guesses[key] / 3))] for key in guesses]
df = pandas.DataFrame(guesses)
df.to_csv('demo.csv', index=False)
activity = twothirds.Activity('demo.csv')
activity.raw_data.df.head()
activity.games
[g.find_winner()[-1] for g in activity.games]
activity.games[0].find_winner()[:-1]
activity.games[1].find_winner()[:-1]
%matplotlib inline
activity.analyse()
activity.distplot();
activity.pairplot();
activity
import twothirds
activity = twothirds.Activity('data.csv')
activity.analyse()
%matplotlib inline
activity.distplot();
activity.pairplot();
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us assume we have the following list of random guesses
Step2: Now we create a single game instance
Step3: Let's find the two thirds of the average (a standalone sketch of this computation appears after this list)
Step4: We can identify the winning guess
Step6: Note that the data could also be in the form of a dictionary that maps names of players to guesses
Step7: We see that quite a few people won.
Step8: Handling data
Step9: We can now read in this data
Step10: The data file has a dataframe attribute
Step11: We can get the data in a nicer format and ready for use. The format is a list of objects representing every play of the game (so for example we could have a file with muliple columns for each game).
Step12: Here we create the game (as above)
Step13: Managing an activity
Step14: Here we write the data to file again
Step15: We have still got access to the raw data
Step16: We also have an instance for each game
Step17: The winning guess for each game can be found below
Step18: The winners of the first game
Step19: The winners of the second game (there are more of them)
Step20: The library has some inbuilt plots
Step21: Finally you can see a summary of everything here
Step22: Here is a larger example
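For reference, the computation the twothirds library performs can be sketched in a few lines of plain Python (the player names and guesses below are made up):
import numpy as np

guesses = {"alice": 33, "bob": 22, "carol": 15}
target = 2 / 3 * np.mean(list(guesses.values()))                 # two thirds of the average
winner = min(guesses, key=lambda name: abs(guesses[name] - target))  # closest guess wins
print(target, winner)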
|
52 | <ASSISTANT_TASK:>
Python Code:
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
item_count = len(shopping_list)
print("List: %s has %d items" % (shopping_list, item_count))
for item in shopping_list:
print("I need to buy some %s " % (item))
# or with f-strings
for item in shopping_list:
print(f"I need to buy some {item}")
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
#TODO: Write code here
print("The first item in the list is:", shopping_list[0])
print("The last item in the list is:", shopping_list[3])
print("This is also the last item in the list:", shopping_list[-1])
print("This is the second to last item in the list:", shopping_list[-2])
for i in range(len(shopping_list)):
print("I need to buy some %s " % (shopping_list[i]))
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
#TODO: Write code here
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print(f"Before: {shopping_list}")
shopping_list[-1] = 'Craft Beer'
shopping_list[1] = 'Organic Eggs'
print(f"After {shopping_list}")
def print_shopping_list(mylist):
print(f"My shopping list: {mylist}")
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print_shopping_list(shopping_list)
print("Adding 'Cheese' to the end of the list...")
shopping_list.append('Cheese') #add to end of list
print_shopping_list(shopping_list)
print("Adding 'Cereal' to position 0 in the list...")
shopping_list.insert(0,'Cereal') # add to the beginning of the list (position 0)
print_shopping_list(shopping_list)
print("Removing 'Cheese' from the list...")
shopping_list.remove('Cheese') # remove 'Cheese' from the list
print_shopping_list(shopping_list)
print("Removing item from position 0 in the list...")
del shopping_list[0] # remove item at position 0
print_shopping_list(shopping_list)
# TODO: debug this code
stocks = []
while false:
choice = input("Enter Command: A, R, Q ?").upper()
if choice == 'Q':
break
elif choice == 'A':
stock = input("Enter symbol to ADD: ").upper()
stocks.insert(stock,0)
print(f"Your Stocks stocks")
elif choice == 'R':
stock = input("Enter symbol to REMOVE: ").upper()
stoscks.delete(stock)
print("Your Stocks {stocks}")
else:
print("Invalid Command!")
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print("Before Sort:", shopping_list)
shopping_list.sort()
print("After Sort:", shopping_list)
sentence = "I like cheese"
words = sentence.split()
print(f"words is a {type(words)} values: {words}")
words.insert(2,'swiss')
print(words)
new_sentence = " ".join(words)
print(f"Joined back into a sentence: {new_sentence}")
with open('shopping_list.txt','r') as f:
lines = f.readlines()
print(f"This is a list: {lines}")
print(f"Unstripped: {lines}")
# List comprehension
stripped_lines = [ line.strip() for line in lines ]
print(f"Stripped: {stripped_lines}")
raw_input = input("Enter a comma-separated list of numbers: ")
raw_list = raw_input.split(',')
number_list = [ float(number) for number in raw_list ]
print(f"Raw Input: {raw_input}")
print(f"Tokenized Input {raw_list}")
print(f"Parsed to Numbers: {number_list}")
## TODO: Write program here:
# run this code to turn in your work!
from coursetools.submission import Submission
Submission().submit()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enumerating the Items in a List
Step2: 1.1 You Code
Step3: Indexing Lists
Step4: For Loop with Index
Step5: 1.2 You Code
Step6: Lists are Mutable
Step7: List Methods
Step8: 1.3 You Code
Step9: Sorting
Step10: The Magic behind S.split() and S.join(list)
Step11: To demonstrate it's really a list, let's add a word to the list and then regenerate the sentence with the S.join(list) method.
Step12: The Magic behind file.readlines()
Step13: List Comprehensions (a few extra examples appear after this list)
Step14: In the above example
Step15: Putting it all together
Step16: Metacognition
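A couple of extra list-comprehension examples, along the lines of the line-stripping example referred to above (the variable names here are illustrative only):
words = ["  milk ", "eggs", " bread  "]
cleaned = [w.strip() for w in words]            # transform every item
evens = [n for n in range(10) if n % 2 == 0]    # keep only items that pass a test
lengths = [len(w) for w in cleaned]             # derive a new list of values
print(cleaned, evens, lengths)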
|
53 | <ASSISTANT_TASK:>
Python Code:
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = "cloud-training-bucket" # Replace with your BUCKET
REGION = "us-central1" # Choose an available region for Cloud MLE
TFVERSION = "1.14" # TF version for CMLE to use
import os
os.environ["BUCKET"] = BUCKET
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = TFVERSION
%%bash
if ! gsutil ls -r gs://${BUCKET} | grep -q gs://${BUCKET}/babyweight/trained_model/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
# Check to see if the model and version already exist,
# if so, delete them to deploy anew
if gcloud ai-platform models list | grep "$MODEL_NAME \+ $MODEL_VERSION"; then
echo "Deleting the version '$MODEL_VERSION' of model '$MODEL_NAME'"
yes | gcloud ai-platform versions delete ${MODEL_VERSION} --model=$MODEL_NAME
echo "Deleting the model '$MODEL_NAME'"
yes |gcloud ai-platform models delete ${MODEL_NAME}
else
echo "The model '$MODEL_NAME' with version '$MODEL_VERSION' does not exist."
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deploying the model '$MODEL_NAME', version '$MODEL_VERSION' from $MODEL_LOCATION"
echo "... this will take a few minutes"
gcloud # TODO: Your code goes here
gcloud # TODO: Your code goes here
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = # TODO: Your code goes here
MODEL_VERSION = # TODO: Your code goes here
token = # TODO: Your code goes here
api = # TODO: Your code goes here
headers = {"Authorization": "Bearer " + token }
data = {
"instances": [
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Single(1)",
"gestation_weeks": 39
},
{
"is_male": "False",
"mother_age": 29.0,
"plurality": "Single(1)",
"gestation_weeks": 38
},
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Triplets(3)",
"gestation_weeks": 39
},
# TODO: Your code goes here
]
}
response = # TODO: Your code goes here
print(response.content)
%%writefile inputs.json
{"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil # TODO: Your code goes here
gsutil # TODO: Your code goes here
gcloud ai-platform # TODO: Your code goes here
--data-format= # TODO: Your code goes here
--region= # TODO: Your code goes here
--input-paths= # TODO: Your code goes here
--output-path= # TODO: Your code goes here
--model= # TODO: Your code goes here
--version= # TODO: Your code goes here
!gsutil ls gs://$BUCKET/babyweight/batchpred/outputs
!gsutil cat gs://$BUCKET/babyweight/batchpred/outputs/prediction.results*
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Deploy trained model
Step2: We'll now deploy our model. This will take a few minutes. Once the cell below completes, you should be able to see your newly deployed model in the 'Models' portion of the AI Platform section of the GCP console.
Step3: Exercise 1
Step4: Use the deployed model to make online predictions (one plausible way to fill in the TODO lines is sketched after this list)
Step5: Use model for batch prediction
Step6: Exercise 3
Step7: Check the ML Engine jobs submitted to the GCP console to make sure the prediction job has completed, then let's have a look at the results of our predictions.
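The online-prediction cell above deliberately leaves several lines as TODO exercises. Purely as a hedged illustration (not the official lab solution), one plausible way to fill them in uses the classic AI Platform REST endpoint and application-default credentials:
from oauth2client.client import GoogleCredentials
import requests

MODEL_NAME = "babyweight"
MODEL_VERSION = "ml_on_gcp"

token = GoogleCredentials.get_application_default().get_access_token().access_token
api = ("https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict"
       .format(PROJECT, MODEL_NAME, MODEL_VERSION))
headers = {"Authorization": "Bearer " + token}
# `data` is the {"instances": [...]} payload built in the exercise cell above
response = requests.post(api, json=data, headers=headers)
print(response.content)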
|
54 | <ASSISTANT_TASK:>
Python Code:
from keras.datasets import imdb
idx = imdb.get_word_index()
idx_arr = sorted(idx, key=idx.get)
idx_arr[:10]
idx2word = {v: k for k, v in idx.iteritems()}
path = get_file('imdb_full.pkl',
origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',
md5_hash='d091312047c43cf9e4e38fef92437263')
f = open(path, 'rb')
(x_train, labels_train), (x_test, labels_test) = pickle.load(f)
len(x_train)
', '.join(map(str, x_train[0]))
idx2word[23022]
' '.join([idx2word[o] for o in x_train[0]])
labels_train[:10]
vocab_size = 5000
trn = [np.array([i if i<vocab_size-1 else vocab_size-1 for i in s]) for s in x_train]
test = [np.array([i if i<vocab_size-1 else vocab_size-1 for i in s]) for s in x_test]
lens = np.array(map(len, trn))
(lens.max(), lens.min(), lens.mean())
seq_len = 500
trn = sequence.pad_sequences(trn, maxlen=seq_len, value=0)
test = sequence.pad_sequences(test, maxlen=seq_len, value=0)
trn.shape
model = Sequential([
Embedding(vocab_size, 32, input_length=seq_len),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
conv1 = Sequential([
Embedding(vocab_size, 32, input_length=seq_len, dropout=0.2),
Dropout(0.2),
Convolution1D(64, 5, border_mode='same', activation='relu'),
Dropout(0.2),
MaxPooling1D(),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
conv1.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
conv1.save_weights(model_path + 'conv1.h5')
conv1.load_weights(model_path + 'conv1.h5')
def get_glove_dataset(dataset):
"""Download the requested glove dataset from files.fast.ai
and return a location that can be passed to load_vectors."""
# see wordvectors.ipynb for info on how these files were
# generated from the original glove data.
md5sums = {'6B.50d': '8e1557d1228decbda7db6dfd81cd9909',
'6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',
'6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',
'6B.300d': '30290210376887dcc6d0a5a6374d8255'}
glove_path = os.path.abspath('data/glove/results')
%mkdir -p $glove_path
return get_file(dataset,
'http://files.fast.ai/models/glove/' + dataset + '.tgz',
cache_subdir=glove_path,
md5_hash=md5sums.get(dataset, None),
untar=True)
def load_vectors(loc):
return (load_array(loc+'.dat'),
pickle.load(open(loc+'_words.pkl','rb')),
pickle.load(open(loc+'_idx.pkl','rb')))
vecs, words, wordidx = load_vectors(get_glove_dataset('6B.50d'))
def create_emb():
n_fact = vecs.shape[1]
emb = np.zeros((vocab_size, n_fact))
for i in range(1,len(emb)):
word = idx2word[i]
if word and re.match(r"^[a-zA-Z0-9\-]*$", word):
src_idx = wordidx[word]
emb[i] = vecs[src_idx]
else:
# If we can't find the word in glove, randomly initialize
emb[i] = normal(scale=0.6, size=(n_fact,))
# This is our "rare word" id - we want to randomly initialize
emb[-1] = normal(scale=0.6, size=(n_fact,))
emb/=3
return emb
emb = create_emb()
model = Sequential([
Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2,
weights=[emb], trainable=False),
Dropout(0.25),
Convolution1D(64, 5, border_mode='same', activation='relu'),
Dropout(0.25),
MaxPooling1D(),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
model.layers[0].trainable=True
model.optimizer.lr=1e-4
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=1, batch_size=64)
model.save_weights(model_path+'glove50.h5')
from keras.layers import Merge
graph_in = Input ((vocab_size, 50))
convs = [ ]
for fsz in range (3, 6):
x = Convolution1D(64, fsz, border_mode='same', activation="relu")(graph_in)
x = MaxPooling1D()(x)
x = Flatten()(x)
convs.append(x)
out = Merge(mode="concat")(convs)
graph = Model(graph_in, out)
emb = create_emb()
model = Sequential ([
Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2, weights=[emb]),
Dropout (0.2),
graph,
Dropout (0.5),
Dense (100, activation="relu"),
Dropout (0.7),
Dense (1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
model.layers[0].trainable=False
model.optimizer.lr=1e-5
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
model = Sequential([
Embedding(vocab_size, 32, input_length=seq_len, mask_zero=True,
W_regularizer=l2(1e-6), dropout=0.2),
LSTM(100, consume_less='gpu'),
Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=5, batch_size=64)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is the word list
Step2: ...and this is the mapping from id to word
Step3: We download the reviews using code copied from keras.datasets
Step4: Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.
Step5: The first word of the first review is 23022. Let's see what that is.
Step6: Here's the whole review, mapped from ids to words.
Step7: The labels are 1 for positive, 0 for negative.
Step8: Reduce vocab size by setting rare words to max index.
Step9: Look at distribution of lengths of sentences.
Step10: Pad (with zero) or truncate each sentence to make consistent length (a toy illustration of this step is given after this list).
Step11: This results in nice rectangular matrices that can be passed to ML algorithms. Reviews shorter than 500 words are pre-padded with zeros, those greater are truncated.
Step12: Create simple models
Step13: The Stanford paper that this dataset is from cites a state-of-the-art accuracy (without unlabelled data) of 0.883. So we're short of that, but on the right track.
Step14: That's well past the Stanford paper's accuracy - another win for CNNs!
Step16: Pre-trained vectors
Step17: The glove word ids and imdb word ids use different indexes. So we create a simple function that creates an embedding matrix using the indexes from imdb, and the embeddings from glove (where they exist).
Step18: We pass our embedding matrix to the Embedding constructor, and set it to non-trainable.
Step19: We already have beaten our previous model! But let's fine-tune the embedding weights - especially since the words we couldn't find in glove just have random embeddings.
Step20: As expected, that's given us a nice little boost.
Step21: Multi-size CNN
Step22: We use the functional API to create multiple conv layers of different sizes, and then concatenate them.
Step23: We then replace the conv/max-pool layer in our original CNN with the concatenated conv layers.
Step24: Interestingly, I found that in this case I got best results when I started the embedding layer as being trainable, and then set it to non-trainable after a couple of epochs. I have no idea why!
Step25: This more complex architecture has given us another boost in accuracy.
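A tiny illustration of what the padding step referred to above does, assuming the same Keras version as the notebook; pad_sequences pre-pads (or pre-truncates) each sequence to the requested length:
from keras.preprocessing import sequence

toy = [[1, 2, 3], [4, 5], [6]]
print(sequence.pad_sequences(toy, maxlen=4, value=0))
# [[0 1 2 3]
#  [0 0 4 5]
#  [0 0 0 6]]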
|
55 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
MODEL_DIR = BUCKET_NAME + "/imdb"
model_path_to_deploy = MODEL_DIR
! rm -rf custom
! mkdir custom
! mkdir custom/trainer
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for IMDB
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=1e-4, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print(device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets():
dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True,
as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
encoder = info.features['text'].encoder
padded_shapes = ([None],())
return train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE, padded_shapes), encoder
train_dataset, encoder = make_datasets()
# Build the Keras model
def build_and_compile_rnn_model(encoder):
model = tf.keras.Sequential([
tf.keras.layers.Embedding(encoder.vocab_size, 64),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(args.lr),
metrics=['accuracy'])
return model
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_rnn_model(encoder)
# Train the model
model.fit(train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
! python custom/trainer/task.py --epochs=10 --model-dir=$MODEL_DIR
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
import tensorflow_datasets as tfds
dataset, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True)
test_dataset = dataset["test"]
encoder = info.features["text"].encoder
BATCH_SIZE = 64
padded_shapes = ([None], ())
test_dataset = test_dataset.padded_batch(BATCH_SIZE, padded_shapes)
model.evaluate(test_dataset)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model("imdb-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy)
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
ENDPOINT_NAME = "imdb_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "imdb_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"disable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
import tensorflow_datasets as tfds
dataset, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True)
test_dataset = dataset["test"]
test_dataset.take(1)
for data in test_dataset:
print(data)
break
test_item = data[0].numpy()
def predict_data(data, endpoint, parameters_dict):
parameters = json_format.ParseDict(parameters_dict, Value())
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{serving_input: data.tolist()}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Hardware Accelerators
Step13: Container (Docker) image
Step14: Machine Type
Step15: Tutorial
Step16: Train a model locally
Step17: Task.py contents
Step18: Train the model
Step19: Load the saved model
Step20: Evaluate the model
Step21: Perform the model evaluation
Step22: Upload the model for serving
Step23: Upload the model
Step24: Get Model resource information
Step25: Deploy the Model resource
Step26: Now get the unique identifier for the Endpoint resource you created.
Step27: Compute instance scaling
Step28: Deploy Model resource to the Endpoint resource
Step29: Make an online prediction request
Step30: Send the prediction request (an alternative using the high-level SDK is sketched after this list)
Step31: Undeploy the Model resource
Step32: Cleaning up
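As an aside, the same online prediction can also be issued with the higher-level google.cloud.aiplatform SDK instead of the low-level gapic clients used in this tutorial. A minimal sketch, assuming the SDK is installed and the variables from the steps above are in scope:
from google.cloud import aiplatform

aiplatform.init(project=PROJECT_ID, location=REGION)
endpoint = aiplatform.Endpoint(endpoint_name=endpoint_id)  # full resource name from above
prediction = endpoint.predict(instances=[{serving_input: test_item.tolist()}])
print(prediction.predictions)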
|
56 | <ASSISTANT_TASK:>
Python Code:
##Some code to run at the beginning of the file, to be able to show images in the notebook
##Don't worry about this cell
#Print the plots in this screen
%matplotlib inline
#Be able to plot images saved in the hard drive
from IPython.display import Image
#Make the notebook wider
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
import seaborn as sns
import pylab as plt
import pandas as pd
import numpy as np
import scipy.stats
import statsmodels.formula.api as smf
#Some libraries
from sklearn import preprocessing
from sklearn.cluster import DBSCAN, KMeans
#Read the data, dropna, get sample
df = pd.read_csv("data/big3_position.csv",sep="\t").dropna()
df["Revenue"] = np.log10(df["Revenue"])
df["Assets"] = np.log10(df["Assets"])
df["Employees"] = np.log10(df["Employees"])
df["MarketCap"] = np.log10(df["MarketCap"])
df = df.replace([np.inf,-np.inf],np.nan).dropna().sample(300)
df.head(2)
#Scale variables to give all of them the same weight
X = df.loc[:,["Revenue","Assets","Employees","MarketCap"]]
X = preprocessing.scale(X)
print(X.sum(0))
print(X.std(0))
X
#Get labels of each row and add a new column with the labels
kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
labels = kmeans.labels_
df["kmeans_labels"] = labels
sns.lmplot(x="MarketCap",y="Assets",hue="kmeans_labels",fit_reg=False,data=df)
#Get labels of each row and add a new column with the labels
db = DBSCAN(eps=1, min_samples=10).fit(X)
labels = db.labels_
df["dbscan_labels"] = labels
sns.lmplot(x="MarketCap",y="Assets",hue="dbscan_labels",fit_reg=False,data=df)
Image(url="http://scikit-learn.org/stable/_images/sphx_glr_plot_cluster_comparison_0011.png")
import scipy
import pylab
import scipy.cluster.hierarchy as sch
# Generate distance matrix based on the difference between rows
D = np.zeros([4,4])
for i in range(4):
for j in range(4):
D[i,j] = np.sum(np.abs(X[:,i]-X[:,j])) #Euclidean distance or mutual information are also common
print(D)
#Create the linkage and plot
Y = sch.linkage(D, method='centroid') #many methods, single, complete...
Z1 = sch.dendrogram(Y, orientation='right',labels=["Revenue","Assets","Employees","MarketCap"])
#Required libraries
!conda install tensorflow -y
!pip install fancyimpute
!pip install pydot_ng
import sklearn.preprocessing
import sklearn
#Read the data again, but do not drop missing values this time
df = pd.read_csv("data/big3_position.csv",sep="\t")
df["Revenue"] = np.log10(df["Revenue"])
df["Assets"] = np.log10(df["Assets"])
df["Employees"] = np.log10(df["Employees"])
df["MarketCap"] = np.log10(df["MarketCap"])
le = sklearn.preprocessing.LabelEncoder()
labels = le.fit_transform(df["TypeEnt"])
df["TypeEnt_int"] = labels
print(le.classes_)
df = df.replace([np.inf,-np.inf],np.nan).sample(300)
df.head(2)
X = df.loc[:,["Revenue","Assets","Employees","MarketCap","TypeEnt_int"]].values
X
df.describe()
from fancyimpute import KNN
# X is the complete data matrix
# X_incomplete has the same values as X except a subset have been replaced with NaN
# Use 10 nearest rows which have a feature to fill in each row's missing features
X_filled_knn = KNN(k=10).complete(X)
# Columns that were passed to the imputer, in the same order as X
cols = ["Revenue","Assets","Employees","MarketCap","TypeEnt_int"]
df.loc[:,cols] = X_filled_knn
df.describe()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Clustering
Step2: 1a. Clustering with K-means
Step3: 1b. Clustering with DBSCAN
Step4: 1c. Hierarchical clustering
Step5: 2. Imputation of missing data (fancy)
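A minimal alternative sketch for the imputation step: scikit-learn's KNNImputer implements the same idea as fancyimpute's KNN without the extra tensorflow dependency (assuming scikit-learn >= 0.22):
from sklearn.impute import KNNImputer

imputer = KNNImputer(n_neighbors=10)
X_filled = imputer.fit_transform(X)   # X is the matrix with NaNs built above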
|
57 | <ASSISTANT_TASK:>
Python Code:
print("Exemplo 4.1")
import numpy as np
#For vs = 12V
#6i1 + 2i1 + 4(i1 - i2) = -12
#12i1 - 4i2 = -12
#3i1 - i2 = -3
#-3vx -12 + 4(i2 - i1) + 8i2 + 4i2 = 0
#vx = 2i1
#-6i1 + 16i2 - 4i1 = 12
#-10i1 + 16i2 = 12
#-5i1 + 8i2 = 6
#i0 = i2
coef = np.matrix('3 -1;-5 8')
res = np.matrix('-3;6')
I = np.linalg.inv(coef)*res
print("Para vs = 12V, i0 é:",I[1],"A")
#Para vs = 24V
#6i1 + 2i1 + 4(i1 - i2) = -24
#12i1 - 4i2 = -24
#3i1 - i2 = -6
#-3vx -24 + 4(i2 - i1) + 8i2 + 4i2 = 0
#vx = 2i1
#-6i1 + 16i2 - 4i1 = 24
#-10i1 + 16i2 = 24
#-5i1 + 8i2 = 12
#i0 = i2
coef = np.matrix('3 -1;-5 8')
res = np.matrix('-6;12')
I = np.linalg.inv(coef)*res
print("Para vs = 12V, i0 é:",I[1],"A")
print("Problema Prático 4.1")
#Req1 = 12 + 8 = 20
#Para is = 30
i_s = 30
i0 = i_s*4/(4+20)
v0 = 8*i0
print("Para is = 30A, v0 é:",v0,"V")
#Para is = 45
i_s = 45
i0 = i_s*4/(4+20)
v0 = 8*i0
print("Para is = 45A, v0 é:",v0,"V")
print("Exemplo 4.3")
Vs = 6
Is = 3
#Voltage source analysis
v1 = Vs*4/(4 + 8)
#Current source analysis
i2 = Is*8/(4 + 8)
v2 = 4*i2
v = v1 + v2
print("Tensão V:",v,"V")
print("Problema Prático 4.3")
Vs = 12
Is = 5
#Voltage source analysis
v1 = Vs*2/(2 + 3 + 5)
#Current source analysis
i2 = Is*5/(2 + 3 + 5)
v2 = i2*2
v = v1 + v2
print("Tensão v0:",v,"V")
print("Exemplo 4.4")
Is = 4
Vs = 20
#Voltage source analysis
#i1 = -i0
#5i1 + (i1 - i2) + 5i0 + 4i1 = 20
#5i1 - i2 = 20
#-5i0 + (i2 - i1) + 3i2 + 2i2 = 0
#5i1 -i1 + 6i2 = 0
#4i1 + 6i2 = 0
#2i1 + 3i2 = 0
coef = np.matrix('5 -1;2 3')
res = np.matrix('20;0')
Iv = np.linalg.inv(coef)*res
i0v = -Iv[0]
#Current source analysis
#i0 = Is - i1 = 4 - i1
#5i0 + 4i1 + 5(i1 - Is) + i1 - i2 = 0
#5(4 - i1) + 9i1 - 20 + (i1 - i2) = 0
#5i1 - i2 = 0
#-5i0 + (i2 - i1) + 3(i2 - Is) + 2i2 = 0
#-5(4 - i1) + 6i2 - i1 - 12 = 0
#4i1 + 6i2 = 32
#2i1 + 3i2 = 16
coef = np.matrix('5 -1;2 3')
res = np.matrix('0;16')
Ic = np.linalg.inv(coef)*res
i0c = Is-Ic[0]
i0 = i0v + i0c
print("Corrente i0:",float(i0),"A")
print("Problema Prático 4.4")
Vs = 25
Is = 5
#Voltage source analysis
#i1 + 0.1vx = i2
#i1 = (25 - vx)/20 = 5/4 - vx/20
#i2 = vx/4
#5/4 - vx/20 + vx/10 = vx/4
# (-vx +2vx - 5vx)/20 = -5/4
#-vx/5 = -5/4
#vx = 25/4
#Current source analysis
#i1 + i2 = 5
#vx = 4*(i2 + 0.1vx)
#6vx/10 = 4i2
#i2 = 3vx/20
#vx = 20i1
#i1 = vx/20
#vx/20 + 3vx/20 = 5
#vx = 25
vx = 25 + 25/4
print("Tensão vx:",vx,"V")
print("Exemplo 4.5")
#Analise Fonte Tensao 1
#Req1 = 8 + 4 = 12
#Req2 = (12*4)/(12 + 4) = 3
#Vx = 12*3/(3 + Req2) = 36/6 = 6
#iv1 = Vx/3 = 12/6 = 2 A
iv1 = 2
#Voltage source 2 analysis
#Req1 = 8 + 4 = 12
#Req2 = (4*3)/(4+3) = 12/7
#Vx = 24*Req2/(Req2 + Req1)
#Vx = 3
#iv2 = -Vx/3 = -1
iv2 = -1
#Current source analysis
#Req1 = (4*3)/(4 + 3) = 12/7
#Req2 = Req1 + 4 = 40/7
#i1 = 3*8/(8 + Req2) = 24/(8 + 40/7)
i1 = 24/(8 + 40/7)
ic = i1*4/(4 + 3) # = 1
i = iv1 + iv2 + ic
print("Corrente i:",i,"A")
print("Problema Prático 4.5")
#Analise fonte de Tensao 1
iv1 = 8/(6 + 2 + 8)
#Voltage source 2 analysis
iv2 = -6/(6 + 2 + 8)
#Current source analysis
ic = 2*2/(2 + 6 + 8)
i = iv1 + iv2 + ic
print("Corrente i:",i,"A")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Practice Problem 4.1
Step2: Superposition
Step3: Practice Problem 4.3
Step4: Example 4.4
Step5: Practice Problem 4.4
Step6: Example 4.5
Step7: Practice Problem 4.5
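All of the worked examples above reduce to solving small linear systems for the mesh currents. A minimal sketch of the same solve for Example 4.1 (vs = 12 V) using numpy.linalg.solve on plain arrays, the form recommended over np.matrix in recent NumPy releases:
import numpy as np

A = np.array([[3.0, -1.0],
              [-5.0, 8.0]])
b = np.array([-3.0, 6.0])
i1, i2 = np.linalg.solve(A, b)
print("i0 =", i2, "A")   # matches the np.matrix result above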
|
58 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
pylab.rc("savefig", dpi=120) # set resolution of inline figures
import echidna.core.spectra as spectra
import echidna
config = spectra.SpectraConfig.load_from_file(echidna.__echidna_base__ +
"/echidna/config/example.yml")
print config.get_pars()
print echidna.__echidna_base__
print echidna.__echidna_home__
num_decays = 1000
spectrum = spectra.Spectra("spectrum", num_decays, config)
print spectrum
# Import numpy
import numpy
# Generate random energies from a Gaussin with mean (mu) and sigma (sigma)
mu = 2.5 # MeV
sigma = 0.15 # MeV
# Generate random radial position from a Uniform distribution
outer_radius = 5997 # Radius of SNO+ AV
# Detector efficiency
efficiency = 0.9 # 90%
for event in range(num_decays):
energy = numpy.random.normal(mu, sigma)
radius = numpy.random.uniform(high=outer_radius)
event_detected = (numpy.random.uniform() < efficiency)
if event_detected: # Fill spectrum with values
spectrum.fill(energy_mc=energy, radial_mc=radius)
print spectrum.sum()
print num_decays * efficiency
print spectrum._data
import echidna.output.plot as plot
import echidna.output.plot_root as plot_root
fig1 = plot.plot_projection(spectrum, "energy_mc",
fig_num=1, show_plot=False)
plt.show()
plot_root.plot_projection(spectrum, "radial_mc", fig_num=2)
fig_3 = plot.plot_surface(spectrum, "energy_mc", "radial_mc",
fig_num=3, show_plot=False)
plt.show()
shrink_dict = {"energy_mc_low": mu - 5.*sigma,
"energy_mc_high": mu + 5.*sigma,
"radial_mc_low": 0.0,
"radial_mc_high": 3500}
spectrum.shrink(**shrink_dict)
print spectrum.sum()
import echidna.core.smear as smear
smearer = smear.EnergySmearRes()
smearer.set_num_sigma(3)
smearer.set_resolution(0.05)
smeared_spectrum = smearer.weighted_smear(spectrum)
import numpy as np
import matplotlib.pyplot as plt
def overlay_spectra(original, smeared, dimension="energy_mc", fig_num=1):
"""Overlay original and smeared spectra.
Args:
original (echidna.core.spectra.Spectra): Original spectrum.
smeared (echidna.core.spectra.Spectra): Smeared spectrum.
dimension (string, optional): Dimension to project onto.
Default is "energy_mc".
fig_num (int, optional): Figure number, if producing multiple
figures. Default is 1.
Returns:
matplotlib.figure.Figure: Figure showing overlaid spectra.
"""
fig = plt.figure(num=fig_num)
ax = fig.add_subplot(1, 1, 1)
par = original.get_config().get_par(dimension)
width = par.get_width()
# Define array of bin boundaries (1 more than number of bins)
bins = np.linspace(par._low, par._high, par._bins+1)
# Define array of bin centres
x = bins[:-1] + 0.5*width
# Overlay two spectra using projection as weight
ax.hist(x, bins, weights=original.project(dimension),
histtype="stepfilled", color="RoyalBlue",
alpha=0.5, label=original._name)
ax.hist(x, bins, weights=smeared.project(dimension),
histtype="stepfilled", color="Red",
alpha=0.5, label=smeared._name)
# Add label/style
plt.legend(loc="upper right")
plt.ylim(ymin=0.0)
plt.xlabel(dimension + " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
return fig
fig_4 = overlay_spectra(spectrum, smeared_spectrum, fig_num=4)
plt.show()
roi = (mu - 0.5*sigma, mu + 1.45*sigma) # To get nice shape for rebinning
smeared_spectrum.shrink_to_roi(roi[0], roi[1], "energy_mc")
print smeared_spectrum.get_roi("energy_mc")
dimension = smeared_spectrum.get_config().get_pars().index("energy_mc")
old_shape = smeared_spectrum._data.shape
reduction_factor = 5 # how many bins to combine into a single bin
new_shape = tuple([j / reduction_factor if i == dimension else j
for i, j in enumerate(old_shape)])
print old_shape
print new_shape
smeared_spectrum.rebin(new_shape)
smeared_spectrum.scale(104.25)
print smeared_spectrum.sum()
print smeared_spectrum._data
fig_5 = plot.plot_projection(smeared_spectrum, "energy_mc",
fig_num=5, show_plot=False)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Spectra creation
Step2: Now we need a config file to create the spectrum from. There is an example config file in echidna/config. If we look at the contents of this yaml file, we see it tells the Spectra class to create a data structure to hold two parameters
Step3: Note we used the __echidna_base__ member of the echidna module here. This module has two special members for denoting the base directory (the outermost directory of the git repository) and the home directory (the echidna directory inside the base directory. The following lines show the current location of these directories
Step4: Finally before creating the spectrum, we should define the number of events it should represent
Step5: And there you have it, we've created a Spectra object.
Step6: This will have filled our Spectra class with the events. Make sure to use the exact parameter names that were printed out above, as keyword arguments. To check we can now use the sum method. This returns the total number of events stored in the spectrum at a given time - the integral of the spectrum.
Step7: The value returned by sum should roughly equal num_decays * efficiency (a quick standalone check of this is sketched after this list)
Step8: We can also inspect the raw data structure. This is saved in the _data member of the Spectra class
Step9: Note
Step10: To plot the projection of the spectrum on the energy_mc axis
Step11: and to plot the projection on the radial_mc axis, this time using root
Step12: We can also project onto two dimensions and plot a surface
Step13: Convolution and cuts
Step14: Using the sum method, we can check to see how many events were cut.
Step15: Import the smear class
Step16: and create the smearer object.
Step17: By default the "weighted smear" method considers all bins within a $\pm 5\sigma$ range. For the sake of speed, we will reduce this to 3 here. Also set the energy resolution - 0.05 for 5%.
Step18: To smear our original spectrum and create the new Spectra object smeared_spectrum
Step20: this should hopefully only take a couple of seconds.
Step21: Other spectra manipulations
Step22: Rebin
Step23: Scaling
Step24: Putting it all together
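A quick standalone check of the counting argument behind the sum() step above: with num_decays = 1000 and a 90% detection efficiency, the detected count is binomially distributed, so it fluctuates around 900 with a standard deviation of sqrt(N*p*(1-p)) of roughly 9.5. Plain numpy, independent of echidna:
import numpy as np

num_decays, efficiency = 1000, 0.9
detected = np.random.binomial(num_decays, efficiency, size=10000)
print(detected.mean(), detected.std())                        # ~900, ~9.5
print(np.sqrt(num_decays * efficiency * (1 - efficiency)))    # ~9.49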
|
59 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-1', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
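As a rough sketch of how the TODO stubs above are meant to be completed (the two property ids are taken verbatim from the stubs; the chosen values are made-up examples, not real model documentation):
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
DOC.set_value(True)
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
DOC.set_value("Offline (with clouds)")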
|
60 | <ASSISTANT_TASK:>
Python Code:
import os
from gensim import utils
from gensim.models import translation_matrix
from gensim.models import KeyedVectors
train_file = "OPUS_en_it_europarl_train_5K.txt"
with utils.smart_open(train_file, "r") as f:
word_pair = [tuple(utils.to_unicode(line).strip().split()) for line in f]
print (word_pair[:10])
# Load the source language word vector
source_word_vec_file = "EN.200K.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"
source_word_vec = KeyedVectors.load_word2vec_format(source_word_vec_file, binary=False)
# Load the target language word vector
target_word_vec_file = "IT.200K.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt"
target_word_vec = KeyedVectors.load_word2vec_format(target_word_vec_file, binary=False)
transmat = translation_matrix.TranslationMatrix(source_word_vec, target_word_vec, word_pair)
transmat.train(word_pair)
print ("the shape of translation matrix is: ", transmat.translation_matrix.shape)
# The pair is in the form of (English, Italian), we can see whether the translated word is correct
words = [("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque")]
source_word, target_word = zip(*words)
translated_word = transmat.translate(source_word, 5, )
for k, v in translated_word.items():
print ("word ", k, " and translated word", v)
words = [("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana"), ("mango", "mango")]
source_word, target_word = zip(*words)
translated_word = transmat.translate(source_word, 5)
for k, v in translated_word.items():
print ("word ", k, " and translated word", v)
words = [("dog", "cane"), ("pig", "maiale"), ("cat", "gatto"), ("fish", "cavallo"), ("birds", "uccelli")]
source_word, target_word = zip(*words)
translated_word = transmat.translate(source_word, 5)
for k, v in translated_word.items():
print ("word ", k, " and translated word", v)
import pickle
word_dict = "word_dict.pkl"
with utils.smart_open(word_dict, "rb") as f:
word_pair = pickle.load(f)
print ("the length of word pair ", len(word_pair))
import time
test_case = 10
word_pair_length = len(word_pair)
step = word_pair_length // test_case
duration = []
sizeofword = []
for idx in range(0, test_case):
sub_pair = word_pair[: (idx + 1) * step]
startTime = time.time()
transmat = translation_matrix.TranslationMatrix(source_word_vec, target_word_vec, sub_pair)
transmat.train(sub_pair)
endTime = time.time()
sizeofword.append(len(sub_pair))
duration.append(endTime - startTime)
import plotly
from plotly.graph_objs import Scatter, Layout
plotly.offline.init_notebook_mode(connected=True)
plotly.offline.iplot({
"data": [Scatter(x=sizeofword, y=duration)],
"layout": Layout(title="time for creation"),
}, filename="tm_creation_time.html")
from sklearn.decomposition import PCA
import plotly
from plotly.graph_objs import Scatter, Layout, Figure
plotly.offline.init_notebook_mode(connected=True)
words = [("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque")]
en_words_vec = [source_word_vec[item[0]] for item in words]
it_words_vec = [target_word_vec[item[1]] for item in words]
en_words, it_words = zip(*words)
pca = PCA(n_components=2)
new_en_words_vec = pca.fit_transform(en_words_vec)
new_it_words_vec = pca.fit_transform(it_words_vec)
# remove the code, use plotly for plotting instead
# fig = plt.figure()
# fig.add_subplot(121)
# plt.scatter(new_en_words_vec[:, 0], new_en_words_vec[:, 1])
# for idx, item in enumerate(en_words):
# plt.annotate(item, xy=(new_en_words_vec[idx][0], new_en_words_vec[idx][1]))
# fig.add_subplot(122)
# plt.scatter(new_it_words_vec[:, 0], new_it_words_vec[:, 1])
# for idx, item in enumerate(it_words):
# plt.annotate(item, xy=(new_it_words_vec[idx][0], new_it_words_vec[idx][1]))
# plt.show()
# you can also using plotly lib to plot in one figure
trace1 = Scatter(
x = new_en_words_vec[:, 0],
y = new_en_words_vec[:, 1],
mode = 'markers+text',
text = en_words,
textposition = 'top'
)
trace2 = Scatter(
x = new_it_words_vec[:, 0],
y = new_it_words_vec[:, 1],
mode = 'markers+text',
text = it_words,
textposition = 'top'
)
layout = Layout(
showlegend = False
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='relatie_position_for_number.html')
words = [("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque")]
en_words, it_words = zip(*words)
en_words_vec = [source_word_vec[item[0]] for item in words]
it_words_vec = [target_word_vec[item[1]] for item in words]
# Translate the English word five to Italian word
translated_word = transmat.translate([en_words[4]], 3)
print "translation of five: ", translated_word
# the translated words of five
for item in translated_word[en_words[4]]:
it_words_vec.append(target_word_vec[item])
pca = PCA(n_components=2)
new_en_words_vec = pca.fit_transform(en_words_vec)
new_it_words_vec = pca.fit_transform(it_words_vec)
# remove the code, use plotly for plotting instead
# fig = plt.figure()
# fig.add_subplot(121)
# plt.scatter(new_en_words_vec[:, 0], new_en_words_vec[:, 1])
# for idx, item in enumerate(en_words):
# plt.annotate(item, xy=(new_en_words_vec[idx][0], new_en_words_vec[idx][1]))
# fig.add_subplot(122)
# plt.scatter(new_it_words_vec[:, 0], new_it_words_vec[:, 1])
# for idx, item in enumerate(it_words):
# plt.annotate(item, xy=(new_it_words_vec[idx][0], new_it_words_vec[idx][1]))
# # annote for the translation of five, the red text annotation is the translation of five
# for idx, item in enumerate(translated_word[en_words[4]]):
# plt.annotate(item, xy=(new_it_words_vec[idx + 5][0], new_it_words_vec[idx + 5][1]),
# xytext=(new_it_words_vec[idx + 5][0] + 0.1, new_it_words_vec[idx + 5][1] + 0.1),
# color="red",
# arrowprops=dict(facecolor='red', shrink=0.1, width=1, headwidth=2),)
# plt.show()
trace1 = Scatter(
x = new_en_words_vec[:, 0],
y = new_en_words_vec[:, 1],
mode = 'markers+text',
text = en_words,
textposition = 'top'
)
trace2 = Scatter(
x = new_it_words_vec[:, 0],
y = new_it_words_vec[:, 1],
mode = 'markers+text',
text = it_words,
textposition = 'top'
)
layout = Layout(
showlegend = False,
annotations = [dict(
x = new_it_words_vec[5][0],
y = new_it_words_vec[5][1],
text = translated_word[en_words[4]][0],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
), dict(
x = new_it_words_vec[6][0],
y = new_it_words_vec[6][1],
text = translated_word[en_words[4]][1],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
), dict(
x = new_it_words_vec[7][0],
y = new_it_words_vec[7][1],
text = translated_word[en_words[4]][2],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
)]
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='relatie_position_for_numbers.html')
words = [("dog", "cane"), ("pig", "maiale"), ("cat", "gatto"), ("horse", "cavallo"), ("birds", "uccelli")]
en_words_vec = [source_word_vec[item[0]] for item in words]
it_words_vec = [target_word_vec[item[1]] for item in words]
en_words, it_words = zip(*words)
# remove the plotting code, use plotly for plotting instead
# (the PCA projection itself is still needed by the plotly traces below, so keep it active)
pca = PCA(n_components=2)
new_en_words_vec = pca.fit_transform(en_words_vec)
new_it_words_vec = pca.fit_transform(it_words_vec)
# fig = plt.figure()
# fig.add_subplot(121)
# plt.scatter(new_en_words_vec[:, 0], new_en_words_vec[:, 1])
# for idx, item in enumerate(en_words):
# plt.annotate(item, xy=(new_en_words_vec[idx][0], new_en_words_vec[idx][1]))
# fig.add_subplot(122)
# plt.scatter(new_it_words_vec[:, 0], new_it_words_vec[:, 1])
# for idx, item in enumerate(it_words):
# plt.annotate(item, xy=(new_it_words_vec[idx][0], new_it_words_vec[idx][1]))
# plt.show()
trace1 = Scatter(
x = new_en_words_vec[:, 0],
y = new_en_words_vec[:, 1],
mode = 'markers+text',
text = en_words,
textposition = 'top'
)
trace2 = Scatter(
x = new_it_words_vec[:, 0],
y = new_it_words_vec[:, 1],
mode = 'markers+text',
text = it_words,
textposition ='top'
)
layout = Layout(
showlegend = False
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='relatie_position_for_animal.html')
words = [("dog", "cane"), ("pig", "maiale"), ("cat", "gatto"), ("horse", "cavallo"), ("birds", "uccelli")]
en_words, it_words = zip(*words)
en_words_vec = [source_word_vec[item[0]] for item in words]
it_words_vec = [target_word_vec[item[1]] for item in words]
# Translate the English word birds to Italian word
translated_word = transmat.translate([en_words[4]], 3)
print "translation of birds: ", translated_word
# the translated words of birds
for item in translated_word[en_words[4]]:
it_words_vec.append(target_word_vec[item])
pca = PCA(n_components=2)
new_en_words_vec = pca.fit_transform(en_words_vec)
new_it_words_vec = pca.fit_transform(it_words_vec)
# # remove the code, use plotly for plotting instead
# fig = plt.figure()
# fig.add_subplot(121)
# plt.scatter(new_en_words_vec[:, 0], new_en_words_vec[:, 1])
# for idx, item in enumerate(en_words):
# plt.annotate(item, xy=(new_en_words_vec[idx][0], new_en_words_vec[idx][1]))
# fig.add_subplot(122)
# plt.scatter(new_it_words_vec[:, 0], new_it_words_vec[:, 1])
# for idx, item in enumerate(it_words):
# plt.annotate(item, xy=(new_it_words_vec[idx][0], new_it_words_vec[idx][1]))
# # annote for the translation of five, the red text annotation is the translation of five
# for idx, item in enumerate(translated_word[en_words[4]]):
# plt.annotate(item, xy=(new_it_words_vec[idx + 5][0], new_it_words_vec[idx + 5][1]),
# xytext=(new_it_words_vec[idx + 5][0] + 0.1, new_it_words_vec[idx + 5][1] + 0.1),
# color="red",
# arrowprops=dict(facecolor='red', shrink=0.1, width=1, headwidth=2),)
# plt.show()
trace1 = Scatter(
x = new_en_words_vec[:, 0],
y = new_en_words_vec[:, 1],
mode = 'markers+text',
text = en_words,
textposition = 'top'
)
trace2 = Scatter(
x = new_it_words_vec[:5, 0],
y = new_it_words_vec[:5, 1],
mode = 'markers+text',
text = it_words[:5],
textposition = 'top'
)
layout = Layout(
showlegend = False,
annotations = [dict(
x = new_it_words_vec[5][0],
y = new_it_words_vec[5][1],
text = translated_word[en_words[4]][0],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
), dict(
x = new_it_words_vec[6][0],
y = new_it_words_vec[6][1],
text = translated_word[en_words[4]][1],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
), dict(
x = new_it_words_vec[7][0],
y = new_it_words_vec[7][1],
text = translated_word[en_words[4]][2],
arrowcolor = "black",
arrowsize = 1.5,
arrowwidth = 1,
arrowhead = 0.5
)]
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='relatie_position_for_animal.html')
import gensim
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
from collections import namedtuple
from gensim import utils
def read_sentimentDocs():
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
alldocs = [] # will hold all docs in original order
with utils.smart_open('aclImdb/alldata-id.txt', encoding='utf-8') as alldata:
for line_no, line in enumerate(alldata):
tokens = gensim.utils.to_unicode(line).split()
words = tokens[1:]
tags = [line_no] # `tags = [tokens[0]]` would also work at extra memory cost
split = ['train','test','extra','extra'][line_no // 25000] # 25k train, 25k test, 25k extra
sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no // 12500] # [12.5K pos, 12.5K neg]*2 then unknown
alldocs.append(SentimentDocument(words, tags, split, sentiment))
train_docs = [doc for doc in alldocs if doc.split == 'train']
test_docs = [doc for doc in alldocs if doc.split == 'test']
doc_list = alldocs[:] # for reshuffling per pass
print('%d docs: %d train-sentiment, %d test-sentiment' % (len(doc_list), len(train_docs), len(test_docs)))
return train_docs, test_docs, doc_list
train_docs, test_docs, doc_list = read_sentimentDocs()
small_corpus = train_docs[:15000]
large_corpus = train_docs + test_docs
print(len(train_docs), len(test_docs), len(doc_list), len(small_corpus), len(large_corpus))
# for the computer performance limited, didn't run on the notebook.
# You do can trained on the server and save the model to the disk.
import multiprocessing
from random import shuffle
cores = multiprocessing.cpu_count()
model1 = Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores)
model2 = Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores)
small_train_docs = train_docs[:15000]
# train for small corpus
model1.build_vocab(small_train_docs)
for epoch in range(50):
shuffle(small_train_docs)
model1.train(small_train_docs, total_examples=len(small_train_docs), epochs=1)
model1.save("small_doc_15000_iter50.bin")
large_train_docs = train_docs + test_docs
# train for large corpus
model2.build_vocab(large_train_docs)
for epoch in range(50):
shuffle(large_train_docs)
model2.train(large_train_docs, total_examples=len(large_train_docs), epochs=1)
# save the model
model2.save("large_doc_50000_iter50.bin")
import os
import numpy as np
from sklearn.linear_model import LogisticRegression
def test_classifier_error(train, train_label, test, test_label):
classifier = LogisticRegression()
classifier.fit(train, train_label)
score = classifier.score(test, test_label)
print "the classifier score :", score
return score
#you can change the data folder
basedir = "/home/robotcator/doc2vec"
model2 = Doc2Vec.load(os.path.join(basedir, "large_doc_50000_iter50.bin"))
m2 = []
for i in range(len(large_corpus)):
m2.append(model2.docvecs[large_corpus[i].tags])
train_array = np.zeros((25000, 100))
train_label = np.zeros((25000, 1))
test_array = np.zeros((25000, 100))
test_label = np.zeros((25000, 1))
for i in range(12500):
train_array[i] = m2[i]
train_label[i] = 1
train_array[i + 12500] = m2[i + 12500]
train_label[i + 12500] = 0
test_array[i] = m2[i + 25000]
test_label[i] = 1
test_array[i + 12500] = m2[i + 37500]
test_label[i + 12500] = 0
print "The vectors are learned by doc2vec method"
test_classifier_error(train_array, train_label, test_array, test_label)
from gensim.models import translation_matrix
# you can change the data folder
basedir = "/home/robotcator/doc2vec"
model1 = Doc2Vec.load(os.path.join(basedir, "small_doc_15000_iter50.bin"))
model2 = Doc2Vec.load(os.path.join(basedir, "large_doc_50000_iter50.bin"))
l = model1.docvecs.count
l2 = model2.docvecs.count
m1 = np.array([model1.docvecs[large_corpus[i].tags].flatten() for i in range(l)])
# learn the mapping bettween two model
model = translation_matrix.BackMappingTranslationMatrix(large_corpus[:15000], model1, model2)
model.train(large_corpus[:15000])
for i in range(l, l2):
infered_vec = model.infer_vector(model2.docvecs[large_corpus[i].tags])
m1 = np.vstack((m1, infered_vec.flatten()))
train_array = np.zeros((25000, 100))
train_label = np.zeros((25000, 1))
test_array = np.zeros((25000, 100))
test_label = np.zeros((25000, 1))
# because those document, 25k documents are postive label, 25k documents are negative label
for i in range(12500):
train_array[i] = m1[i]
train_label[i] = 1
train_array[i + 12500] = m1[i + 12500]
train_label[i + 12500] = 0
test_array[i] = m1[i + 25000]
test_label[i] = 1
test_array[i + 12500] = m1[i + 37500]
test_label[i + 12500] = 0
print "The vectors are learned by back-mapping method"
test_classifier_error(train_array, train_label, test_array, test_label)
from sklearn.decomposition import PCA
import plotly
from plotly.graph_objs import Scatter, Layout, Figure
plotly.offline.init_notebook_mode(connected=True)
m1_part = m1[14995: 15000]
m2_part = m2[14995: 15000]
m1_part = np.array(m1_part).reshape(len(m1_part), 100)
m2_part = np.array(m2_part).reshape(len(m2_part), 100)
pca = PCA(n_components=2)
reduced_vec1 = pca.fit_transform(m1_part)
reduced_vec2 = pca.fit_transform(m2_part)
trace1 = Scatter(
x = reduced_vec1[:, 0],
y = reduced_vec1[:, 1],
mode = 'markers+text',
text = ['doc' + str(i) for i in range(len(reduced_vec1))],
textposition = 'top'
)
trace2 = Scatter(
x = reduced_vec2[:, 0],
y = reduced_vec2[:, 1],
mode = 'markers+text',
text = ['doc' + str(i) for i in range(len(reduced_vec1))],
textposition ='top'
)
layout = Layout(
showlegend = False
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='doc_vec_vis')
m1_part = m1[14995: 15002]
m2_part = m2[14995: 15002]
m1_part = np.array(m1_part).reshape(len(m1_part), 100)
m2_part = np.array(m2_part).reshape(len(m2_part), 100)
pca = PCA(n_components=2)
reduced_vec1 = pca.fit_transform(m1_part)
reduced_vec2 = pca.fit_transform(m2_part)
trace1 = Scatter(
x = reduced_vec1[:, 0],
y = reduced_vec1[:, 1],
mode = 'markers+text',
text = ['sdoc' + str(i) for i in range(len(reduced_vec1))],
textposition = 'top'
)
trace2 = Scatter(
x = reduced_vec2[:, 0],
y = reduced_vec2[:, 1],
mode = 'markers+text',
text = ['tdoc' + str(i) for i in range(len(reduced_vec1))],
textposition ='top'
)
layout = Layout(
showlegend = False
)
data = [trace1, trace2]
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.iplot(fig, filename='doc_vec_vis')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this tutorial, we'll train our model using the English -> Italian word pairs from the OPUS collection. This corpus contains 5000 word pairs. Each word pair is an English word with its corresponding Italian word.
Step2: This tutorial uses 300-dimensional vectors of English words as the source and vectors of Italian words as the target. (Those vectors were trained by the word2vec toolkit with CBOW. The context window was set to 5 words on either side of the target,
Step3: Train the translation matrix
Step4: Prediction Time
Step5: Part two
Step6: Part three
Step7: The Creation Time for the Translation Matrix
Step8: You will see a two-dimensional plot whose horizontal axis is the size of the corpus and whose vertical axis is the time needed to train a translation matrix (in seconds). As the size of the corpus increases, the training time increases roughly linearly.
Step9: The figure shows that the word vectors for the English numbers one to five and the corresponding Italian words uno to cinque have similar geometric arrangements. So the relationship between the vector spaces that represent these two languages can be captured by a linear mapping (a sketch of that mapping is given after this step list).
Step10: You will probably see two kinds of differently colored nodes, one for the English words and the other for the Italian words. For the translation of the word five, we return the top 3 similar words [u'cinque', u'quattro', u'tre']. We can easily see that the translation is convincing.
Step11: You will probably see two kinds of differently colored nodes, one for the English words and the other for the Italian words. For the translation of the word birds, we return the top 3 similar words [u'uccelli', u'garzette', u'iguane']. We can easily see that the animal translations are as convincing as the number translations.
Step12: Here, we train two Doc2Vec models; the parameters can be chosen by yourself. We train model1 on 15k documents and model2 on 50k documents, and the 15k documents used for model1 should also be mixed into model2's corpus, as discussed before.
Step13: For the IMDB dataset, we train a classifier on the training data, which has 25k documents with positive and negative labels, and then use this classifier to predict the test data, to see what accuracy the document vectors learned by the different methods can achieve.
Step14: For experiment one, we use the vectors learned by the Doc2Vec method. To evaluate those document vectors, we split the 50k documents into two parts, one for training and the other for testing.
Step15: For experiment two, the document vectors are learned by the back-mapping method, which fits a linear mapping between model1 and model2. Used like the translation matrix for word translation, this mapping lets us infer a model1-style vector for each of the additional 35k documents that only have a vector in model2.
Step16: As we can see, the vectors learned by the back-mapping method perform reasonably well but still need improvement.
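A sketch of the linear mapping mentioned in Step9, in the standard least-squares formulation (the exact solver used by gensim may differ):

$$W^{*} = \arg\min_{W} \sum_{i=1}^{n} \lVert W x_i - z_i \rVert^{2}$$

where $x_i$ is the source-language (English) vector and $z_i$ the target-language (Italian) vector of the $i$-th training pair; a new word with vector $x$ is then translated to the target words whose vectors lie closest to $W x$.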
|
61 | <ASSISTANT_TASK:>
Python Code:
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(events_file)
fig = raw.plot()
fig.fake_keypress('a') # Simulates user pressing 'a' on the keyboard.
eog_events = mne.preprocessing.find_eog_events(raw)
onsets = eog_events[:, 0] / raw.info['sfreq'] - 0.25
durations = [0.5] * len(eog_events)
descriptions = ['bad blink'] * len(eog_events)
blink_annot = mne.Annotations(onsets, durations, descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(blink_annot)
eeg_picks = mne.pick_types(raw.info, meg=False, eeg=True)
raw.plot(events=eog_events, order=eeg_picks)
onsets = [
raw.first_time + 30,
raw.first_time + 180
]
durations = [60, 60]
descriptions = ['block_1', 'block_2']
block_annots = mne.Annotations(onset=onsets,
duration=durations,
description=descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(raw.annotations + block_annots) # add to existing
raw.plot()
break_annots = mne.preprocessing.annotate_break(
raw=raw,
min_break_duration=20, # consider segments of at least 20 s duration
t_start_after_previous=5, # start annotation 5 s after end of previous one
t_stop_before_next=2 # stop annotation 2 s before beginning of next one
)
raw.set_annotations(raw.annotations + break_annots) # add to existing
raw.plot()
# only keep some button press events (code 32) for this demonstration
events_subset = events[events[:, -1] == 32]
# drop the first and last few events
events_subset = events_subset[3:-3]
break_annots = mne.preprocessing.annotate_break(
raw=raw,
events=events_subset, # passing events will ignore existing annotations
min_break_duration=25 # pick a longer break duration this time
)
# replace existing annotations (otherwise it becomes difficult to see any
# effects in the plot!)
raw.set_annotations(break_annots)
raw.plot(events=events_subset)
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
flat_criteria = dict(mag=1e-15, # 1 fT
grad=1e-13, # 1 fT/cm
eeg=1e-6) # 1 µV
raw.set_annotations(blink_annot) # restore the EOG annotations
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria,
reject_by_annotation=False, preload=True)
epochs.plot_drop_log()
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria, preload=True)
epochs.plot_drop_log()
print(epochs.drop_log)
epochs.drop_bad()
stronger_reject_criteria = dict(mag=2000e-15, # 2000 fT
grad=2000e-13, # 2000 fT/cm
eeg=100e-6, # 100 µV
eog=100e-6) # 100 µV
epochs.drop_bad(reject=stronger_reject_criteria)
print(epochs.drop_log)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Annotating bad spans of data
Step2: You can see that you need to add a description first to start with
Step3: Now we can confirm that the annotations are centered on the EOG events. Since
Step4: See the section tut-section-programmatic-annotations for more details
Step5: Now detect break periods. We can control how far the break annotations shall
Step6: You can see that 3 segments have been annotated as BAD_break
Step7: Rejecting Epochs based on channel amplitude
Step8: The values that are appropriate are dataset- and hardware-dependent, so some
Step9: Notice that we've passed reject_by_annotation=False above, in order to
Step10: More importantly, note that many more epochs are rejected (~20% instead of
Step11: Finally, it should be noted that "dropped" epochs are not necessarily deleted
Step12: Alternatively, if rejection thresholds were not originally given to the
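As a minimal sketch of how the annotations created in the steps above can be reused later (the variable names here are just illustrative), the annotation descriptions can be converted into an events array with:

events_from_annot, event_dict = mne.events_from_annotations(raw)
print(event_dict)  # mapping from annotation description to integer event code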
|
62 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow.compat.v1 as tf1
features = [[1., 1.5]]
labels = [[0.3]]
eval_features = [[4., 4.5]]
eval_labels = [[0.8]]
def _input_fn(params):
dataset = tf1.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.repeat()
return dataset.batch(params['batch_size'], drop_remainder=True)
def _eval_input_fn(params):
dataset = tf1.data.Dataset.from_tensor_slices((eval_features, eval_labels))
dataset = dataset.repeat()
return dataset.batch(params['batch_size'], drop_remainder=True)
def _model_fn(features, labels, mode, params):
logits = tf1.layers.Dense(1)(features)
loss = tf1.losses.mean_squared_error(labels=labels, predictions=logits)
optimizer = tf1.train.AdagradOptimizer(0.05)
train_op = optimizer.minimize(loss, global_step=tf1.train.get_global_step())
return tf1.estimator.tpu.TPUEstimatorSpec(mode, loss=loss, train_op=train_op)
cluster_resolver = tf1.distribute.cluster_resolver.TPUClusterResolver(tpu='')
print("All devices: ", tf1.config.list_logical_devices('TPU'))
tpu_config = tf1.estimator.tpu.TPUConfig(iterations_per_loop=10)
config = tf1.estimator.tpu.RunConfig(
cluster=cluster_resolver,
save_checkpoints_steps=None,
tpu_config=tpu_config)
estimator = tf1.estimator.tpu.TPUEstimator(
model_fn=_model_fn,
config=config,
train_batch_size=8,
eval_batch_size=8)
estimator.train(_input_fn, steps=1)
estimator.evaluate(_eval_input_fn, steps=1)
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
dataset = tf.data.Dataset.from_tensor_slices(
(features, labels)).shuffle(10).repeat().batch(
8, drop_remainder=True).prefetch(2)
eval_dataset = tf.data.Dataset.from_tensor_slices(
(eval_features, eval_labels)).batch(1, drop_remainder=True)
strategy = tf.distribute.TPUStrategy(cluster_resolver)
with strategy.scope():
model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.05)
model.compile(optimizer, "mse", steps_per_execution=10)
model.fit(dataset, epochs=5, steps_per_epoch=10)
model.evaluate(eval_dataset, return_dict=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Migrate from TPUEstimator to TPUStrategy
Step2: TensorFlow 1
Step3: With those functions defined, create a tf.distribute.cluster_resolver.TPUClusterResolver that provides the cluster information, and a tf.compat.v1.estimator.tpu.RunConfig object. Along with the model function you have defined, you can now create a TPUEstimator. Here, you will simplify the flow by skipping checkpoint savings. Then, you will specify the batch size for both training and evaluation for the TPUEstimator.
Step4: Call TPUEstimator.train to begin training the model
Step5: Then, call TPUEstimator.evaluate to evaluate the model using the evaluation data
Step6: TensorFlow 2
Step7: Next, once your data is prepared, you will create a TPUStrategy, define a model, metrics, and an optimizer under the scope of this strategy.
Step8: With that, you are ready to train the model with the training dataset
Step9: Finally, evaluate the model using the evaluation dataset
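The same Keras workflow shown above is portable to other hardware by swapping the distribution strategy; a minimal sketch (not part of the original migration guide) for the case where no TPU is available:

strategy = tf.distribute.MirroredStrategy()  # or tf.distribute.get_strategy() for the default strategy
with strategy.scope():
    model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
    model.compile(tf.keras.optimizers.Adagrad(learning_rate=0.05), "mse")
model.fit(dataset, epochs=5, steps_per_epoch=10)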
|
63 | <ASSISTANT_TASK:>
Python Code:
import os
IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence
if IS_COLAB_BACKEND:
from google.colab import auth
# Authenticates the Colab machine and also the TPU using your
# credentials so that they can access your private GCS buckets.
auth.authenticate_user()
!pip install -U tensorboard_plugin_profile==2.3.0
%tensorflow_version 2.x
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
except ValueError:
raise BaseException('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!')
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
import re
import numpy as np
from matplotlib import pyplot as plt
AUTO = tf.data.experimental.AUTOTUNE
IMAGE_SIZE = [331, 331]
batch_size = 16 * tpu_strategy.num_replicas_in_sync
gcs_pattern = 'gs://flowers-public/tfrecords-jpeg-331x331/*.tfrec'
validation_split = 0.19
filenames = tf.io.gfile.glob(gcs_pattern)
split = len(filenames) - int(len(filenames) * validation_split)
train_fns = filenames[:split]
validation_fns = filenames[split:]
def parse_tfrecord(example):
features = {
"image": tf.io.FixedLenFeature([], tf.string), # tf.string means bytestring
"class": tf.io.FixedLenFeature([], tf.int64), # shape [] means scalar
"one_hot_class": tf.io.VarLenFeature(tf.float32),
}
example = tf.io.parse_single_example(example, features)
decoded = tf.image.decode_jpeg(example['image'], channels=3)
normalized = tf.cast(decoded, tf.float32) / 255.0 # convert each 0-255 value to floats in [0, 1] range
image_tensor = tf.reshape(normalized, [*IMAGE_SIZE, 3])
one_hot_class = tf.reshape(tf.sparse.to_dense(example['one_hot_class']), [5])
return image_tensor, one_hot_class
def load_dataset(filenames):
# Read from TFRecords. For optimal performance, we interleave reads from multiple files.
records = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
return records.map(parse_tfrecord, num_parallel_calls=AUTO)
def get_training_dataset():
dataset = load_dataset(train_fns)
# Create some additional training images by randomly flipping and
# increasing/decreasing the saturation of images in the training set.
def data_augment(image, one_hot_class):
modified = tf.image.random_flip_left_right(image)
modified = tf.image.random_saturation(modified, 0, 2)
return modified, one_hot_class
augmented = dataset.map(data_augment, num_parallel_calls=AUTO)
# Prefetch the next batch while training (autotune prefetch buffer size).
return augmented.repeat().shuffle(2048).batch(batch_size).prefetch(AUTO)
training_dataset = get_training_dataset()
validation_dataset = load_dataset(validation_fns).batch(batch_size).prefetch(AUTO)
CLASSES = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
def display_one_flower(image, title, subplot, color):
plt.subplot(subplot)
plt.axis('off')
plt.imshow(image)
plt.title(title, fontsize=16, color=color)
# If model is provided, use it to generate predictions.
def display_nine_flowers(images, titles, title_colors=None):
subplot = 331
plt.figure(figsize=(13,13))
for i in range(9):
color = 'black' if title_colors is None else title_colors[i]
display_one_flower(images[i], titles[i], 331+i, color)
plt.tight_layout()
plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.show()
def get_dataset_iterator(dataset, n_examples):
return dataset.unbatch().batch(n_examples).as_numpy_iterator()
training_viz_iterator = get_dataset_iterator(training_dataset, 9)
# Re-run this cell to show a new batch of images
images, classes = next(training_viz_iterator)
class_idxs = np.argmax(classes, axis=-1) # transform from one-hot array to class number
labels = [CLASSES[idx] for idx in class_idxs]
display_nine_flowers(images, labels)
def create_model():
pretrained_model = tf.keras.applications.Xception(input_shape=[*IMAGE_SIZE, 3], include_top=False)
pretrained_model.trainable = True
model = tf.keras.Sequential([
pretrained_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(5, activation='softmax')
])
model.compile(
optimizer='adam',
loss = 'categorical_crossentropy',
metrics=['accuracy']
)
return model
with tpu_strategy.scope(): # creating the model in the TPUStrategy scope means we will train the model on the TPU
model = create_model()
model.summary()
def count_data_items(filenames):
# The number of data items is written in the name of the .tfrec files, i.e. flowers00-230.tfrec = 230 data items
n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames]
return np.sum(n)
n_train = count_data_items(train_fns)
n_valid = count_data_items(validation_fns)
train_steps = count_data_items(train_fns) // batch_size
print("TRAINING IMAGES: ", n_train, ", STEPS PER EPOCH: ", train_steps)
print("VALIDATION IMAGES: ", n_valid)
EPOCHS = 12
start_lr = 0.00001
min_lr = 0.00001
max_lr = 0.00005 * tpu_strategy.num_replicas_in_sync
rampup_epochs = 5
sustain_epochs = 0
exp_decay = .8
def lrfn(epoch):
if epoch < rampup_epochs:
return (max_lr - start_lr)/rampup_epochs * epoch + start_lr
elif epoch < rampup_epochs + sustain_epochs:
return max_lr
else:
return (max_lr - min_lr) * exp_decay**(epoch-rampup_epochs-sustain_epochs) + min_lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda epoch: lrfn(epoch), verbose=True)
rang = np.arange(EPOCHS)
y = [lrfn(x) for x in rang]
plt.plot(rang, y)
print('Learning rate per epoch:')
# Load the TensorBoard notebook extension.
%load_ext tensorboard
# Get TPU profiling service address. This address will be needed for capturing
# profile information with TensorBoard in the following steps.
service_addr = tpu.get_master().replace(':8470', ':8466')
print(service_addr)
# Launch TensorBoard.
%tensorboard --logdir=gs://bucket-name # Replace the bucket-name variable with your own gcs bucket
history = model.fit(training_dataset, validation_data=validation_dataset,
steps_per_epoch=train_steps, epochs=EPOCHS, callbacks=[lr_callback])
final_accuracy = history.history["val_accuracy"][-5:]
print("FINAL ACCURACY MEAN-5: ", np.mean(final_accuracy))
def display_training_curves(training, validation, title, subplot):
ax = plt.subplot(subplot)
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['training', 'validation'])
plt.subplots(figsize=(10,10))
plt.tight_layout()
display_training_curves(history.history['accuracy'], history.history['val_accuracy'], 'accuracy', 211)
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Updating tensorboard_plugin_profile
Step2: Enabling and testing the TPU
Step3: Input data
Step4: Let's take a peek at the training dataset we've created
Step5: Model
Step6: Training
Step7: Calculate and show a learning rate schedule. We start with a fairly low rate, as we're using a pre-trained model and don't want to undo all the fine work put into training it. (The schedule computed by lrfn is written out as a formula after this step list.)
Step8: Train the model. While the first epoch will be quite a bit slower as we must XLA-compile the execution graph and load the data, later epochs should complete in ~5s.
Step9: The TensorBoard UI is displayed in a browser window. In this colab, perform the following steps to prepare to capture profile information.
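As a formula, the schedule implemented by lrfn in Step7 is (with epoch e counted from 0):

$$\mathrm{lr}(e) = \begin{cases} \text{start\_lr} + (\text{max\_lr} - \text{start\_lr})\, e / \text{rampup\_epochs} & e < \text{rampup\_epochs} \\ \text{max\_lr} & \text{rampup\_epochs} \le e < \text{rampup\_epochs} + \text{sustain\_epochs} \\ \text{min\_lr} + (\text{max\_lr} - \text{min\_lr})\, \text{exp\_decay}^{\,e - \text{rampup\_epochs} - \text{sustain\_epochs}} & \text{otherwise} \end{cases}$$

i.e. a linear ramp-up over the first rampup_epochs epochs followed by an exponential decay toward min_lr.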
|
64 | <ASSISTANT_TASK:>
Python Code:
# RUN THIS CELL to perform standard imports:
import spacy
nlp = spacy.load('en_core_web_sm')
# Enter your code here:
with open('../TextFiles/owlcreek.txt') as f:
doc = nlp(f.read())
# Run this cell to verify it worked:
doc[:36]
len(doc)
sents = [sent for sent in doc.sents]
len(sents)
print(sents[1].text)
# NORMAL SOLUTION:
for token in sents[1]:
print(token.text, token.pos_, token.dep_, token.lemma_)
# CHALLENGE SOLUTION:
for token in sents[1]:
print(f'{token.text:{15}} {token.pos_:{5}} {token.dep_:{10}} {token.lemma_:{15}}')
# Import the Matcher library:
from spacy.matcher import Matcher
matcher = Matcher(nlp.vocab)
# Create a pattern and add it to matcher:
pattern = [{'LOWER': 'swimming'}, {'IS_SPACE': True, 'OP':'*'}, {'LOWER': 'vigorously'}]
matcher.add('Swimming', None, pattern)
# Create a list of matches called "found_matches" and print the list:
found_matches = matcher(doc)
print(found_matches)
print(doc[1265:1290])
print(doc[3600:3615])
for sent in sents:
if found_matches[0][1] < sent.end:
print(sent)
break
for sent in sents:
if found_matches[1][1] < sent.end:
print(sent)
break
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Create a Doc object from the file owlcreek.txt
Step2: 2. How many tokens are contained in the file?
Step3: 3. How many sentences are contained in the file? HINT
Step4: 4. Print the second sentence in the document. HINT
Step5: 5. For each token in the sentence above, print its text, POS tag, dep tag and lemma
Step6: 6. Write a matcher called 'Swimming' that finds both occurrences of the phrase "swimming vigorously" in the text (a note on the spaCy 3.x Matcher API follows this list)
Step7: 7. Print the text surrounding each found match
Step8: EXTRA CREDIT
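A note on the Matcher API used above: matcher.add('Swimming', None, pattern) follows the spaCy 2.x signature. On spaCy 3.x the equivalent call (a small sketch, otherwise identical) is:

matcher = Matcher(nlp.vocab)
matcher.add('Swimming', [pattern])  # patterns are passed as a list and the callback argument is dropped
found_matches = matcher(doc)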
|
65 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
import inspect
import numpy as np
import datetime as dt
import time
import pytz
import pandas as pd
import pdb
import tmpo
#import charts
from opengrid import config
from opengrid.library import houseprint
c=config.Config()
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
if not DEV:
# production environment: don't try to display plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter, AutoDateLocator
try:
if os.path.exists(c.get('tmpo', 'data')):
path_to_tmpo_data = c.get('tmpo', 'data')
except:
path_to_tmpo_data = None
if DEV:
if c.get('env', 'plots') == 'inline':
%matplotlib inline
else:
%matplotlib qt
else:
pass # don't try to render plots
plt.rcParams['figure.figsize'] = 12,8
number_of_days = 7
hp = houseprint.load_houseprint_from_file('new_houseprint.pkl')
hp.init_tmpo(path_to_tmpo_data=path_to_tmpo_data)
start = pd.Timestamp(time.time() - number_of_days*86400, unit='s')
sensors = hp.get_sensors()
#sensors.remove('b325dbc1a0d62c99a50609e919b9ea06')
for sensor in sensors:
s = sensor.get_data(head=start, resample='s')
try:
s = s.resample(rule='60s', how='max')
s = s.diff()*3600/60
# plot with charts (don't show it) and save html
charts.plot(pd.DataFrame(s), stock=True,
save=os.path.join(c.get('data', 'folder'), 'figures', 'TimeSeries_'+sensor.key+'.html'), show=True)
except:
pass
len(sensors)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Script settings
Step2: We create one big dataframe, the columns are the sensors
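A minimal sketch of how the per-sensor series could be combined into the single dataframe described here (assuming, as in the loop above, that sensor.get_data(...) returns a pandas Series per sensor):

frames = {sensor.key: sensor.get_data(head=start, resample='s') for sensor in sensors}
df = pd.concat(frames, axis=1)  # one column per sensor key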
|
66 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import time
import meshcat
import meshcat.geometry as g
import meshcat.transformations as tf
# Create a new visualizer
vis = meshcat.Visualizer()
vis.open()
vis.url()
vis.set_object(g.Box([0.2, 0.2, 0.2]))
for theta in np.linspace(0, 2 * np.pi, 200):
vis.set_transform(tf.rotation_matrix(theta, [0, 0, 1]))
time.sleep(0.005)
vis.jupyter_cell()
vis.set_object(g.Box([0.1, 0.1, 0.2]))
vis.delete()
vis.set_object(g.Box([1, 1, 2]),g.MeshPhongMaterial(map=g.TextTexture('Hello, world!')))
vis.delete()
vis.set_object(g.SceneText('Hello, world!',font_size=100))
Rz = tf.rotation_matrix(np.pi/2, [0, 0, 1])
Ry = tf.rotation_matrix(np.pi/2, [0, 1, 0])
vis.set_transform(Ry.dot(Rz))
for i in np.linspace(8,2,10):
vis.set_object(g.SceneText('Hello, world!',width=2*i,height=2*i,font_size=300))
time.sleep(0.05)
vis["robot"].set_object(g.Box([0.15, 0.35, 0.4]))
vis["robot"]["head"].set_object(g.Box([0.2, 0.2, 0.2]))
vis["robot"]["head"].set_transform(tf.translation_matrix([0, 0, 0.32]))
for x in np.linspace(0, np.pi, 100):
vis["robot"].set_transform(tf.translation_matrix([np.sin(x), 0, 0]))
time.sleep(0.01)
for x in np.linspace(0, 2 * np.pi, 100):
# vis["robot/head"] is a shorthand for vis["robot"]["head"]
vis["robot/head"].set_transform(
tf.translation_matrix([0, 0, 0.32]).dot(
tf.rotation_matrix(x, [0, 0, 1])))
time.sleep(0.01)
vis["robot/head"].delete()
vis["robot"].delete()
vis["sphere"].set_object(g.Sphere(0.1),
g.MeshLambertMaterial(
color=0xff22dd,
reflectivity=0.8))
vis["sphere"].delete()
vis["robots/valkyrie/head"].set_object(
g.ObjMeshGeometry.from_file(
os.path.join(meshcat.viewer_assets_path(), "data/head_multisense.obj")),
g.MeshLambertMaterial(
map=g.ImageTexture(
image=g.PngImage.from_file(
os.path.join(meshcat.viewer_assets_path(), "data/HeadTextureMultisense.png"))
)
)
)
verts = np.random.rand(3, 100000)
vis["perception/pointclouds/random"].set_object(
g.PointCloud(position=verts, color=verts))
vis["perception/pointclouds/random"].set_transform(
tf.translation_matrix([0, 1, 0]))
vis["robots"].delete()
vis["perception"].delete()
cart_pole = vis["cart_pole"]
cart_pole.delete()
cart = cart_pole["cart"]
pivot = cart["pivot"]
pole = pivot["pole"]
cart.set_object(g.Box([0.5, 0.3, 0.2]))
pole.set_object(g.Box([1, 0.05, 0.05]))
pole.set_transform(tf.translation_matrix([0.5, 0, 0]))
pivot.set_transform(tf.rotation_matrix(-np.pi/2, [0, 1, 0]))
for x in np.linspace(-np.pi, np.pi, 200):
cart.set_transform(tf.translation_matrix([np.sin(x), 0, 0]))
pivot.set_transform(tf.rotation_matrix(x / 4 - np.pi / 2, [0, 1, 0]))
time.sleep(0.01)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: By default, creating the Visualizer will start up a meshcat server for you in the background. The easiest way to open the visualizer is with its open method
Step2: If vis.open() does not work for you, you can also point your browser to the server's URL
Step3: To create a 3D object, we use the set_object method
Step4: And to move that object around, we use set_transform
Step5: MeshCat also supports embedding a 3D view inside a Jupyter notebook cell
Step6: Notice how the 3D scene displayed in the Jupyter cell matches the one in the external window. The meshcat server process remembers the objects and transforms you've sent, so opening a new browser pointing to the same URL should give you the same scene.
Step7: We can also delete the box
Step8: MeshCat supports simple 2d texts rendering. For example, to write 2d texts onto a geometry
Step9: It is also possible to simple write 'floating' texts onto a scene without attaching it to an object (e.g., for scene description)
Step10: and just like the usual geometry/object, the scene texts can be rotated
Step11: Under the hood, the SceneTexts are written onto a Plane geometry, and the plane size can be specified by width and height. These two parameters affect the texts size when the font_size itself is set too large; they would force a font downsizing when rendering so as to fit all the texts within the specified plane.
Step12: The Scene Tree
Step13: Now let's give the robot a head
Step14: We can move the entire robot by setting the transform of the /robot path
Step15: And we can move just the head by setting the transform of /robot/head
Step16: We can delete the head...
Step17: ...or the entire robot
Step18: Other Geometries
Step19: MeshCat can load .obj, .dae, and .stl meshes via the ObjMeshGeometry, DaeMeshGeometry, and StlMeshGeometry types respectively
Step20: The PointCloud() function is a helper to create a Points object with a PointsGeometry and PointsMaterial
Step21: Cart-Pole
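A small sketch of the transform composition used for the robot head and the cart-pole pivot (only APIs already shown above; the path "demo/box" is made up for illustration):
box = vis["demo/box"]
box.set_object(g.Box([0.2, 0.2, 0.2]))
# the right-most transform is applied first: rotate about z, then translate along x
pose = tf.translation_matrix([1, 0, 0]).dot(tf.rotation_matrix(np.pi / 4, [0, 0, 1]))
box.set_transform(pose)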
|
67 | <ASSISTANT_TASK:>
Python Code:
# Let's find out the number of neighbors that individual #9 has.
G.neighbors(9)
# Possible Answers:
sorted([n for n in G.nodes()], key=lambda x:len(G.neighbors(x)), reverse=True)
sorted([(n, G.neighbors(n)) for n in G.nodes()], key=lambda x: len(x[1]), reverse=True)
nx.degree_centrality(G)
# Possible Answers:
fig = plt.figure(0)
degree_centralities = [v for k, v in nx.degree_centrality(G).items()]
plt.hist(degree_centralities)
plt.title('Degree Centralities')
fig = plt.figure(1)
neighbors = [len(G.neighbors(node)) for node in G.nodes()]
plt.hist(neighbors)
plt.title('Number of Neighbors')
fig = plt.figure(2)
plt.scatter(degree_centralities, neighbors)
plt.xlabel('Degree Centralities')
plt.ylabel('Number of Neighbors')
nx.draw(G, with_labels=True)
def path_exists(node1, node2, G):
    """This function checks whether a path exists between two nodes (node1, node2) in graph G."""
queue = [node1]
destination = node2
for node in queue:
neighbors = G.neighbors(node)
if destination in neighbors:
print('Path exists between nodes {0} and {1}'.format(node1, node2))
return True
break
else:
queue.remove(node)
queue.extend(neighbors)
if len(queue) == 0:
print('Path does not exist between nodes {0} and {1}'.format(node1, node2))
return False
path_exists(18, 5, G)
path_exists(29, 26, G)
nx.has_path(G, 18, 5)
nx.draw(G, with_labels=True)
nx.shortest_path(G, 4, 14)
# Possible Answer:
def extract_path_edges(G, source, target):
# Check to make sure that a path does exists between source and target.
if nx.has_path(G, source, target) == True:
nodes = nx.shortest_path(G, source, target)
newG = G.subgraph(nodes)
return newG
if nx.has_path(G, source, target) == False:
raise Exception('Path does not exist between nodes {0} and {1}.'.format(source, target))
newG = extract_path_edges(G, 1, 14)
nx.draw(newG, with_labels=True)
# Possible Answer
def extract_neighbor_edges(G, node):
neighbors = G.neighbors(node)
newG = nx.Graph()
for n1, n2 in G.edges():
if (n1 == node and n2 in neighbors) or (n1 in neighbors and n2 == node):
newG.add_edge(n1, n2)
return newG
def extract_neighbor_edges2(G, node):
neighbors = G.neighbors(node)
newG = nx.Graph()
for neighbor in neighbors:
if (node, neighbor) in G.edges() or (neighbor, node) in G.edges():
newG.add_edge(node, neighbor)
return newG
fig = plt.figure(0)
newG = extract_neighbor_edges(G, 19)
nx.draw(newG, with_labels=True)
fig = plt.figure(1)
newG = extract_neighbor_edges2(G, 19)
nx.draw(newG, with_labels=True)
# Possible answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
    """Fill in code below."""
length = nx.shortest_path_length(G, source, target)
time = sum([i for i in range(1, length+1)])
return time
compute_transmission_time(G, 14, 4)
# Possible answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
lengths = []
times = []
for source, sink_length in nx.shortest_path_length(G).items():
for sink, length in sink_length.items():
times.append(sum(range(1, length+1)))
lengths.append(length)
plt.figure(0)
plt.bar(Counter(lengths).keys(), Counter(lengths).values())
plt.figure(1)
plt.bar(Counter(times).keys(), Counter(times).values())
# Possible answer to Question 3:
from collections import Counter
from itertools import combinations
totals = Counter()
for n1, n2 in combinations(G.nodes(), 2):
if nx.has_path(G, n1, n2):
for path in nx.all_shortest_paths(G, n1, n2):
path.pop(0)
path.pop(-1)
totals = totals + Counter(path)
totals
# Possible answer to Question 4:
plt.bar(totals.keys(), totals.values())
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(btws.keys(), btws.values())
# Possible answer:
deg_centrality = nx.degree_centrality(G)
btw_centrality = nx.betweenness_centrality(G)
deg_cent_sorted = [i[1] for i in sorted(zip(deg_centrality.keys(), deg_centrality.values()))]
btw_cent_sorted = [i[1] for i in sorted(zip(btw_centrality.keys(), btw_centrality.values()))]
plt.scatter(deg_cent_sorted, btw_cent_sorted)
nx.draw(nx.barbell_graph(5, 1))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: Approach 2
Step3: If you inspect the dictionary closely, you will find that node 19 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
Step4: Paths in a Network
Step6: Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a breadth-first search (http
Step7: And testing the function on a few test cases
Step8: If you write an algorithm that runs breadth-first, the recursion pattern is likely to follow what we have done above. If you do depth-first, the recursion pattern is likely to look a bit different. We can figure that out together later, if there is time.
Step9: NetworkX also has other shortest path algorithms implemented.
Step10: nx.shortest_path(G, source, target) gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.)
Step11: Incidentally, the node list is in order as well - we will travel through 19 and 17 in that order to get from 14 from 4.
Step12: Exercise
Step14: Challenge Exercises
Step15: Hubs Revisited
Step16: Exercise
Step17: Think about it...
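A minimal check of the point above, using only functions already shown: in a barbell graph the bridge node has a modest degree centrality but dominates betweenness centrality.
barbell = nx.barbell_graph(5, 1)
deg = nx.degree_centrality(barbell)
btw = nx.betweenness_centrality(barbell)
for node in barbell.nodes():
    print(node, round(deg[node], 3), round(btw[node], 3))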
|
68 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import sympy as sp
sns.set_context('notebook')
%matplotlib inline
x = np.linspace(.01, .99, num=1000)
doppler = lambda x : np.sqrt(x * (1 - x)) * np.sin(1.2 * np.pi / (x + .05))
plt.plot(x, doppler(x))
plt.show()
from sympy.utilities.lambdify import lambdify
from IPython.display import display, Math, Latex
u = sp.Symbol('u')
sym_doppler = lambda x : (x * (1 - x))**.5 * sp.sin(1.2 * sp.pi / (x + .05))
d_doppler = sym_doppler(u).diff()
dd_doppler = sym_doppler(u).diff(n=2)
display(Math(sp.latex(d_doppler)))
d_doppler = np.vectorize(lambdify(u, d_doppler))
dd_doppler = np.vectorize(lambdify(u, dd_doppler))
plt.plot(x, d_doppler(x))
plt.show()
def f_rtexp(x, lmbd=1, b=1):
return np.exp(-x / lmbd) / lmbd / (1 - np.exp(-b / lmbd))
def f_ltexp(x, lmbd=1, b=1):
return np.exp(x / lmbd) / lmbd / (np.exp(b / lmbd) - 1)
def right_trunc_exp(lmbd=1, b=1, size=1000):
X = np.sort(np.random.rand(size))
return - lmbd * np.log(1 - X * (1 - np.exp(-b / lmbd)))
def left_trunc_exp(lmbd=1, b=1, size=1000):
X = np.sort(np.random.rand(size))
return lmbd * np.log(1 - X * (1 - np.exp(b / lmbd)))
# Equivalent using SciPy:
# Y = ss.truncexpon.rvs(1, size=1000)
lmbd = .2
Y1 = right_trunc_exp(lmbd=lmbd)
Y2 = left_trunc_exp(lmbd=lmbd)
density1 = ss.gaussian_kde(Y1)
density2 = ss.gaussian_kde(Y2)
U = np.linspace(0, 1, num=1000)
fig = plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.hist(Y1, normed=True, bins=20, label='Histogram')
plt.plot(U, f_rtexp(U, lmbd=lmbd), lw=4, color=[0, 0, 0], label='True density')
plt.plot(U, density1(U), lw=4, color='red', label='Kernel density')
plt.legend()
plt.title('Right truncated')
plt.subplot(1, 2, 2)
plt.hist(Y2, normed=True, bins=20, label='Histogram')
plt.plot(U, f_ltexp(U, lmbd=lmbd), lw=4, color=[0, 0, 0], label='True density')
plt.plot(U, density2(U), lw=4, color='red', label='Kernel density')
plt.legend()
plt.title('Left truncated')
plt.show()
def indicator(x):
return np.asfarray((np.abs(x) <= 1.) & (np.abs(x) >= 0.))
def kernel(x, ktype='Truncated'):
if ktype == 'Truncated':
return .5 * indicator(x)
if ktype == 'Epanechnikov':
return 3./4. * (1 - x**2) * indicator(x)
if ktype == 'Biweight':
return 15./16. * (1 - x**2)**2 * indicator(x)
if ktype == 'Triweight':
return 35./36. * (1 - x**2)**3 * indicator(x)
if ktype == 'Gaussian':
return 1./np.sqrt(2. * np.pi) * np.exp(- .5 * x**2)
def roughness(ktype='Truncated'):
if ktype == 'Truncated':
return 1./2.
if ktype == 'Epanechnikov':
return 3./5.
if ktype == 'Biweight':
return 5./7.
if ktype == 'Triweight':
return 350./429.
if ktype == 'Gaussian':
return np.pi**(-.5)/2.
def sigmak(ktype='Truncated'):
if ktype == 'Truncated':
return 1./3.
if ktype == 'Epanechnikov':
return 1./5.
if ktype == 'Biweight':
return 1./7.
if ktype == 'Triweight':
return 1./9.
if ktype == 'Gaussian':
return 1.
x = np.linspace(0., 2., 100)
names = ['Truncated', 'Epanechnikov', 'Biweight', 'Triweight', 'Gaussian']
for name in names:
plt.plot(x, kernel(x, ktype=name), label=name, lw=2)
plt.legend()
plt.show()
def weight(U, X, h=.1, ktype='Truncated'):
# X - N-array
# U - M-array
# XmU - M*N-array
XmU = (X - np.atleast_2d(U).T) / h
# K - M*N-array
K = kernel(XmU, ktype)
# K.sum(1) - M-array
# K.T - N*M-array
# K.T / K.sum(1) - N*M-array
return (K.T / K.sum(1)).T
def NW(U, X, Y, h=.1, ktype='Truncated'):
return np.dot(weight(U, X, h, ktype), Y)
def generate_data(N=1000, M=500, lmbd=1, trunc='left'):
if trunc == 'left':
X = left_trunc_exp(lmbd=lmbd, size=N)
if trunc == 'right':
X = right_trunc_exp(lmbd=lmbd, size=N)
e = np.random.normal(0, .1, N)
Y = doppler(X) + e
U = np.linspace(.01, .99, M)
return X, Y, U
X, Y, U = generate_data()
# Nadaraya-Watson estimator
Yhat = NW(U, X, Y, h=.05, ktype='Truncated')
fig = plt.figure(figsize=(10, 6))
plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.plot(U, Yhat, lw=2, color='red', label='Fitted')
plt.scatter(X, Y, s=15, lw=.5, facecolor='none', label='Realized')
plt.xlim([0, 1])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
def fx(x, lmbd=1, b=1):
return sp.exp(-x / lmbd) / lmbd / (1 - sp.exp(-b / lmbd))
dfx = fx(u).diff()
fx = np.vectorize(lambdify(u, fx(u)))
dfx = np.vectorize(lambdify(u, dfx))
def bias(U, etype='NW', h=.05, ktype='Gaussian'):
if etype == 'NW':
bias = .5 * dd_doppler(U) + d_doppler(U) * dfx(U) / fx(U)
if etype == 'LL':
bias = .5 * dd_doppler(U) * fx(U)
return bias * h**2 * sigmak(ktype)
h = .05
ktype = 'Gaussian'
fig = plt.figure(figsize=(15, 6))
X, Y, U = generate_data()
Yhat = NW(X, X, Y, h=h, ktype=ktype)
Ynobias = Yhat - bias(X, etype='NW', h=h, ktype=ktype)
plt.plot(X, doppler(X), lw=2, color='blue', label='True')
plt.plot(X, Yhat, lw=2, color='red', label='Fitted')
plt.scatter(X, Y, s=15, lw=.5, facecolor='none', label='Realized')
plt.plot(X, Ynobias, lw=2, color='green', label='No Bias')
plt.xlim([0, 1])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
def LL(U, X, Y, h=.1, ktype='Truncated'):
# X - N-array
# U - M-array
# K - M*N-array
W = weight(U, X, h, ktype)
alpha = np.empty(U.shape[0])
beta = np.empty(U.shape[0])
for i in range(U.shape[0]):
# N*N-array
K = np.diag(W[i])
# N-array
Z1 = (X - U[i]) / h
Z0 = np.ones(Z1.shape)
# 2*N-array
Z = np.vstack([Z0, Z1]).T
# 2*2-array
A = np.dot(Z.T, np.dot(K, Z))
# 2-array
B = np.dot(Z.T, np.dot(K, Y))
# 2-array
coef = np.dot(np.linalg.inv(A), B)
alpha[i] = coef[0]
beta[i] = coef[1]
return alpha, beta
X, Y, U = generate_data()
Yhat, dYhat = LL(U, X, Y, h=.05, ktype='Gaussian')
fig = plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.plot(U, Yhat, lw=2, color='red', label='Fitted')
plt.scatter(X, Y, s=15, lw=.5, facecolor='none', label='Realized')
plt.xlim([0, 1])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.title('Doppler function')
plt.subplot(1, 2, 2)
plt.plot(U, d_doppler(U), lw=2, color='blue', label='True')
plt.plot(U, dYhat, lw=2, color='red', label='Fitted')
plt.xlim([0, 1])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.title('Doppler function derivative')
plt.show()
X1, Y1, U = generate_data(lmbd=.1, trunc='left')
X2, Y2, U = generate_data(lmbd=.1, trunc='right')
ktype = 'Gaussian'
h = .05
Y1hat = NW(U, X1, Y1, h=h, ktype=ktype)
Y2hat = NW(U, X2, Y2, h=h, ktype=ktype)
fig = plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
plt.hist(X1, normed=True, bins=20, label='Histogram')
plt.ylabel('X1')
plt.subplot(2, 2, 2)
plt.hist(X2, normed=True, bins=20, label='Histogram')
plt.ylabel('X2')
plt.subplot(2, 2, 3)
plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.plot(U, Y1hat, lw=2, color='red', label='Fitted')
plt.scatter(X1, Y1, s=15, lw=.5, facecolor='none', label='Realized')
plt.xlim([0, 1])
plt.xlabel('X1')
plt.ylabel('Y1')
plt.legend()
plt.subplot(2, 2, 4)
plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.plot(U, Y2hat, lw=2, color='red', label='Fitted')
plt.scatter(X2, Y2, s=15, lw=.5, facecolor='none', label='Realized')
plt.xlim([0, 1])
plt.xlabel('X2')
plt.ylabel('Y2')
plt.legend()
plt.show()
def error(Y, X, h, ktype):
ehat = np.empty(X.shape)
for i in range(X.shape[0]):
ehat[i] = Y[i] - NW(X[i], np.delete(X, i), np.delete(Y, i), h=h, ktype=ktype)
return np.array(ehat)
N = 500
X, Y, U = generate_data(N=N, lmbd=.2)
h = .05
ktype = 'Epanechnikov'
Yhat = NW(U, X, Y, h=h, ktype=ktype)
ehat = error(Y, X, h, ktype)
sigma2hat = NW(U, X, ehat**2, h=.1, ktype=ktype)
fxhat = ss.gaussian_kde(X)(U)
V2hat = roughness(ktype) * sigma2hat / fxhat / N / h
shat = V2hat**.5
fig = plt.figure(figsize = (10, 10))
plt.subplot(3, 1, 1)
plt.scatter(X, Y, s=15, lw=.5, facecolor='none', label='Realized')
#plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.fill_between(U, Yhat - 2*shat, Yhat + 2*shat, lw=0, color='red', alpha=.2, label='+2s')
plt.plot(U, Yhat, lw=2, color='red', label='Fitted')
plt.ylabel('Y')
plt.legend()
plt.xlim([0, 1])
ylim = plt.gca().get_ylim()
plt.title('Data')
plt.subplot(3, 1, 2)
plt.scatter(X, ehat, s=15, lw=.5, facecolor='none', label='Errors')
plt.axhline(color='black')
plt.ylim(ylim)
plt.xlim([0, 1])
plt.title('Errors')
plt.subplot(3, 1, 3)
plt.plot(U, sigma2hat**.5, lw=2, color='red', label='Estimate')
plt.plot(U, .1 * np.ones(U.shape), lw=2, color='blue', label='True')
plt.ylim([0, .4])
plt.xlim([0, 1])
plt.legend()
plt.xlabel('X')
plt.title('Conditional variance')
plt.tight_layout()
plt.show()
N = 500
X, Y, U = generate_data(N=N)
ktype = 'Gaussian'
H = np.linspace(.001, .05, 100)
CV = np.array([])
for h in H:
ehat = error(Y, X, h, ktype)
CV = np.append(CV, np.mean(ehat**2))
h = H[CV.argmin()]
Yhat = NW(U, X, Y, h=h, ktype=ktype)
ehat = error(Y, X, h, ktype)
sigma2hat = NW(U, X, ehat ** 2, h=h, ktype=ktype)
fxhat = ss.gaussian_kde(X)(U)
V2hat = roughness(ktype) * sigma2hat / fxhat / N / h
shat = V2hat**.5
plt.figure(figsize=(10, 5))
plt.plot(H, CV)
plt.scatter(h, CV.min(), facecolor='none', lw=2, s=100)
plt.xlim([H.min(), H.max()])
plt.xlabel('Bandwidth, h')
plt.ylabel('cross-validation, CV')
plt.show()
plt.figure(figsize=(10, 5))
#plt.plot(U, doppler(U), lw=2, color='blue', label='True')
plt.fill_between(U, Yhat - 2*shat, Yhat + 2*shat, lw=0, color='red', alpha=.2, label='+2s')
plt.plot(U, Yhat, lw=2, color='red', label='Fitted')
plt.scatter(X, Y, s=15, lw=.5, facecolor='none', label='Realized')
plt.xlim([0, 1])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Doppler function
Step2: Derivative of Doppler function
Step3: Left and right truncated exponentials
Step4: Draw the densitites
Step5: Kernels
Step6: Nadaraya-Watson (NW) or local constant estimator
Step7: Nadaraya-Watson (NW)
Step8: Generate data
Step9: Perform estimation and plot the results
Step10: Bias correction
Step11: Local Linear (LL) estimator
Step12: Perform estimation and plot the results
Step13: Comparison for different DGP of X
Step14: Conditional variance and confidence intervals
Step15: Estimate variance
Step16: Plot the results
Step17: Bandwidth selection
Step18: Plot the (optimized) fit
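A quick sanity check of the NW estimator defined above, on synthetic data (a sketch, not part of the original notebook): for a nearly linear signal the fit should track y = 2x.
X_toy = np.sort(np.random.rand(200))
Y_toy = 2 * X_toy + np.random.normal(0, .05, 200)
U_toy = np.linspace(.05, .95, 50)
Y_fit = NW(U_toy, X_toy, Y_toy, h=.05, ktype='Gaussian')
print(np.abs(Y_fit - 2 * U_toy).max())  # small for a reasonable bandwidth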
|
69 | <ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
characters_per_batch = n_seqs * n_steps
n_batches = len(arr)//characters_per_batch
# Keep only enough characters to make full batches
arr = arr[:n_batches * characters_per_batch]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
def build_cell(lstm_size, keep_prob):
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
x: Input tensor
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# That is, the shape should be batch_size*num_steps rows by lstm_size columns
seq_output = tf.concat(lstm_output, axis=1)
x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per batch_size per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
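A quick illustration of the target shifting done inside get_batches (a numpy-only sketch with arbitrary toy values): the targets are the inputs rolled one step to the left, with the first element wrapped to the end.
x = np.array([[11, 12, 13, 14, 15]])
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
print(x)  # [[11 12 13 14 15]]
print(y)  # [[12 13 14 15 11]]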
|
70 | <ASSISTANT_TASK:>
Python Code:
#$HIDE_INPUT$
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "hacker_news" dataset
dataset_ref = client.dataset("hacker_news", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# Construct a reference to the "comments" table
table_ref = dataset_ref.table("comments")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "comments" table
client.list_rows(table, max_results=5).to_dataframe()
# Query to select comments that received more than 10 replies
query_popular = """
                SELECT parent, COUNT(id)
                FROM `bigquery-public-data.hacker_news.comments`
                GROUP BY parent
                HAVING COUNT(id) > 10
                """
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 10 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
query_job = client.query(query_popular, job_config=safe_config)
# API request - run the query, and convert the results to a pandas DataFrame
popular_comments = query_job.to_dataframe()
# Print the first five rows of the DataFrame
popular_comments.head()
# Improved version of earlier query, now with aliasing & improved readability
query_improved = """
                 SELECT parent, COUNT(1) AS NumPosts
                 FROM `bigquery-public-data.hacker_news.comments`
                 GROUP BY parent
                 HAVING COUNT(1) > 10
                 """
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
query_job = client.query(query_improved, job_config=safe_config)
# API request - run the query, and convert the results to a pandas DataFrame
improved_df = query_job.to_dataframe()
# Print the first five rows of the DataFrame
improved_df.head()
query_good = """
             SELECT parent, COUNT(id)
             FROM `bigquery-public-data.hacker_news.comments`
             GROUP BY parent
             """
query_bad = """
            SELECT author, parent, COUNT(id)
            FROM `bigquery-public-data.hacker_news.comments`
            GROUP BY parent
            """
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Let's use the table to see which comments generated the most replies. Since
Step3: Now that our query is ready, let's run it and store the results in a pandas DataFrame
Step5: Each row in the popular_comments DataFrame corresponds to a comment that received more than ten replies. For instance, the comment with ID 801208 received 56 replies.
Step7: Now you have the data you want, and it has descriptive names. That's good style.
Step9: Note that there are two variables
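An optional sketch for estimating scan size before running a query (assumes the dry_run option of QueryJobConfig in google-cloud-bigquery):
dry_run_config = bigquery.QueryJobConfig(dry_run=True, use_query_cache=False)
dry_run_job = client.query(query_improved, job_config=dry_run_config)
print("This query will process {} bytes.".format(dry_run_job.total_bytes_processed))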
|
71 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(0, './code')
# Go into the subdirectory
from thinkbayes import Pmf
# Grab the thinkbayes script
help(Pmf)
# What is this object?
pmf = Pmf()
# intialize the object
for x in [1,2,3,4,5,6]:
# for x in array
pmf.Set(x, 1/6.0)
# Set the frequency for each x
pmf.Print()
word_list = ['hi', 'the', 'bye', 'hi', 'football', 'sky']
pmf = Pmf()
for word in word_list:
pmf.Incr(word, 1)
pmf.Print()
pmf.Normalize()
pmf.Print()
# wow!
print(pmf.Prob('the'))
pmf = Pmf()
# Reinitialize the Pmf()
pmf.Set('Bowl 1', 0.5)
pmf.Set('Bowl 2', 0.5)
# Set up the prior distribution; 50/50 odds
pmf.Print()
# Show us what's in there so far
pmf.Mult('Bowl 1', 0.75)
pmf.Mult('Bowl 2', 0.5)
# Update with the vanilla likelihoods
pmf.Print()
# Where are we at now?
pmf.Normalize()
pmf.Normalize()
pmf.Prob('Bowl 1')
# Odds of getting the Vanilla cookie from Bowl 1?
pmf.Prob('Bowl 2')
# Odds of getting the Vanilla cookie from Bowl 2?
class Cookie(Pmf):
    """A map from string bowl ID to probability."""
    def __init__(self, hypos):
        """Initialize self.

        hypos: sequence of string bowl IDs
        """
Pmf.__init__(self)
# Intializie the Pmf object from before
for hypo in hypos:
#self.Set(hypo, 1)
# For hypo in in array, set to 1
# For learning, let's see what happens with Pmf.Incr()
# Yields the same result
self.Incr(hypo, 1)
self.Normalize()
#Renormalize after all the new hypotheses
mixes = {
'Bowl 1':dict(vanilla=0.75, chocolate=0.25),
'Bowl 2':dict(vanilla=0.5, chocolate=0.5),
}
# Mix data as provided by the problem.
# Refresher:
# * Bowl 1 contains 30 vanilla cookies and 10 chocolate cookies.
# * Bowl 2 contains 20 of each (20 vanilla, 20 chocolate)
def Likelihood(self, data, hypo):
        """The likelihood of the data under the hypothesis.

        data: string cookie type
        hypo: string bowl ID
        """
mix = self.mixes[hypo]
# Search for the mix of a given hypo ('Bowl 1' or 'Bowl 2')
like = mix[data]
# Likelihood of the prior given the current data in the mixes dict
return like
# Return the likelihood
def Update(self, data):
        """Updates the PMF with new data.

        data: string cookie type
        """
for hypo in self.Values():
# For every hypo in the current prior distribution
like = self.Likelihood(data, hypo)
# Get the likelihood value using the Likelihood() method above
self.Mult(hypo, like)
# Multiple the prior by the new Likelihood
self.Normalize()
# Renormalize after all the new updates
hypos = ['Bowl 1', 'Bowl 2']
pmf = Cookie(hypos)
# Run the Cookie object on our hypothesis, using __init__ to
# generate priors
pmf.Print()
# Show us the current distribution
Cookie.mixes
pmf.Update('vanilla')
# Update using all 'vanilla' entries in the dictionary
pmf.Print()
# Show us the posterior distribution (post Update via Likelihood)
for hypo, prob in pmf.Items():
print(hypo, prob)
dataset = ['vanilla', 'chocolate', 'vanilla']
# 3 draws, here are the results
for data in dataset:
pmf.Update(data)
# Update our pmf using the results of our draws
pmf.Print()
# What's the new distribution?
# More refined with new information
class Monty(Pmf):
    """Map from string location of car to probability."""
    def __init__(self, hypos):
        """Initialize the prior distribution using the hypotheses.

        hypos: sequence of hypotheses
        """
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, 1)
self.Normalize()
def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data
        """
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
self.Normalize()
def Likelihood(self, data, hypo):
        """Compute the likelihood of the data under the hypothesis.

        hypo: string name of the door where the prize is
        data: string name of the door Monty opened
        """
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
hypos = 'ABC'
pmf = Monty(hypos)
pmf.Print()
# Current prior; all have the same odds
data = 'B'
# Opened Door B
pmf.Update(data)
# Update Prior with the Likelihoods
pmf.Print()
# Posterior Distribution
# Our opened door B in data was not the car, so the odds for Car behind B are now 0
def Likelihood(self, data, hypo):
    """Compute the likelihood of the data under the hypothesis.

    hypo: string name of the door where the prize is
    data: string name of the door Monty opened
    """
print('Is our hypo {} the same as our data {}?'.format(hypo, data))
if hypo == data:
print('Yes, so the odds of the car beind {} are 0'.format(data))
return 0
elif hypo == 'A':
print('Not A, so the odds update to 50/50, only two doors left')
return 0.5
else:
return 1
for hypo, prob in pmf.Items():
print(hypo, prob)
class Monty(Pmf):
    """Map from string location of car to probability."""
    def __init__(self, hypos):
        """Initialize the prior distribution using the hypotheses.

        hypos: sequence of hypotheses
        """
Pmf.__init__(self)
for hypo in hypos:
self.Set(hypo, 1)
self.Normalize()
def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data
        """
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
self.Normalize()
def Likelihood(self, data, hypo):
        """Compute the likelihood of the data under the hypothesis.

        hypo: string name of the door where the prize is
        data: string name of the door Monty opened
        """
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
from thinkbayes import Suite
class Monty(Suite):
def Likelihood(self, data, hypo):
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
suite = Monty('ABC')
suite.Update('B')
suite.Print()
from thinkbayes import Suite
class M_and_M(Suite):
    """Map from hypothesis (A or B) to probability."""
# Mixes as defined by the problem
mix94 = dict(brown=30,
yellow=20,
red=20,
green=10,
orange=10,
tan=10)
mix96 = dict(blue=24,
green=20,
orange=16,
yellow=14,
red=13,
brown=13)
hypoA = dict(bag1=mix94, bag2=mix96)
hypoB = dict(bag1=mix96, bag2=mix94)
# Hypothesis using the info, i.e which bag did it come from, 1 or 2?
hypotheses = dict(A=hypoA, B=hypoB)
def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        hypo: string hypothesis (A or B)
        data: tuple of string bag, string color
        """
print('The data we observed is {}'.format(data))
bag, color = data
# Take the bag and color of M&M from the observation
mix = self.hypotheses[hypo][bag]
print('The current hypo we are examing is {}'.format(hypo))
# Pull the mixes for the relevant bag and color
print('The current mix for {} is {}'.format(bag, mix))
like = mix[color]
# Calculate the likelihood of seeing that color
print('Return the number of M&Ms with that color in {} ({}) and renomarlize for likelihood'.format(bag, like))
return like
suite = M_and_M('AB')
print('\n The prior probabilities before any observations are:\n')
suite.Print()
print('\n Where \'A\' are the odds the bag is from 1994, and \'B\' are the odds that it came from 1996\n')
print('\n Let us draw the first M&M')
suite.Update(('bag1', 'yellow'))
print('\n The posterior probabilities after this observation is now:')
suite.Print()
print('\n Let us draw another M&M')
suite.Update(('bag2', 'green'))
print('\n The posterior probabilities after pulling both M&Ms is now:')
suite.Print()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following code builds a Pmf to represent the distribution of
Step2: This is a Probability Mass Function object, which includes some pre-defined methods and parameters to help us deal with Pmfs (which measures the chance that some disecrete number is equal some value, where all values must sum to 1).
Step3: Pmf() creates an empty Probability Mass Function with no values.
Step4: The Set method sets the probability associated with each value to $1/6$.
Step5: Incr increases the “probability” associated with each word (array value) by 1. If a
Step6: Once you have a defined Pmf object, you can ask for the probability associated
Step7: Which returns the frequency of the word “the” as a fraction of the words in the list.
Step8: This distribution, which contains the priors for each hypothesis, is called (wait for it) the <span>prior distribution</span>.
Step9: Note that this does not add up to 1. That is because after this update, the distribution is no longer normalized, but because these hypotheses are mutually exclusive and collectively exhaustive, we can <span>renormalize</span>
Step10: The result is a distribution that contains the posterior probability for each hypothesis, which is called (wait now) the <span>posterior distribution</span>.
Step11: Finally, we can get the posterior probability for Bowl 1, what are the odds of getting that vanilla cookie from Bowl 1?
Step12: And for fun, Bowl 2?
Step17: And the answer is 0.6. You can download this example from
Step18: A Cookie object is now a Pmf that maps from hypotheses to their probabilities.
Step19: Likelihood
Step20: Here’s what the update looks like
Step21: And then we can print the posterior probability of each hypothesis
Step22: Which is the same as what we got before.
Step27: The other advantage is that it provides a framework for solving many
Step28: So far Monty and Cookie are nearly the same (ignoring the Likelihood method for a second)
Step29: Calling Update is pretty much the same
Step31: The implementation of Update is exactly the same; we are updating the Prior distribution as defined by the hypothesis using the Mult function via Likelhood.
Step32: Finally, printing the results is the same
Step37: The problem centers around the notion of switching; the car is behind one of three doors, and Monty can safely open one door at random.
Step38: In this example, writing Likelihood is a little complicated, but the
Step43: And here’s the code that uses this class.
Step46: You can download this example from http
Step47: First I need to encode the color mixes from before and after 1995
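For reference, the cookie update from the earlier steps worked out by hand in plain Python (numbers taken from the steps above; no Pmf class needed):
priors = {'Bowl 1': 0.5, 'Bowl 2': 0.5}
likelihoods = {'Bowl 1': 0.75, 'Bowl 2': 0.5}  # P(vanilla | bowl)
unnorm = {h: priors[h] * likelihoods[h] for h in priors}
total = sum(unnorm.values())
posterior = {h: p / total for h, p in unnorm.items()}
print(posterior)  # {'Bowl 1': 0.6, 'Bowl 2': 0.4}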
|
72 | <ASSISTANT_TASK:>
Python Code:
# Import pyoptools to load all contents
from pyoptools.all import *
from math import pi
#Example 2.1 : Plane surfaces
P1=Plane(shape=Circular(radius=(20)),reflectivity=1)
P2=Plane(shape=Rectangular(size=(40,25)))
P3=Plane(shape=Triangular(coord=((-15,15),(5,-20),(18,12))))
Plot3D(P1,center=(0,0,0),size=(60,60),rot=[(0,-pi/6,0)],scale=6)
# Example 2.2: Spherical surface
S=Spherical(curvature=1/300., shape=Circular(radius=145.),reflectivity=1)
Plot3D(S,center=(0,0,0),size=(400,400),rot=[(0,-pi/1.9,0)],scale=1)
# Example 2.3: Circular cilynders with different shapes
S1=Cylinder(radius=36,length=100,reflectivity=1)
S2=Cylindrical(shape=Circular(radius=(50)),curvature=1/100.)
S3=Cylindrical(shape=Rectangular(size=(50,100)),curvature=1/20.)
Plot3D(S2,center=(0,0,0),size=(150,150),rot=[(pi/6,pi/6,0)],scale=2)
# Example 2.4 poly2d in pyOpTools
p2d=poly2d((1,2,3,4,5,1,7,8,9,10,11,12))
print(p2d)
# Example 2.5 Symmetric aspherical surface
AS=Aspherical(shape=Circular(radius=2.5),Ax=.2,Ay=.2,Kx=.1, Ky=.15, poly=poly2d((0,0,0,.5,0,.5)))
Plot3D(AS,center=(-2,-2,5),size=(8,8),rot=[(-3*pi/10,pi/4,0)],scale=40)
# Example 3.1: Building an equilateral prism and a cylindrical lens
from math import *
## Prism
L=60
h=L/2.*cos(pi/3.)
h1=L/2*sin(pi/3)
S1=Plane(shape=Rectangular((L,L)))
S2=Plane(shape=Rectangular((L,L)))
S3=Plane(shape=Rectangular((L,L)))
T1=Plane(shape=Triangular(((2*h,h1),(-2*h,h1),(0,-h1))))
T2=Plane(shape=Triangular(((2*h,h1),(-2*h,h1),(0,-h1))))
P=Component(surflist=[(S1,(0,h1,0),(pi/2,0,0)),
(S2,(0,0,h),(pi/6,0,0)),
(S3,(0,0,-h),(-pi/6,0,0)),
(T1,(L/2,0,0),(0,pi/2,0)),
(T2,(-L/2,0,0),(0,pi/2,0))
],
material=material.schott["N-BK7"])
## Cylindrical lens
S4=Cylindrical(shape=Circular(radius=25),curvature=1./200)
S5=Cylindrical(shape=Circular(radius=25),curvature=-1./200)
L=Component(surflist=[(S5,(0,0,5),(0,0,pi/2)),
(S4,(0,0,-5),(0,0,pi/2))
],
material=material.schott["N-BK7"])
display(Plot3D(P,center=(0,0,0),size=(150,100),scale=2,rot=[(0,pi/2+.2,0),(-.1,0,0)]))
Plot3D(L,center=(0,0,0),size=(150,100),scale=2,rot=[(0,pi/2.2,0),(0,0,0)])
# Example 3.2 Visualization of an air spaced doublet
L=AirSpacedDoublet(radius = 25.,curvature_s1= 0.01,curvature_s2= 0.01,
curvature_s3= 0.01,curvature_s4= 0.01, thickness_l1= 5,air_gap=5 , thickness_l2= 5,
material_l1=1., material_l2=1.)
Plot3D(L,center=(0,0,0),size=(70,70),rot=[(0,pi/1.8,0)],scale=5)
# Example 3.3 Visualization of a rectangular thick mirror.
m2=RectMirror(size= (5,30,60), reflectivity = 1)
Plot3D(m2,center=(0,0,30),size=(100,50),rot=[(0,pi/1.7,0)],scale=4)
# Example 3.4 Visualization of a beamsplitter
BS=BeamSplitingCube(size=20, reflectivity=0.5, material=material.lzos["BK8"])
Plot3D(BS,center=(0,0,0),size=(50,50),rot=[(pi/4,0,0)],scale=6)
#Example 4.1: Visualization of 2 predefined lenses and a CCD
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L3=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
C=CCD()
S=System(complist=[(L2,(0,0,100),(0,0,0)),(L3,(0,0,120),(0,0,0)),(C,(0,0,215),(0,0,0)) ],n=1)
Plot3D(S,center=(0,0,150),size=(250,100),scale=3,rot=[(0,pi/1.9,0)])
#Example 4.2 Visualization of a default parallel beam
S=System()
R=parallel_beam_c()
S.ray_add(R)
S.propagate()
Plot3D(S,center=(0.3,-0.4,0.6),size=(4,2),scale=150,rot=[(pi/4,pi/6,pi/4)])
# Example 4.3 : Ray tracing for the optical system of the example 4.1
L1=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
CSys=CCD()
S=System(complist=[(L1,(0,0,100),(0,0,0)),(L2,(0,0,120),(0,0,0)),(CSys,(0,0,210),(0,0,0)) ],n=1)
R=point_source_c(origin=(0,0,0),direction=(0,0,0),span=(pi/8,pi/8),num_rays=(5,4),wavelength=0.45,label="")
S.ray_add(R)
S.propagate()
Plot3D(S,center=(0,0,100),size=(250,100),scale=4,rot=[(0,pi/2,0),(pi/20,-pi/10,0)])
## Example 4.4 : Spectroscope using the components from the example 3.1
## Taken from the example 3.1
# Prism
from math import *
L=60
h=L/2.*cos(pi/3.)
h1=L/2*sin(pi/3)
S1=Plane(shape=Rectangular((L,L)))
S2=Plane(shape=Rectangular((L,L)))
S3=Plane(shape=Rectangular((L,L)))
T1=Plane(shape=Triangular(((2*h,h1),(-2*h,h1),(0,-h1))))
T2=Plane(shape=Triangular(((2*h,h1),(-2*h,h1),(0,-h1))))
P=Component(surflist=[(S1,(0,h1,0),(pi/2,0,0)),
(S2,(0,0,h),(pi/6,0,0)),
(S3,(0,0,-h),(-pi/6,0,0)),
(T1,(L/2,0,0),(0,pi/2,0)),
(T2,(-L/2,0,0),(0,pi/2,0))
],
material=material.schott["N-BK7"])
S4=Cylindrical(shape=Circular(radius=25),curvature=1./200)
S5=Cylindrical(shape=Circular(radius=25),curvature=-1./200)
# Lens
L=Component(surflist=[(S5,(0,0,5),(0,0,pi/2)),
(S4,(0,0,-5),(0,0,pi/2))
],
material=material.schott["N-BK7"])
# CCD and optical system
CSpect=CCD()
PCCD=243.5
S=System(complist=[(P,(0,0,0),(0,0,0)),
(L,(0,.35*60,60),(-0.357,0,0)),
(CSpect,(0,.35*PCCD,PCCD),(-0.357,0,0))],n=1)
## Polychromatic beam
R=[]
for x in range (-5,6,1):
for y in range (-5,6,2):
R.append(Ray(pos=(x,y+35,-100),dir=(0,-.35,1),wavelength=.45))
R.append(Ray(pos=(x,y+35,-100),dir=(0,-.35,1),wavelength=.5))
R.append(Ray(pos=(x,y+35,-100),dir=(0,-.35,1),wavelength=.55))
R.append(Ray(pos=(x,y+35,-100),dir=(0,-.35,1),wavelength=.60))
R.append(Ray(pos=(x,y+35,-100),dir=(0,-.35,1),wavelength=.65))
S.ray_add(R)
S.propagate()
Plot3D(S,center=(0,.35*60,60),size=(500,200),scale=2,rot=[(0,pi/2+.2,0),(-.1,0,0)])
# Example 4.5: Recovering the information from the CCD of the example 4.3
spot_diagram_c(CSys)
CSys.get_optical_path_data()
# Example 4.6: Polynomial approximation and visualization of the optical path length map of the example 4.3 using pylab.
poly,error=CSys.get_optical_path_map_lsq(order=2)
print(poly)
%pylab inline
imshow(CSys.get_optical_path_map())
colorbar()
# Example 4.7: spot diagram for the CCD of the example 4.4
spot_diagram_c(CSpect)
# Example 4.8: Chief ray for the system from the example 4.3 with an aperture stop between the two lenses
L1=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
C=CCD()
AP=Stop(shape=Circular(radius=(30)), ap_shape=Circular(radius=(25)))
S=System(complist=[(L1,(0,0,100),(0,0,0)),(L2,(0,0,120),(0,0,0)),(AP,(0,0,110),(0,0,0)), (C,(0,0,150),(0,0,0)) ],n=1)
CHR=chief_ray_search(S,AP,(0,10,0),(0,-1,1))
S.ray_add(CHR)
S.propagate()
Plot3D(S,center=(0,0,100),size=(150,100),scale=4,rot=[(0,pi/2.1,0)])
# Example 4.9: Optical path of a ray
M=RectMirror(size= (20,30,3), reflectivity = 1)
C=CCD()
S=System(complist=[(M,(0,0,100),(0,pi/4,pi/2)), (M,(0,-25,100),(0,pi/4,pi/2)), (C,(0,-25,130),(0,0,0))],n=1)
R=Ray(pos=(0,0,0),dir=(0,0,1),wavelength=.55) #Parent ray
S.ray_add(R)
S.propagate()
display(Plot3D(S,center=(0,-10,80),size=(140,60),scale=4,rot=[(0,pi/2,0)]))
#R.optical_path() # Optical path from the origin to the first mirror
(R.get_final_rays()[0]).optical_path_parent() # Optical path from the origin to the second mirror
d=0.005 # Distance between adjacent sources on the grating in mm
G=RPPMask(shape=Triangular(coord=((-15,15),(5,-20),(18,12))), phm=poly2d([0,0,2*pi*1/d,0,]),M=[-1])#,0,-1,-2])
OC=Component(surflist=[(G, (0, 0, 0), (0, 0, 0))])
C=CCD(size=(10,50))
S=System(complist=[(OC,(0,0,20),(0,0,0)),(C,(0,0,120),(0,0,0))],n=1)
r=[]
#Test using many wavelengths
for w in (.4,.45,.5,.53,.6,.65,.7):
r.append(Ray(wavelength=w)) #standard rays of different wavelength
S.ray_add(r)
S.propagate()
%pylab inline
display(Plot3D(S,center=(0,0,60), size=(180,150),scale=3,rot=[(0,pi/2.3,0)]))
spot_diagram_c(C)
# Autocollimator with a plane mirror
SA=0.005 # small angle in radians (less than 0.01 rad)
BS=BeamSplitingCube(size=25,reflectivity=.5,material=1.)
L=library.Edmund.get("32494")
M1=Component(surflist=[(Plane(shape=Rectangular((25,25)) ,reflectivity=0.5),(0,0,0),(0,0,0))])
M2=Component(surflist=[(Plane(shape=Rectangular((25,25)) ,reflectivity=1),(0,0,0),(0,0,0))])
C=CCD()
# There is a measurement problem: if the angle is too small the cavity becomes resonant
S=System(complist=[(C,(20,0,20),(0,pi/2,0)),
(BS,(0,0,20),(0,0,0)),
(L,(0,0,150),(0,-pi,0)),
(M1,(0,0,170),(0,0,0)),(M2,(0,0,570),(SA,0,0))],n=1.)
R=point_source_c(span=(0.04,0.04), num_rays=(2,2), wavelength=.65)
S.ray_add(R)
S.propagate()
%pylab inline
display(Plot3D(S,center=(0,0,300), size=(600,100),scale=2,rot=[(0,0,-3*pi/8),(0,3*pi/8,0)]))
spot_diagram_c(C)
# Calculating the small angle SA from data
d=C.get_optical_path_data()[1][0]-C.get_optical_path_data()[1][1]
print('The measured angle is SA=', d/(150*2))
## Air spaced triplet telescope objective -
L1=SphericalLens(radius=15,curvature_s1=1/50.098,curvature_s2=-1/983.420,thickness=4.500,material=material.schott["N-BK7"])
L2=SphericalLens(radius=15,curvature_s1=1/56.671,curvature_s2=-1/171.150,thickness=4.500,material=material.schott["N-BK7"])
L3=SphericalLens(radius=10,curvature_s1=-1/97.339,curvature_s2=1/81.454,thickness=3.500,material=material.schott["N-SF1"])
OA=Ray(pos=(0,0,-10000),dir=(0,0,1),wavelength=.55) # Optical axis
C=CCD(size=(10,10))
S=System(complist=[(L1,(0,0,100),(0,0,0)),(L2,(0,0,104.6),(0,0,0)),(L3,(0,0,114.171),(0,0,0)),
(C,(0,0,191.053),(0,0,0))],n=1)
PB=parallel_beam_c(origin=(0,0,50),direction=(0,0,0),size=(15,15),num_rays=(15,15),wavelength=0.55)
S.ray_add(PB)
S.ray_add(PB[7])
S.ray_add(PB[8])
S.propagate()
display(Plot3D(S,center=(0,0,140),size=(150,70),scale=8,rot=[(0,pi/2.2,0)]))
# To check if the simulation is correct we calculate the focal distance as the distance between
# the primary principal plane and the paraxial focal point. (According to the book the focal length is 100mm)
f=(nearest_points(PB[110].get_final_rays()[0],PB[115].get_final_rays()[0])[0][2])-(find_ppp(S, OA)[2])
print(f)
# Finding the plane of the circle of least confusion
def CircleSph(lp):
    L1=library.Edmund.get("45129")  # same name as referenced in the System below
CSph=CCD(size=(3,3))
SSph=System(complist=[(L1,(0,0,20),(0,0,0)),(CSph,(0,0,lp),(0,0,0))],n=1)
PB= parallel_beam_c(size=(1.5,1.5),num_rays=(5,5), wavelength=.650)
SSph.ray_add(PB)
SSph.propagate()
X,Y,Z=CSph.get_optical_path_data()
return array(X).std()
# Seed
CircleSph(25)
from scipy.optimize import fmin
fmin(CircleSph,25)
#Spherical lens
PB= parallel_beam_c(size=(1.5,1.5),num_rays=(5,5), wavelength=.650)
L1=library.Edmund.get("45129") #f=6 r= 1.5
CSph=CCD(size=(3,3))
SSph=System(complist=[(L1,(0,0,20),(0,0,0)),(CSph,(0,0,26.11808777),(0,0,0))],n=1)
SSph.ray_add(PB)
SSph.propagate()
display(Plot3D(SSph,center=(0,0,20), size=(40,20),scale=16,rot=[(0,pi/2.1,0)]))
## Definition of the aspheric lens
R=2.75
k=-0.6139160
A2=0
A4=5.8891900E-04
A6=-1.7660200E-05
A8=1.0102500E-05
A10=-3.9148700E-06
r2=poly2d((0,0,0,1.,0,1.))
r4=r2*r2
r6=r4*r2
r8=r4*r4
r10=r8*r2
poly=A2*r2+A4*r4+ A6*r6 +A8*r8 +A10*r10
asf2=Aspherical(Kx=k, Ky=k, Ax=1./R,Ay=1./R, shape=Circular(radius=2.5),
poly=poly)
R=-3.1885400
k=-12.6638600
A2=0
A4=1.2458340e-02
A6=-3.7119450e-03
A8=5.1223910e-04
A10=-3.1085780e-05
poly=A2*r2+A4*r4+ A6*r6 +A8*r8 +A10*r10
asf1=Aspherical(Kx=k, Ky=k, Ax=1./R,Ay=1./R, shape=Circular(radius=2.5),
poly=poly)
AS=Component(surflist=[(asf2, (0, 0, 0), (0, 0, 0)), (asf1, (0, 0, 2.8+.35), (0,0, 0))], material=1.58913)
# Finding the plane of the circle of least confusion
def CircleAsph(lp):
CAsph=CCD(size=(3,3))
SAsph=System(complist=[(AS,(0,0,20),(0,0,0)),(CAsph,(0,0,lp),(0,0,0))],n=1)
PB= parallel_beam_c(size=(2,2),num_rays=(5,5), wavelength=.650)
SAsph.ray_add(PB)
SAsph.propagate()
X,Y,Z=CAsph.get_optical_path_data()
return array(X).std()
CircleAsph(25)
from scipy.optimize import fmin
fmin(CircleAsph,25)
# Visualization of the aspheric lens
PB= parallel_beam_c(size=(2,2),num_rays=(5,5), wavelength=.650)
CAsph=CCD(size=(3,3))
SAsph=System(complist=[(AS,(0,0,20),(0,0,0)),(CAsph,(0,0,24.95140076),(0,0,0))],n=1)
SAsph.ray_add(PB)
SAsph.propagate()
Plot3D(SAsph,center=(0,0,20), size=(40,20),scale=16,rot=[(0,pi/2.1,0)])
# The circle of least confusion is about 10 times smaller when using the aspheric lens
spot_diagram_c(CSph)
figure()
spot_diagram_c(CAsph)
# Lens choice: We look in the catalogue for the appropriate objective lens
L1=library.Edmund.get("45179") # f=200 r= 25
OA=Ray(pos=(0,0,-10000),dir=(0,0,1),wavelength=.55) # Optical axis
C=CCD(size=(10,10))
S=System(complist=[(L1,(0,0,100),(0,pi,0)), (C,(0,0,320.053),(0,0,0))],n=1)
PB=parallel_beam_c(origin=(0,0,50),direction=(0,0,0),size=(15,15),num_rays=(15,15),wavelength=.55)
S.ray_add(PB)
S.propagate()
display(Plot3D(S,center=(0,0,180),size=(250,70),scale=4,rot=[(0,pi/2,0)]))
f=(nearest_points(PB[7].get_final_rays()[0],PB[8].get_final_rays()[0])[0][2])-(find_ppp(S, OA)[2])
print(f)
# Placing the objective and the Porro prism
L1=library.Edmund.get("45179") #f=200 r= 25
RP1=RightAnglePrism(width=55,height=55,material=material.schott["N-SK5"])
RP2=RightAnglePrism(width=40,height=40,material=material.schott["N-SK5"])
CC=CCD(size=(50,50))
S=System(complist=[(L1,(0,0,100),(0,0,0)), (RP1,(0,-22.5,150),(pi,-pi/4,pi/2)), (RP2,(-20,-40,140),(0,pi/4,0)),
(CC,(-40,-46.41421356, 195.64187845),(0,0,0))],n=1)
PB=parallel_beam_c(origin=(0,0,0),direction=(0,0,0),size=(15,15),num_rays=(15,15),wavelength=.55)
S.ray_add(PB)
S.propagate()
display(Plot3D(S,center=(0,-30,140),size=(250,150),scale=5,rot=[(0,pi/2.,0),(pi/6,0,0)]))
# Calculating the coordinates of the paraxial focal point
nearest_points(PB[110].get_final_rays()[0],PB[115].get_final_rays()[0])
# Looking for the right eyepiece configuration
L2=library.Edmund.get("45175") # f=30 ; r=10
CC=CCD(size=(50,50))
S=System(complist=[(L2,(0,0,89.84),(0,pi,0)), (L2,(0,0,100),(0,0,0)), (CC,(0,0,1.18656541e+02),(0,0,0))],n=1)
## The distance between the two doublets is equal to the total lens thickness
PB=parallel_beam_c(origin=(0,0,0),direction=(0,0,0),size=(10,10),num_rays=(10,10),wavelength=.55)
S.ray_add(PB)
S.propagate()
display(Plot3D(S,center=(0,0,90),size=(100,50),scale=5,rot=[(0,pi/2,0)]))
# Distance between the center of the lens and the paraxial focal point
nearest_points(PB[7].get_final_rays()[0],PB[8].get_final_rays()[0])
## Placing the eyepiece: Binocular 7x50
L1=library.Edmund.get("45179") #f=200 r= 25
L2=library.Edmund.get("45175") # f=30 ; r=10
RP1=RightAnglePrism(width=55,height=55,material=material.schott["N-SK5"])
RP2=RightAnglePrism(width=40,height=40,material=material.schott["N-SK5"])
CC=CCD(size=(50,50))
S=System(complist=[(L1,(0,0,100),(0,0,0)), (RP1,(0,-22.5,150),(pi,-pi/4,pi/2)), (RP2,(-20,-40,140),(0,pi/4,0)),
(L2,(-40,-46.41421356,195.64187847+12.32427176),(0,pi,0)),
(L2,(-40,-46.41421356,195.64187847+12.32427176+10.16),(0,0,0)),
(CC,(-40,-40,260),(0,0,0))],n=1)
OA=Ray(pos=(0,0,100),dir=(0,0,10),intensity=100, wavelength=.55) # Optical axis
PB=parallel_beam_c(origin=(0,0,0),direction=(0,0,0),size=(15,15),num_rays=(15,15),wavelength=.55)
S.ray_add(OA)
S.ray_add(PB)
S.propagate()
%pylab inline
display(Plot3D(S,center=(0,-20,170),size=(250,130),scale=2,rot=[(0,pi/2.,0),(0,0,0)]))
figure()
spot_diagram_c(CC)
## Aberrated optical system:
## We place a CCD named HOLO between the two lenses
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L3=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
C=CCD()
HOLO=CCD(size=(50,50))
S=System(complist=[(L2,(0,0,100),(0,0,0)),(HOLO,(0,0,110),(0,0,0)),(L3,(0,0,120),(0,0,0)),(C,(0,0,215),(0,0,0)) ],n=1)
R=point_source_p(origin=(0.,0.,0.),direction=(0.,0.,0),span=pi/14
,num_rays=(50,50),wavelength=0.470, label="blue")
S.ray_add(R)
S.propagate()
# The plot is too slow due to the high number of rays
#Plot3D(S,center=(0,0,100),size=(250,100),scale=4,rot=[(0,pi/2.05,0)])
# We approximate the optical path length map with a polynomial
pol1,err1=HOLO.get_optical_path_map_lsq(order=15)
## Now we make the beam propagate backwards from a point fi
fi=210
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L3=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
C=CCD()
HOLO=CCD(size=(50,50))
S=System(complist=[(L2,(0,0,100),(0,0,0)),(HOLO,(0,0,110),(0,0,0)),(L3,(0,0,120),(0,0,0)),(C,(0,0,0),(0,0,0)) ],n=1)
R=point_source_p(origin=(0.,0.,fi),direction=(pi,0.,0),span=pi/14
,num_rays=(50,50),wavelength=0.470, label="blue")
S.ray_add(R)
S.propagate()
# The plot is too slow due to the high number of rays
#Plot3D(S,center=(0,0,100),size=(250,100),scale=4,rot=[(0,pi/2.05,0)])
# We approximate the optical path length map with a polynomial
pol2,err2=HOLO.get_optical_path_map_lsq(order=15)
# We add the polynomials
%pylab inline
grid=(2*pi/0.47e-3)*(-pol1-pol2)
X,Y=meshgrid(linspace(-5,5,20),linspace(-5,5,20))
Z=grid.eval(X,Y)
imshow(Z,extent=(-5,5,-5,5));colorbar()
# Correcting aberrations: Creating and placing the diffraction grating
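# The grating phase map is the negative of the measured aberration, so the traced first diffraction order (M=[1]) compensates the wavefront error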
G=RPPMask(shape=Rectangular(size=(50,50)), phm=(2*pi/0.47e-3)*(-pol1-pol2), M=[1])
DG=Component(surflist=[(G, (0, 0, 0), (0, 0, 0))])
fi=210
L2=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
L3=SphericalLens(radius=25,curvature_s1=1./100.,curvature_s2=-1./100,thickness=10,material=material.schott["N-BK7"])
C=CCD()
S=System(complist=[(L2,(0,0,100),(0,0,0)),(DG,(0,0,110),(0,0,0)),(L3,(0,0,120),(0,0,0)),(C,(0,0,fi),(0,0,0))],n=1)
R=point_source_p(origin=(0.,0.,0),direction=(0,0.,0),span=pi/14,num_rays=(10,10),wavelength=0.470, label="blue")
S.ray_add(R)
S.propagate()
Plot3D(S,center=(0,0,100),size=(250,100),scale=4,rot=[(0,pi/2.05,0)])
# Hologram
holo=(2*pi/.470e-3)*(-pol1-pol2)
X,Y=meshgrid(linspace(-25,25,1000),linspace(-25,25,1000))
figsize(15,15)
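# Wrap the phase modulo 2*pi to display the hologram as an interference-fringe pattern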
imshow(holo.meval(X,Y)%(2*pi),extent=(-25,25,-25,25),cmap="gray")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table of contents
Step2: 2.2 Spherical surfaces <a class="anchor" id="2.2"></a>
Step3: 2.3 Cylinders and cylindrical surfaces <a class="anchor" id="2.3"></a>
Step4: 2.4 Aspheric surfaces <a class="anchor" id="2.4"></a>
Step5: 2.5 Diffraction grating <a class="anchor" id="2.5"></a>
Step6: 3.1 Predefined optical components <a class="anchor" id="31"></a>
Step7: Catalog of lenses
Step8: 3.4 Optical prisms & beamsplitters <a class="anchor" id="34"></a>
Step9: 4. Optical systems <a class="anchor" id="4"></a>
Step10: 4.2 Rays <a class="anchor" id="42"></a>
Step11: 4.3 Ray tracing <a class="anchor" id="43"></a>
Step12: 4.4 Optical maps <a class="anchor" id="44"></a>
Step13: 4.5 Calculations on optical systems <a class="anchor" id="45"></a>
Step14: 4.6 Calculations involving rays <a class="anchor" id="46"></a>
Step15: 5. More advanced examples <a class="anchor" id="5"></a>
Step16: 5.2 Autocollimator <a class="anchor" id="52"></a>
Step17: 5.3 Air spaced triplet telescope objective <a class="anchor" id="53"></a>
Step18: 5.4 Comparison between a spherical and an aspheric lens <a class="anchor" id="54"></a>
Step19: 5.5 Binocular design <a class="anchor" id="55"></a>
Step20: 5.6 Correcting aberrations using a diffraction grating <a class="anchor" id="56"></a>
Step21: 5.7 Creating a hologram <a class="anchor" id="57"></a>
|
73 | <ASSISTANT_TASK:>
Python Code:
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
import os
import mne
from mne.preprocessing.nirs import (optical_density,
temporal_derivative_distribution_repair)
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_cw_amplitude_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw_intensity = mne.io.read_raw_nirx(fnirs_cw_amplitude_dir, verbose=True)
raw_intensity.load_data().resample(3, npad="auto")
raw_od = optical_density(raw_intensity)
new_annotations = mne.Annotations([31, 187, 317], [8, 8, 8],
["Movement", "Movement", "Movement"])
raw_od.set_annotations(new_annotations)
raw_od.plot(n_channels=15, duration=400, show_scrollbars=False)
corrupted_data = raw_od.get_data()
corrupted_data[:, 298:302] = corrupted_data[:, 298:302] - 0.06
corrupted_data[:, 450:750] = corrupted_data[:, 450:750] + 0.03
corrupted_od = mne.io.RawArray(corrupted_data, raw_od.info,
first_samp=raw_od.first_samp)
new_annotations.append([95, 145, 245], [10, 10, 10],
["Spike", "Baseline", "Baseline"])
corrupted_od.set_annotations(new_annotations)
corrupted_od.plot(n_channels=15, duration=400, show_scrollbars=False)
corrected_tddr = temporal_derivative_distribution_repair(corrupted_od)
corrected_tddr.plot(n_channels=15, duration=400, show_scrollbars=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data
Step2: We can see some small artifacts in the above data from movement around 40,
Step3: Apply temporal derivative distribution repair
|
74 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-2', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
75 | <ASSISTANT_TASK:>
Python Code:
from thermostate import State, Q_, units, set_default_units
p_1 = Q_(101325, 'Pa')
p_1 = Q_(1.01325, 'bar')
p_1 = Q_(14.7, 'psi')
p_1 = Q_(1.0, 'atm')
T_1 = 460*units.degR
T_1 = 25*units.degC
T_1 = 75*units.degF
T_1 = 400*units.K
Q_(101325, 'Pa') == 1.0*units.atm
substance = 'water'
print('T = {}, p = {}'.format(T_1, p_1))
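# A state is always fixed by a pair of independent properties; here temperature and pressure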
st_1 = State(substance, T=T_1, p=p_1)
luke = Q_(1.0, 'atm')
leia = Q_(400.0, 'K')
print('Does luke equal p_1?', luke == p_1)
print('Does leia equal T_1?', leia == T_1)
st_starwars = State(substance, T=leia, p=luke)
print('Does st_starwars equal st_1?', st_starwars == st_1)
print('T_1 = {}'.format(st_1.T))
print('p_1 = {}'.format(st_1.p))
print('v_1 = {}'.format(st_1.v))
print('u_1 = {}'.format(st_1.u))
print('h_1 = {}'.format(st_1.h))
print('s_1 = {}'.format(st_1.s))
print('x_1 = {}'.format(st_1.x))
print('cp_1 = {}'.format(st_1.cp))
print('cv_1 = {}'.format(st_1.cv))
print('phase_1 = {}'.format(st_1.phase))
T_2 = Q_(100.0, 'degC')
x_2 = Q_(0.1, 'dimensionless')
st_2 = State('water', T=T_2, x=x_2)
print('T_2 = {}'.format(st_2.T))
print('p_2 = {}'.format(st_2.p))
print('v_2 = {}'.format(st_2.v))
print('u_2 = {}'.format(st_2.u))
print('h_2 = {}'.format(st_2.h))
print('s_2 = {}'.format(st_2.s))
print('x_2 = {}'.format(st_2.x))
x_2 == Q_(10.0, 'percent')
print(st_2.T.to('degF'))
print(st_2.s.to('BTU/(lb*degR)'))
print(st_2.T.to('joule'))
Q_(1000.0, 'joule').to('watt')
## Other Common Errors
set_default_units("EE")
st_3 = State("water", T = Q_(100, 'degC'), p = Q_(1.0, 'atm'))
print(st_3.s)
st_4 = State("water", T = Q_(100, 'degC'), p = Q_(1.0, 'atm'), units = "SI")
print(st_4.s)
st_4.units = None
print(st_4.s)
set_default_units(None)
State('water', v=Q_(1000.0, 'degC'), p=Q_(1.0, 'bar'))
h_5 = Q_(2000.0, 'kJ/kg')
s_5 = Q_(3.10, 'kJ/(kg*K)')
st_5 = State('water', h=h_5, s=s_5)
print('T_5 = {}'.format(st_5.T))
print('p_5 = {}'.format(st_5.p))
print('v_5 = {}'.format(st_5.v))
print('u_5 = {}'.format(st_5.u))
print('h_5 = {}'.format(st_5.h))
print('s_5 = {}'.format(st_5.s))
print('x_5 = {}'.format(st_5.x))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pint and Units
Step2: We can use whatever units we'd like; Pint supports a wide variety of units.
Step3: Another way to specify the units is to use the units class that we imported. This class has a number of attributes (text following a period) that can be used to create a quantity with units by multiplying a number with the unit.
Step4: The two ways of creating the units are equivalent. The following cell should print True to demonstrate this.
Step5: Note the convention we are using here
Step6: Now we need to create the State and assign values for the properties. Properties of the state are set as arguments to the State class, and they must always be set in pairs; we cannot set a single property at a time. The syntax is
Step7: Note again the convention we are using here
Step8: <div class="alert alert-warning">
Step9: In this case, the value for the quality is the special Python value None. This is because at 400 K and 101325 Pa, the state of water is a superheated vapor and the quality is undefined except in the vapor dome. To access states in the vapor dome, we cannot use T and p as independent properties, because they are not independent inside the vapor dome. Instead, we have to use the pairs involving the other properties (possibly including the quality) to set the state. When we define the quality, the units are dimensionless or percent. For instance
Step10: In addition, whether you use the 'dimensionless' "units" for the quality as above, or use the 'percent' "units", the result is exactly equivalent. The next cell should print True to the screen to demonstrate this.
Step11: From these results, we can see that the units of the properties stored in the State are always SI units - Kelvin, Pascal, m<sup>3</sup>/kg, J/kg, and J/(kg-Kelvin). We can use the to function to convert the units to anything we want, provided the dimensions are compatible. The syntax is State.property.to('units').
Step12: <div class="alert alert-info">
Step13: Here we have tried to convert from 'kelvin' to 'joule', and the error message, which is the last line, says
Step14: Default Units
Step15: Other Common Errors
Step16: Summary
|
76 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
77 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
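# Load the closing price (column index 6) and the volume (column index 7); unpack=True returns them as separate arrays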
c, v = np.loadtxt('data.csv', delimiter=',', usecols=(6,7), unpack=True)
c
v
# Select the 4th column: the opening price
opening_price = np.loadtxt('data.csv', delimiter=',', usecols=(3,), unpack=True)
print opening_price
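# VWAP = sum(close * volume) / sum(volume); np.average with weights computes exactly this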
vwap = np.average(c, weights=v)
print "VWAP =", vwap
t = np.arange(len(c))
print "twap = ",np.average(c, weights=t)
mean = np.mean(c)
print "mean = ",mean
print "mean = ", c.mean()
h,l = np.loadtxt('data.csv', delimiter=',', usecols=(4,5), unpack=True)
print 'highest = ', np.max(h)
print 'lowest = ', np.min(l)
print 'Spread high price : ', np.ptp(h)
print 'Spread low price : ', np.ptp(l)
closing_price = np.loadtxt('data.csv', delimiter=',', usecols=(6,), unpack=True)
print 'median = ', np.median(closing_price)
sorted_closing = np.msort(closing_price)
print "sorted_closing_price = ", sorted_closing
# First determine whether the number of elements is odd or even
N = len(closing_price)
median_ind = (N-1)/2
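# N & 0x1 tests the lowest bit: non-zero means N is odd, so the middle element is the median; otherwise average the two middle elements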
if N & 0x1 :
print "median = ", sorted_closing[median_ind]
else:
print "median = ", (sorted_closing[median_ind]+sorted_closing[median_ind+1])/2
print "variance = ", np.var(closing_price)
# Compute the variance manually from its definition
print 'variance from definition = ', np.mean( (closing_price-c.mean())**2 )
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data.csv file contains Apple's historical stock price data. The first column is the stock symbol, the second is the date in dd-mm-yyyy format, the third is empty, the following columns are in turn the opening price (4), the high (5), the low (6) and the closing price (7), and the last column is the trading volume for that day (8).
Step2: 2. Computing averages
Step3: TWAP is the Time-Weighted Average Price. Its basic idea is that recent prices matter more, so we should give higher weights to the more recent prices.
Step4: 2.2 Arithmetic mean
Step5: 3. Finding the maximum, the minimum and the range of values
Step6: The ptp function in numpy computes the range of an array's values. It returns the difference between the maximum and minimum elements, i.e. max(array)-min(array).
Step7: 4. Computing the median
Step8: Sort the array first, then take the median
Step9: 5. Computing the variance
|
78 | <ASSISTANT_TASK:>
Python Code:
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
HTML(open(css_file, "r").read())
# Import Libraries
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Define parameters
# velocity m/s
# distance from source
# length of seismogram (s)
# number of time samples
# time increment
# source time
# Acquisition geometry
# coordinates of source
# coordinates of receiver
# Define time vector
# Calculating Green's function in 1D
# initialization G with zeros
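# ---------------------------------------------------------------------------
# Added sketch (assumption): the values below are illustrative choices, not
# the notebook's original parameters. One common form of the 1D Green's
# function for a homogeneous acoustic medium is G1(t) = H(t - r/c) / (2*c),
# with H the Heaviside step function.
# ---------------------------------------------------------------------------
c = 3000.                      # assumed acoustic velocity (m/s)
xs, xr = 0., 3000.             # assumed source and receiver coordinates (m)
r = np.abs(xr - xs)            # source-receiver distance (m)
tmax = 3.                      # assumed length of seismogram (s)
nt = 1024                      # assumed number of time samples
dt = tmax / nt                 # time increment (s)
time = np.arange(nt) * dt      # time vector
G1 = np.zeros(nt)              # initialize G with zeros
G1[time >= r / c] = 1. / (2. * c)   # Heaviside arrival at t = r/c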
# Plotting Green's function in 1D
plt.plot(time, G1)
plt.title("Green's function for hom. 1D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
# Calculation of 2D Green's function
# initialization G with zeros
# Plotting Green's function in 2D
plt.plot(time, G2)
plt.title("Green's function for hom. 2D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Calculation of 3D Green's function
# initialization G with zeros
# defining offset
# defining amplitudes
# time arrival
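# Added sketch (assumption): in 3D the Green's function of the homogeneous
# acoustic medium is a delta pulse, G3(t) = delta(t - r/c) / (4*pi*c**2*r).
# On a discrete time axis the delta is approximated by one sample of height
# 1/dt at the arrival time; c, r, nt and dt are the illustrative values
# assumed in the sketch above.
G3 = np.zeros(nt)
i_arr = int(np.floor((r / c) / dt))   # sample index of the arrival time t = r/c
G3[i_arr] = 1. / (4. * np.pi * c ** 2 * r) / dt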
# Plotting Green's function in 3D
plt.plot(time, G3)
plt.title("Green's function for hom. 3D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Defining source time function
# Frequency (Hz)
# period
# defining t0
# Initialization of source-time function
# Initialization of first derivative of gaussian
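# Added sketch (assumption): the first derivative of a Gaussian as source
# wavelet; f0 and t0 are illustrative choices, not the notebook's originals.
f0 = 10.            # assumed dominant frequency (Hz)
T0 = 1. / f0        # period (s)
t0 = 4. * T0        # assumed onset time so the wavelet is causal
src = -2. * (time - t0) * f0 ** 2 * np.exp(-(f0 ** 2) * (time - t0) ** 2)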
# Plotting of source time function
plt.plot(time, src)
plt.title('Source time function')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
# Computation of 1D seismogram
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G1_seis=
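# One possible solution (sketch), assuming G1, src, nt and dt from the
# sketches above: convolve the Green's function with the source wavelet,
# keep the first nt samples, and scale by dt to approximate the continuous
# convolution integral.
G1_seis = np.convolve(G1, src, mode='full')[:nt] * dt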
# Plotting Green's function in 1D
plt.plot(time, G1)
plt.title("Green's function for hom. 1D acoustic medium" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim (0, tmax)
plt.grid()
plt.show()
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G2_seis=
# Plotting Green's function in 2D
plt.plot(time, G2)
plt.title("Green's function in 2D" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim((0, tmax))
plt.grid()
# Convolution of Green's function with the 1st derivative of a Gaussian
# COMPUTE YOUR SEISMOGRAM HERE!
#G3_seis =
# Plotting Green's function in 3D
plt.plot(time, G3)
plt.title("Green's function in 3D" )
plt.xlabel("Time, s")
plt.ylabel("Amplitude")
plt.xlim((0, tmax))
plt.grid()
plt.show()
# Plotting convolved Green's function in 1D
# PLOT YOUR SEISMOGRAM HERE!
# plt.plot()
plt.title('After convolution')
plt.xlabel('Time, s')
plt.ylabel('Amplitude')
plt.xlim (0, tmax)
plt.grid()
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computation of Green's functions and seismograms for the acoustic wave equation
Step2: 2D Green's function
Step3: 3D Green's function
Step4: Exercise
Step5: Exercise
|
79 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! pip install -q tensorflow-model-optimization
import tensorflow as tf
from tensorflow import keras
import numpy as np
import tempfile
import zipfile
import os
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture.
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
validation_split=0.1,
epochs=10
)
_, baseline_model_accuracy = model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
_, keras_file = tempfile.mkstemp('.h5')
print('Saving model to: ', keras_file)
tf.keras.models.save_model(model, keras_file, include_optimizer=False)
import tensorflow_model_optimization as tfmot
cluster_weights = tfmot.clustering.keras.cluster_weights
CentroidInitialization = tfmot.clustering.keras.CentroidInitialization
clustering_params = {
'number_of_clusters': 16,
'cluster_centroids_init': CentroidInitialization.LINEAR
}
# Cluster a whole model
clustered_model = cluster_weights(model, **clustering_params)
# Use smaller learning rate for fine-tuning clustered model
opt = tf.keras.optimizers.Adam(learning_rate=1e-5)
clustered_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=opt,
metrics=['accuracy'])
clustered_model.summary()
# Fine-tune model
clustered_model.fit(
train_images,
train_labels,
batch_size=500,
epochs=1,
validation_split=0.1)
_, clustered_model_accuracy = clustered_model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Clustered test accuracy:', clustered_model_accuracy)
final_model = tfmot.clustering.keras.strip_clustering(clustered_model)
_, clustered_keras_file = tempfile.mkstemp('.h5')
print('Saving clustered model to: ', clustered_keras_file)
tf.keras.models.save_model(final_model, clustered_keras_file,
include_optimizer=False)
clustered_tflite_file = '/tmp/clustered_mnist.tflite'
converter = tf.lite.TFLiteConverter.from_keras_model(final_model)
tflite_clustered_model = converter.convert()
with open(clustered_tflite_file, 'wb') as f:
f.write(tflite_clustered_model)
print('Saved clustered TFLite model to:', clustered_tflite_file)
def get_gzipped_model_size(file):
# It returns the size of the gzipped model in bytes.
import os
import zipfile
_, zipped_file = tempfile.mkstemp('.zip')
with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:
f.write(file)
return os.path.getsize(zipped_file)
print("Size of gzipped baseline Keras model: %.2f bytes" % (get_gzipped_model_size(keras_file)))
print("Size of gzipped clustered Keras model: %.2f bytes" % (get_gzipped_model_size(clustered_keras_file)))
print("Size of gzipped clustered TFlite model: %.2f bytes" % (get_gzipped_model_size(clustered_tflite_file)))
converter = tf.lite.TFLiteConverter.from_keras_model(final_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
_, quantized_and_clustered_tflite_file = tempfile.mkstemp('.tflite')
with open(quantized_and_clustered_tflite_file, 'wb') as f:
f.write(tflite_quant_model)
print('Saved quantized and clustered TFLite model to:', quantized_and_clustered_tflite_file)
print("Size of gzipped baseline Keras model: %.2f bytes" % (get_gzipped_model_size(keras_file)))
print("Size of gzipped clustered and quantized TFlite model: %.2f bytes" % (get_gzipped_model_size(quantized_and_clustered_tflite_file)))
def eval_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for i, test_image in enumerate(test_images):
if i % 1000 == 0:
print('Evaluated on {n} results so far.'.format(n=i))
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
print('\n')
# Compare prediction results with ground truth labels to calculate accuracy.
prediction_digits = np.array(prediction_digits)
accuracy = (prediction_digits == test_labels).mean()
return accuracy
interpreter = tf.lite.Interpreter(model_content=tflite_quant_model)
interpreter.allocate_tensors()
test_accuracy = eval_model(interpreter)
print('Clustered and quantized TFLite test_accuracy:', test_accuracy)
print('Clustered TF test accuracy:', clustered_model_accuracy)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Weight clustering of a Keras example
Step2: Train a tf.keras model for MNIST without clustering
Step3: Evaluate the baseline model and save it for later use
Step4: Fine-tune the pre-trained model with clustering
Step5: Fine-tune the model and evaluate its accuracy against the baseline
Step6: For this example, the loss in test accuracy after clustering is minimal compared to the baseline.
Step7: Create a 6x smaller model from clustering
Step8: Then, create a compressible model for TFLite. You can convert the clustered model to a format that is runnable on your targeted backend. TensorFlow Lite is an example you can use to deploy to mobile devices.
Step9: Define a helper function to actually compress the models via gzip and measure the zipped size.
Step10: Observe that the model is 6x smaller from clustering.
Step11: Create an 8x smaller TFLite model by combining weight clustering and post-training quantization
Step12: See the persistence of accuracy from TF to TFLite
Step13: Evaluate the clustered and quantized model, and then check that the accuracy from TensorFlow persists to the TFLite backend.
|
80 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
PARAM_FILE = BUCKET_NAME + "/params.txt"
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
"--param-file=" + PARAM_FILE,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
"--param-file=" + PARAM_FILE,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
def create_custom_job(custom_job):
response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = create_custom_job(custom_job)
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
def get_custom_job(name, silent=False):
response = clients["job"].get_custom_job(name=name)
if silent:
return response
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = get_custom_job(job_id)
while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
import numpy as np
from tensorflow.keras.datasets import boston_housing
(_, _), (x_test, y_test) = boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float32)
return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
for _ in range(13):
x_test[_] = scale(x_test[_])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
model.evaluate(x_test, y_test)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
test_item_1 = x_test[0]
test_label_1 = y_test[0]
test_item_2 = x_test[1]
test_label_2 = y_test[1]
print(test_item_1.shape)
import json
gcs_input_uri = BUCKET_NAME + "/" + "test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {serving_input: test_item_1.tolist()}
f.write(json.dumps(data) + "\n")
data = {serving_input: test_item_2.tolist()}
f.write(json.dumps(data) + "\n")
MIN_NODES = 1
MAX_NODES = 1
BATCH_MODEL = "boston_batch-" + TIMESTAMP
def create_batch_prediction_job(
display_name,
model_name,
gcs_source_uri,
gcs_destination_output_uri_prefix,
parameters=None,
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
batch_prediction_job = {
"display_name": display_name,
# Format: 'projects/{project}/locations/{location}/models/{model_id}'
"model": model_name,
"model_parameters": json_format.ParseDict(parameters, Value()),
"input_config": {
"instances_format": IN_FORMAT,
"gcs_source": {"uris": [gcs_source_uri]},
},
"output_config": {
"predictions_format": OUT_FORMAT,
"gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
},
"dedicated_resources": {
"machine_spec": machine_spec,
"starting_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
}
response = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try:
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", response.labels)
return response
IN_FORMAT = "jsonl"
OUT_FORMAT = "jsonl"
response = create_batch_prediction_job(
BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME
)
# The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
def get_batch_prediction_job(job_name, silent=False):
response = clients["job"].get_batch_prediction_job(name=job_name)
if silent:
return response.output_config.gcs_destination.output_uri_prefix, response.state
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try: # not all data types support explanations
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" error:", response.error)
gcs_destination = response.output_config.gcs_destination
print(" gcs_destination")
print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
return gcs_destination.output_uri_prefix, response.state
predictions, state = get_batch_prediction_job(batch_job_id)
def get_latest_predictions(gcs_out_dir):
    """Get the latest prediction subfolder using the timestamp in the subfolder name"""
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
predictions, state = get_batch_prediction_job(batch_job_id, True)
if state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", state)
if state == aip.JobState.JOB_STATE_FAILED:
raise Exception("Batch Job Failed")
else:
folder = get_latest_predictions(predictions)
! gsutil ls $folder/prediction.results*
print("Results:")
! gsutil cat $folder/prediction.results*
print("Errors:")
! gsutil cat $folder/prediction.errors*
break
time.sleep(60)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Hardware Accelerators
Step13: Container (Docker) image
Step14: Machine Type
Step15: Tutorial
Step16: Train a model
Step17: Prepare your disk specification
Step18: Define the worker pool specification
Step19: Assemble a job specification
Step20: Examine the training package
Step21: Task.py contents
Step22: Store training script on your Cloud Storage bucket
Step23: Train the model
Step24: Now get the unique identifier for the custom job you created.
Step25: Get information on a custom job
Step26: Deployment
Step27: Load the saved model
Step28: Evaluate the model
Step29: Perform the model evaluation
Step30: Upload the model for serving
Step31: Upload the model
Step32: Get Model resource information
Step33: Model deployment for batch prediction
Step34: Make the batch input file
Step35: Compute instance scaling
Step36: Make batch prediction request
Step37: Now get the unique identifier for the batch prediction job you created.
Step38: Get information on a batch prediction job
Step40: Get the predictions
Step41: Cleaning up
|
81 | <ASSISTANT_TASK:>
Python Code:
__author__ = 'ATSC-301 UBC'
import glob
import numpy as np
import matplotlib.pyplot as plt
from __future__ import division
from __future__ import print_function
% matplotlib inline
import h5py
import scipy.io
from mpl_toolkits.basemap import Basemap
hdf5_L1B=glob.glob('_data/MODIS_L1B/MYD021*.h5')
print("MODIS L1B file found {}".format(hdf5_L1B))
hdf5_Geo=glob.glob('_data/MODIS_L1B/MYD03*.h5')
print("MODIS Geolocation file found {}".format(hdf5_Geo))
hdf5_obj=h5py.File(hdf5_L1B[0], 'r')
geo_obj=h5py.File(hdf5_Geo[0], 'r')
hdf5_attr=hdf5_obj.keys()
print('Attributes in {}'.format(hdf5_L1B))
print('=============================================================')
print('hdf5_attr=hdf5_obj.keys() \n\n{}'.format(hdf5_attr))
sub_attr=hdf5_obj['MODIS_SWATH_Type_L1B'].keys()
print("\n\n\tSub-attributes in 'MODIS_SWATH_Type_L1B'")
print('\t=============================================================')
print("\tsub_attr=hdf5_obj['MODIS_SWATH_Type_L1B'].keys() \n\n\t{}".format(sub_attr))
subsub_attr=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields'].keys()
print("\n\n\t\tSubsub-attributes in 'Data Fields'")
print('\t\t=============================================================')
print("\t\tsubsub_attr=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields'].keys() \n\n\t\t{}".format(subsub_attr))
# Channel-31
L1B_emiss=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'][:];
print("Size of 'EV_1KM_Emissive':\n===========================\n{}".format(L1B_emiss.shape))
# Channel-1
L1B_ref=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_250_Aggr1km_RefSB'][:];
print("\nSize of 'EV_500_Aggr1km_RefSB':\n================================\n{}".format(L1B_ref.shape))
band_info=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['Band_1KM_Emissive'][:]
print('List of MODIS L1B Channels\n=====================================\n{}'.format(band_info))
band_info=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['Band_250M'][:]
print('\nList of MODIS L1B Channels\n=====================================\n{}'.format(band_info))
C31=L1B_emiss[10, :, :]
C1=L1B_ref[1, :, :]
print('Channel-31 info\n===============================================================')
hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs.items()
a_list=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs.values()
print(a_list)
C31_scale=a_list[6][10]
C31_offset=a_list[7][10]
C1_scale=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_250_Aggr1km_RefSB'].attrs.values()[9][0]
C1_offset=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_250_Aggr1km_RefSB'].attrs.values()[10][0]
#corrected_counts_scales=hdf5_obj['MODIS_SWATH_Type_L1B']['Data Fields']['EV_250_Aggr1km_RefSB'].attrs.values()[12][0]
C31=(C31 - C31_offset * np.ones(C31.shape))*C31_scale
C1=(C1 - C1_offset * np.ones(C1.shape))*C1_scale
np.max(C1)
C_x=geo_obj['MODIS_Swath_Type_GEO']['Geolocation Fields']['Longitude'][:]
C_y=geo_obj['MODIS_Swath_Type_GEO']['Geolocation Fields']['Latitude'][:]
print('===================================================')
print('Size of Longitude: {}'.format(C_x.shape))
print('Longitude Interval: {} ~ {}'.format(np.min(C_x), np.max(C_x)))
print('===================================================')
print('Size of Latitude: {}'.format(C_y.shape))
print('Latitude Interval: {} ~ {}'.format(np.min(C_y), np.max(C_y)))
def reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res):
'''
=========================================================================================
Reproject MODIS L1B file to a regular grid
-----------------------------------------------------------------------------------------
d_array, x_array, y_array, bin_count = reproj_L1B(raw_data, raw_x, raw_y, xlim, ylim, res)
-----------------------------------------------------------------------------------------
Input:
raw_data: L1B data, N*M 2-D array.
raw_x: longitude info. N*M 2-D array.
raw_y: latitude info. N*M 2-D array.
xlim: range of longitude, a list.
ylim: range of latitude, a list.
res: resolution, single value.
Output:
d_array: L1B reprojected data.
x_array: reprojected longitude.
y_array: reprojected latitude.
bin_count: how many raw data point included in a reprojected grid.
Note:
function do not performs well if "res" is larger than the resolution of input data.
size of "raw_data", "raw_x", "raw_y" must agree.
=========================================================================================
'''
import numpy as np
x_bins=np.arange(xlim[0], xlim[1], res)
y_bins=np.arange(ylim[0], ylim[1], res)
# x_indices=np.digitize(raw_x.flat, x_bins)
# y_indices=np.digitize(raw_y.flat, y_bins)
x_indices=np.searchsorted(x_bins, raw_x.flat, 'right')
y_indices=np.searchsorted(y_bins, raw_y.flat, 'right')
y_array=np.zeros([len(y_bins), len(x_bins)], dtype=np.float)
x_array=np.zeros([len(y_bins), len(x_bins)], dtype=np.float)
d_array=np.zeros([len(y_bins), len(x_bins)], dtype=np.float)
bin_count=np.zeros([len(y_bins), len(x_bins)], dtype=np.int)
for n in range(len(y_indices)): #indices
bin_row=y_indices[n]-1 # '-1' is because we call 'right' in np.searchsorted.
bin_col=x_indices[n]-1
bin_count[bin_row, bin_col] += 1
x_array[bin_row, bin_col] += raw_x.flat[n]
y_array[bin_row, bin_col] += raw_y.flat[n]
d_array[bin_row, bin_col] += raw_data.flat[n]
for i in range(x_array.shape[0]):
for j in range(x_array.shape[1]):
if bin_count[i, j] > 0:
x_array[i, j]=x_array[i, j]/bin_count[i, j]
y_array[i, j]=y_array[i, j]/bin_count[i, j]
d_array[i, j]=d_array[i, j]/bin_count[i, j]
else:
d_array[i, j]=np.nan
x_array[i, j]=np.nan
y_array[i,j]=np.nan
return d_array, x_array, y_array, bin_count
xlim=[np.min(C_x), np.max(C_x)]
ylim=[np.min(C_y), np.max(C_y)]
C31_grid, longitude, latitude, bin_count = reproj_L1B(C31, C_x, C_y, xlim, ylim, 0.1)
C31_grid=np.ma.masked_where(np.isnan(C31_grid), C31_grid)
bin_count=np.ma.masked_where(np.isnan(bin_count), bin_count)
longitude=np.ma.masked_where(np.isnan(longitude), longitude)
latitude=np.ma.masked_where(np.isnan(latitude), latitude)
longitude.shape
fig=plt.figure(figsize=(10.5, 9.5))
ax=plt.gca()
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
image=ax.pcolormesh(longitude, latitude, C31_grid)
#plt.colorbar(image)
plt.show
res=0.05;
xlim=[np.min(C_x), np.max(C_x)]
ylim=[np.min(C_y), np.max(C_y)]
C31_grid, longitude, latitude, bin_count = reproj_L1B(C31, C_x, C_y, xlim, ylim, res)
C1_grid, longitude, latitude, bin_count = reproj_L1B(C1, C_x, C_y, xlim, ylim, res)
# create bins for channel-31
C31_bins = 100
C31_lim=[np.nanmin(C31_grid), np.nanmax(C31_grid)]
C31_bins=np.linspace(C31_lim[0], C31_lim[1], C31_bins, dtype=np.float)
# and channel-1
C1_bins = 150
C1_lim=[np.nanmin(C1_grid), np.nanmax(C1_grid)]
C1_bins=np.linspace(C1_lim[0], C1_lim[1], C1_bins, dtype=np.float)
y=C31_grid.flat[:]; y_bins=C31_bins # x: C31
x=C1_grid.flat[:]; x_bins=C1_bins # y: C1
H, y_edges, x_edges = np.histogram2d(y, x, bins=(y_bins, x_bins))
X, Y = np.meshgrid(x_edges[:-1], y_edges[:-1])
# make_axes_locatable ---> for axis control
from mpl_toolkits.axes_grid1 import make_axes_locatable
# set axis
left=0.1; width = 0.8; bottom=0.1; height = 0.65
gap=0.02; hist_len=0.2; cbar_len=0.12
# three boxes
rect_main = [left+hist_len+gap, bottom, width, height]
rect_histx = [left+hist_len+gap, left+height+gap, width-cbar_len, hist_len]
rect_histy = [left, bottom, hist_len, height]
# clev
#clevs=range(40, 281, 40)
clevs=np.arange(3, 31, 3)
CMap=plt.cm.PuBu
CMap.set_over(CMap(np.arange(256))[-1, 0:3])
CMap.set_under('w')
#
xlim_bin=[np.min(X), np.max(X)]
ylim_bin=[np.min(Y), np.max(Y)]
#
# ========== figure ========== #
fig=plt.figure(figsize=(9, 9))
# ========== Main ========== #
# axis
axMain=plt.axes(rect_main)
axMain.yaxis.tick_right()
axMain.yaxis.set_label_position('right')
axMain.set_xlim(xlim_bin)
axMain.set_ylim(ylim_bin)
axMain.set_xlabel('Channel-1', fontsize=12)
axMain.set_ylabel('Channel-31', fontsize=12)
axMain.set_title('2-D Histogram', fontsize=16, fontweight='bold', x=1.15, y=1.15)
divider=make_axes_locatable(axMain)
# grid and frame
plt.grid() # grid on
[i.set_linewidth(2) for i in axMain.spines.itervalues()] # a bold frame
CS=axMain.contourf(X, Y, H/np.max(H)*100, clevs, cmap=CMap, extend='both') # 2-D histgram
CAx=divider.append_axes('right', size='5%', pad=0.75)
CBar=plt.colorbar(CS, cax=CAx)
CBar.set_label('Percentage ( % )', fontsize=10)
CBar.ax.tick_params(axis='y', length=22.5)
# draw line
axMain.plot(x_edges, x_edges*5.5+6.5*np.ones(x_edges.shape), \
color='k', linestyle='--', linewidth=5)
axMain.text(0.4, 6.25, 'Cloud', fontsize=16, fontweight='bold', \
ha='center', va='center', color='k')
axMain.text(0.125, 8.0, 'Ocean', fontsize=16, fontweight='bold', \
ha='center', va='center', color='k')
#
# ========== Hist-x ========== #
axHistx=plt.axes(rect_histx)
axHistx.hist(x, bins=x_bins, color=[0.3, 0.6, 0.8])
axHistx.set_xlim(xlim_bin)
axHistx.axes.get_xaxis().set_visible(False)
# scientific notation for x, y-axis
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
[i.set_linewidth(2) for i in axHistx.spines.itervalues()]
#
# ========== Hist-y ========== #
axHisty = plt.axes(rect_histy)
axHisty.hist(y, bins=y_bins, color=[0.3, 0.6, 0.8], orientation='horizontal')
axHisty.set_ylim(ylim_bin)
axHisty.invert_xaxis()
axHisty.axes.get_yaxis().set_visible(False)
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
[i.set_linewidth(2) for i in axHisty.spines.itervalues()]
# savefig
plt.savefig('_figures/01_MODIS_L1B_histgram.png', dpi=450, facecolor='w', edgecolor='w',
orientation='portrait', papertype='a4', format='png',
transparent=True, bbox_inches='tight', pad_inches=0,
frameon=None)
# show
plt.show()
fig=plt.figure(figsize=(8, 8))
ax=plt.gca()
plt.grid()
ax.set_xlim(xlim_bin[0], xlim_bin[1])
ax.set_ylim(ylim_bin[0], ylim_bin[1])
ax.set_xlabel('Channel-1', fontsize=12)
ax.set_ylabel('Channel-31', fontsize=12)
ax.plot(x, y, color=[0.5, 0.5, 0.5], marker='.', linestyle='None')
ax.plot(x_edges, x_edges*5.5+6.5*np.ones(x_edges.shape), linestyle='--', color='k', linewidth=5)
ax.text(0.4, 6.25, 'Cloud', fontsize=16, fontweight='bold', \
ha='center', va='center', color='k')
ax.text(0.10725, 7.75, 'Ocean', fontsize=16, fontweight='bold', \
ha='center', va='center', color='k')
plt.savefig('_figures/01_MODIS_L1B_Divide_Cloud_and_Ocean.png', dpi=450, facecolor='w', edgecolor='w',
orientation='portrait', papertype='a4', format='png',
transparent=True, bbox_inches='tight', pad_inches=0,
frameon=None)
plt.show()
criteria_k=5.5 # less than
cirteria_b=6.5
C1_clouds=np.empty((C31_grid.shape[0], C31_grid.shape[1],))
C1_clouds[:]=np.nan
C31_clouds=np.empty((C31_grid.shape[0], C31_grid.shape[1],))
C31_clouds[:]=np.nan
for i in range(C31_grid.shape[0]):
for j in range(C31_grid.shape[1]):
if(C31_grid[i, j] < C1_grid[i, j]*0.3+cirteria_b):
# print(C31_grid[i, j])
C31_clouds[i, j]=C31_grid[i, j]
C1_clouds[i, j]=C1_grid[i, j]
C31_clouds_masked=np.ma.masked_where(np.isnan(C31_clouds), C31_clouds)
fig=plt.figure(figsize=(10.5, 9.5))
ax=plt.gca()
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
image=ax.pcolormesh(longitude, latitude, C31_clouds_masked)
#plt.colorbar(image)
plt.show
def planckInvert(wavel,Llambda):
    """Input wavelength in microns and Llambda in W/m^2/micron/sr;
    output brightness temperature in K (note that we've removed the factor
    of pi because we are working with radiances, not fluxes)."""
c=2.99792458e+08 #m/s -- speed of light in vacumn
h=6.62606876e-34 #J s -- Planck's constant
kb=1.3806503e-23 # J/K -- Boltzman's constant
c1=2.*h*c**2.
c2=h*c/kb
Llambda=Llambda*1.e6 #convert to W/m^2/m/sr
wavel=wavel*1.e-6 #convert wavelength to m
Tbright=c2/(wavel*np.log(c1/(wavel**5.*Llambda) + 1.))
return Tbright
cloud_Tbright=planckInvert(11.02, C31_clouds)
np.nanmax(cloud_Tbright)
np.nanmin(cloud_Tbright)
import scipy.io
# save as *.mat
scipy.io.savemat('_share/01_MODIS_L1B_TBright', {'longitude': longitude, 'latitude': latitude, 'cloud_Tbright': cloud_Tbright})
# mask the result
cloud_Tbright_masked=np.ma.masked_where(np.isnan(cloud_Tbright), cloud_Tbright)
from mpl_toolkits.basemap import Basemap
lonlim=xlim
latlim=ylim
vancity_lat=49.25
vancity_lon=-123.1
# Colormap
CMap=plt.cm.hot_r#gist_heat # hot, afmhot, gnuplot
#
proj=Basemap(resolution='l', projection='lcc', \
lat_1=30, lat_2=60, lat_0=45, lon_0=-140, \
llcrnrlon=-155, llcrnrlat=30, \
urcrnrlon=-110, urcrnrlat=56)
# create figure, add axes
fig=plt.figure(figsize=(12, 12))
ax=plt.gca()
## parallels and meridians.
parallels=np.arange(-90, 90, 5)
meridians=np.arange(0, 360, 5)
proj.drawparallels(parallels, labels=[1, 0, 0, 0],\
fontsize=10, latmax=90)
proj.drawmeridians(meridians, labels=[0, 0, 0, 1],\
fontsize=10, latmax=90)
# mask ocean/land to distinguish clouds
proj.drawlsmask(land_color=[0.925, 0.875, 0.375], ocean_color=[0.375, 0.5, 0.75], \
lakes=False, resolution='l')
# draw coast
proj.drawcoastlines(linewidth=1.5, linestyle='solid', color=[0.25, 0.25, 0.25])
# compute native x,y coordinates of grid.
x, y=proj(longitude, latitude)
x_van, y_van=proj(vancity_lon, vancity_lat)
x_text, y_text=proj(vancity_lon+4.5, vancity_lat-0.25)
# pcolor plot
CS=proj.pcolor(x, y, cloud_Tbright_masked, cmap=CMap, vmin=210, vmax=275)
# colorbar
CBar=proj.colorbar(CS, 'right', size='5%', pad='5%')
CBar.set_label('Brightness Temperature ( K )', fontsize=12, fontweight='bold')
CBar.ax.tick_params(axis='y', length=0)
#CBar.ax.invert_yaxis()
# Vancouver
proj.plot(x_van, y_van, marker='o', markersize=18, mfc='k', mec='k')
plt.text(x_text, y_text, 'Vancouver', fontsize=16, fontweight='bold',
ha='center', va='center', color='k')
# title
ax.set_title('Brightness Temperature\nMYD021KM.A2006303.2220 channel-31 ',\
fontweight='bold', fontsize=14)
# Save figure
plt.savefig('_figures/01_MODIS_L1B_TBright.png', dpi=450, facecolor='w', edgecolor='w',
orientation='portrait', papertype='a4', format='png',
transparent=True, bbox_inches='tight', pad_inches=0,
frameon=None)
# Show
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Content
Step2: We import h5py to read HDF5 files
Step3: scipy.io for saving data in *.mat format
Step4: For the map view of data, we need mpl_toolkits.basemap
Step5: Read MODIS L1B data through h5py
Step6: Get HDF5 file object
Step7: h5py.File('filename.h5', 'r') returns the object of HDF5 file.
Step8: Some external tools
Step9: Here the file has a size of Channels * Longitude * Latitude.
Step10: Then we can chose the channel we want
Step11: Scale factor and offset value
Step12: Here radiance_scales and radiance_offsets are what we want. Number of channels can also be seen through band_names.
Step13: radiance_scales and radiance_offsets are the 7th and 8th group of a_list, and channel-31 is the 11th element of the group.
Step14: We do the same thing for channel-1 data, but now we use reflectance_scales
Step15: Finally, we correct the data, numpy.ones is the same as ones.m in MATLAB.
Step16: Print the maximum data and see if it is reasonable.
Step17: Geolocation field
Step18: Reproject MODIS L1B data to a regular grid
Step19: Test if reproj_L1B works well
Step20: Mask NaN for plot, also make sure that the data is not too big to plot.
Step21: Plot the result
Step22: Convert channel-31 and channel-1 data
Step23: Identify cloud and ocean via 2-D histogram
Step24: Here, we define channel-1 data on x-axis and call np.histogram2d as what's in <a href='#How-to-use-numpy.histogram2d'>above section</a> to get bin_count value x_edges and y_edges. Noted that masked NumPy array has no attribute flat.
Step25: Then we make 2-D histgram to see the difference between clouds and ocean, the core idea is
Step26: We can see that, there are generally two place where data points are very dense, one is typical ocean, one is typical cloud.
Step27: Eliminate ocean points based on 2-D histgram
Step28: Test if the "criteria" works well
Step30: Calculate brightness temperature on the top of the clouds
Step31: Print the maximum and see if it is reasonable.
Step32: Save the output as *.mat
Step33: Plot cloud_TBright in mpl_toolkits.basemap
|
82 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed to display the images.
%matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Env setup
Step2: Object detection imports
Step3: Model preparation
Step4: Download Model
Step5: Load a (frozen) Tensorflow model into memory.
Step6: Loading label map
Step7: Helper code
Step8: Detection
|
83 | <ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import widgets
from ipywidgets import interact, interactive, fixed
from IPython.display import display,HTML,clear_output
import os
HTML('''<script>code_show=true;function code_toggle() {if (code_show){$('div.input').hide();} else {$('div.input').show();} code_show = !code_show} $( document ).ready(code_toggle);</script><form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
import phievo.AnalysisTools as AT
from phievo.AnalysisTools.Notebook import Notebook
notebook = Notebook()
MODEL = "adaptation"
sim = AT.Simulation(MODEL)
read_network = AT.main_functions.read_network
def write_network(net,filename): net.store_to_pickle(filename)
def draw(net,label=False): net.draw(edgeLegend=label)
def fitness(net,trial): return sim.run_dynamics(net,trial,erase_buffer=False,return_treatment_fitness=True)
def gene_knock_out(net,label): net.delete_clean(label,target='species')
def remove_interaction(net,label): net.delete_clean(label,target='interaction')
net = read_network("test.pkl")
draw(net)
gene_knock_out(net,1)
draw(net)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definition and proxy for usefull functions
Step2: Analyse
|
84 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
import matplotlib
#matplotlib.rc('xtick', labelsize=20)
#matplotlib.rc('ytick', labelsize=20)
from scipy.spatial import distance
x = np.loadtxt("data.txt", comments='//')
x.shape
print(x.shape)
# Plot 2 measurements
#for i in x:
# plt.plot(i[0],i[1], 'ko');
plt.scatter(x[:,0], x[:,1], color='black')
plt.xlim(-0.5, 10);
plt.ylim(-0.5, 10);
plt.xlabel('Measurement 1');
plt.ylabel('Measurement 2');
centroid1 = x[0] # first experiment
centroid2 = x[3] # fourth experiment
print(centroid1, centroid2)
# calculate Euclidean distances from centroid 1
sample1= list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid1)
sample1.append(dist)
print("%2d -> %6f" %(i, dist))
# calculate Euclidean distances from centroid 2
sample2 = list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid2)
sample2.append(dist)
print("%2d -> %6f" %(i, dist))
group1 = x[np.array(sample1)<=np.array(sample2)]
print(group1)
group2 = x[np.array(sample2)<np.array(sample1)]
print(group2)
# Replot first categorization
# Plot 2 measurements
for i in group1:
plt.plot(i[0],i[1], 'o',color='magenta');
for i in group2:
plt.plot(i[0],i[1], 'o', color='cyan');
plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12)
plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12)
plt.xlim(-0.5, 10);
plt.ylim(-0.5, 10);
plt.xlabel('Measurement 1');
plt.ylabel('Measurement 2');
# recalculate centroids
centroid1 = np.average(group1, axis=0)
centroid2 = np.average(group2, axis=0)
print(centroid1, centroid2)
# calculate Euclidean distances from new centroid 1
sample1= list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid1)
sample1.append(dist)
print("%2d -> %6f" %(i, dist))
sample2= list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid2)
sample2.append(dist)
print("%2d -> %6f" %(i, dist))
# reasign the groups
group1 = x[np.array(sample1)<=np.array(sample2)]
print(group1)
group2 = x[np.array(sample2)<np.array(sample1)]
print(group2)
# Replot first categorization with new centrodis
# Plot 2 measurements
for i in group1:
plt.plot(i[0],i[1], 'o', color='magenta');
for i in group2:
plt.plot(i[0],i[1], 'o', color='cyan');
plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12)
plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12)
plt.xlim(-0.5, 10);
plt.ylim(-0.5, 10);
plt.xlabel('Measurement 1');
plt.ylabel('Measurement 2');
# recalculate centroids
centroid1 = np.average(group1, axis=0)
centroid2 = np.average(group2, axis=0)
print(centroid1, centroid2)
# calculate Euclidean distances from new centroid 1
sample1= list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid1)
sample1.append(dist)
# calculate Euclidean distances from centroid 2
sample2 = list()
for i, obs in enumerate(x):
dist = distance.euclidean(obs, centroid2)
sample2.append(dist)
# reasign the groups
group1 = x[np.array(sample1)<=np.array(sample2)]
group2 = x[np.array(sample2)<np.array(sample1)]
# Replot first categorization with new centrodis
# Plot 2 measurements
for i in group1:
plt.plot(i[0],i[1], 'o', color='magenta');
for i in group2:
plt.plot(i[0],i[1], 'o', color='cyan');
plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12)
plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12)
plt.xlim(-0.5, 10);
plt.ylim(-0.5, 10);
plt.xlabel('Measurement 1');
plt.ylabel('Measurement 2');
# recalculate centroids
centroid1 = np.average(group1, axis=0)
centroid2 = np.average(group2, axis=0)
print(centroid1, centroid2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Consider the following data set consisting of the scores of two variables on each of 17 experiments
Step2: This data set is to be grouped into two clusters.
|
85 | <ASSISTANT_TASK:>
Python Code:
sc.addPyFile("https://github.com/ibm-watson-data-lab/simple-data-pipe-connector-flightstats/raw/master/flightPredict/training.py")
sc.addPyFile("https://github.com/ibm-watson-data-lab/simple-data-pipe-connector-flightstats/raw/master/flightPredict/run.py")
import training
import run
%matplotlib inline
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from numpy import array
import numpy as np
import math
from datetime import datetime
from dateutil import parser
sqlContext=SQLContext(sc)
training.sqlContext = sqlContext
training.cloudantHost='dtaieb.cloudant.com'
training.cloudantUserName='weenesserliffircedinvers'
training.cloudantPassword='72a5c4f939a9e2578698029d2bb041d775d088b5'
training.weatherUrl='https://4b88408f-11e5-4ddc-91a6-fbd442e84879:p6hxeJsfIb@twcservice.mybluemix.net'
dbName="pycon_flightpredict_training_set"
%time cloudantdata = training.loadDataSet(dbName,"training")
%time cloudantdata.printSchema()
%time cloudantdata.count()
training.scatterPlotForFeatures(cloudantdata, \
"departureWeather.temp","arrivalWeather.temp","Departure Airport Temp", "Arrival Airport Temp")
training.scatterPlotForFeatures(cloudantdata,\
"departureWeather.pressure","arrivalWeather.pressure","Departure Airport Pressure", "Arrival Airport Pressure")
training.scatterPlotForFeatures(cloudantdata,\
"departureWeather.wspd","arrivalWeather.wspd","Departure Airport Wind Speed", "Arrival Airport Wind Speed")
trainingData = training.loadLabeledDataRDD("training")
trainingData.take(5)
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
logRegModel = LogisticRegressionWithLBFGS.train(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, iterations=1000, validateData=False, intercept=False)
print(logRegModel)
from pyspark.mllib.classification import NaiveBayes
#NaiveBayes requires non negative features, set them to 0 for now
modelNaiveBayes = NaiveBayes.train(trainingData.map(lambda lp: LabeledPoint(lp.label, \
np.fromiter(map(lambda x: x if x>0.0 else 0.0,lp.features.toArray()),dtype=np.int)\
))\
)
print(modelNaiveBayes)
from pyspark.mllib.tree import DecisionTree
modelDecisionTree = DecisionTree.trainClassifier(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={})
print(modelDecisionTree)
from pyspark.mllib.tree import RandomForest
modelRandomForest = RandomForest.trainClassifier(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={},numTrees=100)
print(modelRandomForest)
dbTestName="pycon_flightpredict_test_set"
testCloudantdata = training.loadDataSet(dbTestName,"test")
testCloudantdata.count()
testData = training.loadLabeledDataRDD("test")
training.displayConfusionTable=True
training.runMetrics(testData,modelNaiveBayes,modelDecisionTree,logRegModel,modelRandomForest)
rdd = sqlContext.sql("select deltaDeparture from training").map(lambda s: s.deltaDeparture)\
.filter(lambda s: s < 50 and s > 12)
print(rdd.count())
histo = rdd.histogram(50)
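# Spark's RDD.histogram(50) returns a (bucket_boundaries, counts) pair, used for plotting below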
#print(histo[0])
#print(histo[1])
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
bins = [i for i in histo[0]]
params = plt.gcf()
plSize = params.get_size_inches()
params.set_size_inches( (plSize[0]*2.5, plSize[1]*2) )
plt.ylabel('Number of records')
plt.xlabel('Bin')
plt.title('Histogram')
intervals = [abs(j-i) for i,j in zip(bins[:-1], bins[1:])]
values=[sum(intervals[:i]) for i in range(0,len(intervals))]
plt.bar(values, histo[1], intervals, color='b', label = "Bins")
plt.xticks(bins[:-1],[int(i) for i in bins[:-1]])
plt.legend()
plt.show()
class customTrainingHandler(training.defaultTrainingHandler):
def getClassLabel(self, value):
if ( int(value)==0 ):
return "Delayed less than 13 minutes"
elif (int(value)==1 ):
return "Delayed between 13 and 41 minutes"
elif (int(value) == 2 ):
return "Delayed more than 41 minutes"
return value
def numClasses(self):
return 3
def computeClassification(self, s):
return 0 if s.deltaDeparture<13 else (1 if s.deltaDeparture < 41 else 2)
def customTrainingFeaturesNames(self ):
return ["departureTime"]
def customTrainingFeatures(self, s):
dt=parser.parse(s.departureTime)
print(dt)
features=[]
for i in range(0,7):
features.append(1 if dt.weekday()==i else 0)
return features
training.customTrainingHandler=customTrainingHandler()
#reload the training labeled data RDD
trainingData = training.loadLabeledDataRDD("training")
#recompute the models
logRegModel = LogisticRegressionWithLBFGS.train(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, iterations=1000, validateData=False, intercept=False)
modelNaiveBayes = NaiveBayes.train(trainingData.map(lambda lp: LabeledPoint(lp.label, \
np.fromiter(map(lambda x: x if x>0.0 else 0.0,lp.features.toArray()),dtype=np.int)\
))\
)
modelDecisionTree = DecisionTree.trainClassifier(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={})
modelRandomForest = RandomForest.trainClassifier(trainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={},numTrees=100)
#reload the test labeled data
testData = training.loadLabeledDataRDD("test")
#recompute the accuracy metrics
training.displayConfusionTable=True
training.runMetrics(testData,modelNaiveBayes,modelDecisionTree,logRegModel,modelRandomForest)
run.useModels(modelNaiveBayes,modelDecisionTree,logRegModel,modelRandomForest)
run.runModel('BOS', "2016-05-18 20:15-0500", 'AUS', "2016-05-18 22:30-0800" )
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data from the training data set and print the schema
Step2: Visualize classes in scatter plot based on 2 features
Step3: Load the training data as an RDD of LabeledPoint
Step4: Train multiple classification models
Step5: Load Test data from Cloudant database and compute accuracy metrics
Step6: Accuracy analysis and model refinement
Step7: Customize classification using Training Handler class extension
Step8: Run the predictive model
|
86 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
# comment out this line if you don't have seaborn installed
import seaborn as sns
sns.set_palette("colorblind")
import numpy as np
# execute this line:
from astroquery.sdss import SDSS
TSquery = SELECT TOP 10000
p.psfMag_r, p.fiberMag_r, p.fiber2Mag_r, p.petroMag_r,
p.deVMag_r, p.expMag_r, p.modelMag_r, p.cModelMag_r,
s.class
FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND s.class != 'QSO'
ORDER BY p.objid ASC
SDSSts = SDSS.query_sql(TSquery)
SDSSts
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
# set the random state
rs = 23 # we are in Chicago after all
# extract feature names, remove class
feats = list(SDSSts.columns)
feats.remove('class')
# cast astropy table to pandas, remove classes
X = np.array(SDSSts[feats].to_pandas())
# our classes are the outcomes to classify on
y = np.array(SDSSts['class'])
# let's do a split in training and test set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = rs)
# we'll leave the test set for later.
# instantiate the random forest classifier:
RFmod = RandomForestClassifier()
# do a grid search over the free random forest parameters:
pars = {"n_estimators": [10, 100, 300],
"max_features": [1, 3, 7],
"min_samples_leaf": [1,10]}
grid_results = GridSearchCV(RandomForestClassifier(),
pars,
cv = 5)
grid_results.fit(X_train, y_train)
grid_results.grid_scores_
from sklearn.decomposition import PCA
# instantiate the PCA object
pca = PCA(n_components=2)
# fit and transform the samples:
X_pca = pca.fit_transform(X)
# make a plot object
fig, ax = plt.subplots(1, 1, figsize=(12,8))
# loop over number of classes:
for i,l in enumerate(np.unique(y)):
members = y == l
plt.scatter(X_pca[members, 0], X_pca[members, 1],
color=sns.color_palette("colorblind",8)[i],
label=l)
ax.set_xlabel("PCA Component 1")
ax.set_ylabel("PCA Component 2")
plt.legend()
# Train PCA on training data set
X_pca_train = pca.fit_transform(X_train)
# apply to test set
X_pca_test = pca.transform(X_test)
# we'll leave the test set for later.
# instantiate the random forest classifier:
RFmod = RandomForestClassifier()
# do a grid search over the free random forest parameters:
pars = {"n_estimators": [10, 100, 300],
"max_features": [1, 2],
"min_samples_leaf": [1,10]}
grid_results = GridSearchCV(RandomForestClassifier(),
pars,
cv = 5)
grid_results.fit(X_pca_train, y_train)
grid_results.best_score_
from sklearn.pipeline import Pipeline
# make a list of name-estimator tuples
estimators = [('pca', PCA()), ('clf', RandomForestClassifier())]
# instantiate the pipeline
pipe = Pipeline(estimators)
# make a dictionary of parameters
params = dict(pca__n_components=[2, 4, 6, 8],
clf__n_estimators=[10, 100, 300],
clf__min_samples_leaf=[1,10])
# perform the grid search
grid_search = GridSearchCV(pipe, param_grid=params)
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
print(grid_search.best_params_)
# First, let's redo the train-test split to split the training data
# into training and hold-out validation set
X_train_new, X_val, y_train_new, y_val = train_test_split(X_train, y_train,
test_size = 0.2,
random_state = rs)
# Now we have to re-do the PCA pipeline:
from sklearn.pipeline import Pipeline
# make a list of name-estimator tuples
estimators = [('pca', PCA()), ('clf', RandomForestClassifier())]
# instantiate the pipeline
pipe = Pipeline(estimators)
# make a dictionary of parameters
params = dict(pca__n_components=[2, 4, 6, 8],
clf__n_estimators=[10, 100, 300],
clf__min_samples_leaf=[1,10])
# perform the grid search
grid_search = GridSearchCV(pipe, param_grid=params)
grid_search.fit(X_train_new, y_train_new)
print("Best score: " + str(grid_search.best_score_))
print("Best parameter set: " + str(grid_search.best_params_))
print("Validation score for model with PCA: " + str(grid_search.score(X_val, y_val)))
# I'm going to pick locally linear embedding here:
# LLE has two free parameters:
# - the number of parameters to use `n_neighbors`
# - the number of components in the output
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.pipeline import Pipeline
# make a list of name-estimator tuples
estimators = [('lle', LocallyLinearEmbedding()), ('clf', RandomForestClassifier())]
# instantiate the pipeline
pipe2 = Pipeline(estimators)
# make a dictionary of parameters
params = dict(lle__n_components=[2, 4, 6, 8],
lle__n_neighbors=[5, 10, 100],
clf__n_estimators=[10, 100, 300],
clf__min_samples_leaf=[1,10])
# perform the grid search
grid_search2 = GridSearchCV(pipe2, param_grid=params)
grid_search2.fit(X_train_new, y_train_new)
print("Best score: " + str(grid_search2.best_score_))
print("Best parameter set: " + str(grid_search2.best_params_))
print("Validation score for model with LLE: " + str(grid_search2.score(X_val, y_val)))
from sklearn.linear_model import LogisticRegressionCV
lr = LogisticRegressionCV(penalty="l2", Cs=10, cv=10)
lr.fit(X_train, y_train)
lr.coef_
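# fitted coefficients for the 8 magnitude features in the binary GALAXY-vs-STAR problem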
# let's leave out the first parameter and see whether the coefficients change:
lr.fit(X_train[:,1:], y_train)
lr.coef_
from sklearn.base import BaseEstimator, TransformerMixin
class RebinTimeseries(BaseEstimator, TransformerMixin):
def __init__(self, n=4, method="average"):
Initialize hyperparameters
:param n: number of samples to bin
:param method: "average" or "sum" the samples within a bin?
:return:
self.n = n ## save number of bins to average together
self.method = method
return
def fit(self,X):
I don't really need a fit method!
## set number of light curves (L) and
## number of samples per light curve (k)
return self
def transform(self, X):
self.L, self.K = X.shape
## set the number of binned samples per light curve
K_binned = int(self.K/self.n)
## if the number of samples in the original light curve
## is not divisible by n, then chop off the last few samples of
## the light curve to make it divisible
#print("X shape: " + str(X.shape))
if K_binned*self.n < self.K:
X = X[:,:self.n*K_binned]
## the array for the new, binned light curves
X_binned = np.zeros((self.L, K_binned))
if self.method in ["average", "mean"]:
method = np.mean
elif self.method == "sum":
method = np.sum
else:
raise Exception("Method not recognized!")
#print("X shape: " + str(X.shape))
#print("L: " + str(self.L))
for i in xrange(self.L):
t_reshape = X[i,:].reshape((K_binned, self.n))
X_binned[i,:] = method(t_reshape, axis=1)
return X_binned
def predict(self, X):
pass
def score(self, X):
pass
def fit_transform(self, X, y=None):
self.fit(X)
X_binned = self.transform(X)
return X_binned
class PSFMagThreshold(BaseEstimator, TransformerMixin):
def __init__(self, p=1.45,):
Initialize hyperparameters
Parameters
----------
p : float
The threshold for the magnitude - model magnitude
self.p = p # store parameter in object
return
def fit(self,X):
I don't really need a fit method!
return self
def transform(self, X):
# extract relevant columns
psfmag = X[:,0]
c_psfmag = X[:,-1]
# compute difference
d_psfmag = psfmag - c_psfmag
# make a 1D array of length N
X_new = np.zeros(X.shape[0])
X_new[d_psfmag > self.p] = 1.0
# IMPORTANT: Your output vector must be a COLUMN vector
# You can achieve this with the numpy function atleast_2D()
# and the numpy function transpose()
return np.atleast_2d(X_new).T
def predict(self, X):
pass
def score(self, X):
pass
def fit_transform(self, X, y=None):
self.fit(X)
X_new = self.transform(X)
return X_new
pt = PSFMagThreshold(p=1.45)
X_pt = pt.fit_transform(X)
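# X_pt is a single binary column flagging objects whose psfMag_r - cModelMag_r
# difference exceeds the threshold p (a common star/galaxy separation cut)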
from sklearn.pipeline import FeatureUnion
transformers = [("pca", PCA(n_components=2)),
("pt", PSFMagThreshold(p=1.45))]
feat_union = FeatureUnion(transformers)
X_transformed = feat_union.fit_transform(X_train)
# combine the
transformers = [("pca", PCA()),
("pt", PSFMagThreshold(p=1.45))]
feat_union = FeatureUnion(transformers)
estimators = [("feats", feat_union),
("clf", RandomForestClassifier())]
pipe_c = Pipeline(estimators)
# make the parameter set
params = dict(feats__pca__n_components=[2, 4, 6, 8],
feats__pt__p=[0.5, 0.9, 1.45, 2.0],
clf__n_estimators=[10, 100, 300],
clf__min_samples_leaf=[1,10])
# perform the grid search
grid_search_c = GridSearchCV(pipe_c, param_grid=params)
grid_search_c.fit(X_train_new, y_train_new)
# print validation score
print("Best score: " + str(grid_search_c.best_score_))
print("Best parameter set: " + str(grid_search_c.best_params_))
print("Validation score: " + str(grid_search_c.score(X_val, y_val)))
# all stars
star_ind = np.argwhere(y == b"STAR").T[0]
# all galaxies
galaxy_ind = np.argwhere(y == b"GALAXY").T[0]
np.random.seed(100)
# new array with much fewer stars
star_ind_new = np.random.choice(star_ind, replace=False, size=int(len(star_ind)/80.0))
X_new = np.vstack((X[galaxy_ind], X[star_ind_new]))
y_new = np.hstack((y[galaxy_ind], y[star_ind_new]))
print(len(y_new[y_new == b"GALAXY"]))
print(len(y_new[y_new == b"STAR"]))
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score  # f1_score is used in the last cell
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_new, y_new,
test_size = 0.3,
random_state = 20)
C_all = [0.0001, 10000]
for C in C_all:
lr = LogisticRegression(penalty='l2', C=C)
lr.fit(X_train2, y_train2)
y_pred = lr.predict(X_test2)
print("The accuracy score for C = %i is %.4f"%(C, accuracy_score(y_test2, y_pred)))
cm = confusion_matrix(y_test2, y_pred, labels=np.unique(y))
print(cm)
for C in C_all:
lr = LogisticRegression(penalty='l2', C=C)
lr.fit(X_train2, y_train2)
y_pred = lr.predict(X_test2)
print("The accuracy score for C = %i is %.4f"%(C, accuracy_score(y_test2, y_pred)))
print("The F1 score for C = %.5f is %.4f"%(C, f1_score(y_test2, y_pred,
pos_label=b"STAR",
average="binary")))
cm = confusion_matrix(y_test2, y_pred, labels=np.unique(y))
print(cm)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First, we're going to need some data. We'll work with the star-galaxy data from the first session. This uses the astroquery package and then queries the top 10000 observations from SDSS (see this exercise for more details)
Step3: Exercise 1
Step4: Exercise 2c
Step5: It looks like the scores are very similar and have very small variance between the different cross-validation instances. It can be useful to plot the results this way to see, for example, whether there is a large variance in the cross-validation results.
Step6: Exercise 5
Step7: Note
Step8: It looks like n_components=6 works best.
Step9: Looks like PCA does slightly better as a dimensionality reduction method.
Step10: Answer 1
Step13: Answer 3
Step16: Here are the important things about writing transformer objects for use in scikit-learn
Step17: Now let's make a feature set that combines this feature with the PCA features
Step18: Now we can build the pipeline
Step19: Choosing The Right Scoring Function
Step20: We have now made a really imbalanced data set with many galaxies and only a few stars
Step21: Exercise 10
Step22: Exercise 11
|
87 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
df = pd.read_excel('https://github.com/chris1610/pbpython/blob/master/data/sample-salesv3.xlsx?raw=true')
df.dtypes
df['date'] = pd.to_datetime(df['date'])
df.head()
df.dtypes
df[df["account number"]==307599].head()
df[df["quantity"] > 22].head()
df[df["sku"].map(lambda x: x.startswith('B1'))].head()
df[df["sku"].map(lambda x: x.startswith('B1')) & (df["quantity"] > 22)].head()
df[df["account number"].isin([714466,218895])].head()
df.query('name == ["Kulas Inc","Barton LLC"]').head()
df = df.sort_values(by='date')
df.head()
df[df['date'] >='20140905'].head()
df[df['date'] >='2014-03'].head()
df[(df['date'] >='20140701') & (df['date'] <= '20140715')].head()
df[df['date'] >= 'Oct-2014'].head()
df[df['date'] >= '10-10-2014'].head()
df2 = df.set_index(['date'])
df2.head()
df2["20140101":"20140201"].head()
df2["2014-Jan-1":"2014-Feb-1"].head()
df2["2014-Jan-1":"2014-Feb-1"].tail()
df2["2014"].head()
df2["2014-Dec"].head()
df[df['sku'].str.contains('B1')].head()
df[(df['sku'].str.contains('B1-531')) & (df['quantity']>40)].sort_values(by=['quantity','name'],ascending=[0,1])
df["name"].unique()
df.drop_duplicates(subset=["account number","name"]).head()
df.drop_duplicates(subset=["account number","name"]).iloc[:,[0,1]]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in the Excel data that represents a year's worth of sales.
Step2: Take a quick look at the data types to make sure everything came through as expected.
Step3: You'll notice that our date column is showing up as a generic object. We are going to convert it to datetime object to make some selections a little easier.
Step4: The date is now a datetime object which will be useful in future steps.
Step5: You could also do the filtering based on numeric values.
Step6: If we want to do more complex filtering, we can use map to filter. In this example, let's look for items with sku's that start with B1.
Step7: It's easy to chain two statements together using the &.
Step8: Another useful function that pandas supports is called isin. It allows us to define a list of values we want to look for.
Step9: Pandas supports another function called query which allows you to efficiently select subsets of data. It does require the installation of numexpr so make sure you have it installed before trying this step.
Step10: The query function allows you do more than just this simple example but for the purposes of this discussion, I'm showing it so you are aware that it is out there for you.
Step11: The python filtering syntax shown before works with dates.
Step12: One of the really nice features of pandas is that it understands dates so will allow us to do partial filtering. If we want to only look for data more recent than a specific month, we can do so.
Step13: Of course, you can chain the criteria.
Step14: Because pandas understands date columns, you can express the date value in multiple formats and it will give you the results you expect.
Step15: When working with time series data, if we convert the data to use the date as at the index, we can do some more filtering.
Step16: We can slice the data to get a range.
Step17: Once again, we can use various date representations to remove any ambiguity around date naming conventions.
Step18: Additional String Functions
Step19: We can string queries together and use sort to control how the data is ordered.
Step20: Bonus Task
Step21: If we wanted to include the account number, we could use drop_duplicates.
Step22: We are obviously pulling in more data than we need and getting some non-useful information, so select only the first and second columns using ix.
|
88 | <ASSISTANT_TASK:>
Python Code:
import pandas_datareader as pdr
import pandas as pd
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS
import matplotlib.pyplot as plt
import seaborn
seaborn.set_style('darkgrid')
pd.plotting.register_matplotlib_converters()
%matplotlib inline
factors = pdr.get_data_famafrench('F-F_Research_Data_Factors', start='1-1-1926')[0]
print(factors.head())
industries = pdr.get_data_famafrench('10_Industry_Portfolios', start='1-1-1926')[0]
print(industries.head())
endog = industries.HiTec - factors.RF.values
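# endog is the excess return of the HiTec industry portfolio over the risk-free rate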
exog = sm.add_constant(factors['Mkt-RF'])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
params = rres.params
print(params.head())
print(params.tail())
fig = rres.plot_recursive_coefficient(variables=['Mkt-RF'], figsize=(14,6))
exog_vars = ['Mkt-RF', 'SMB', 'HML']
exog = sm.add_constant(factors[exog_vars])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
fig = rres.plot_recursive_coefficient(variables=exog_vars, figsize=(14,18))
joined = pd.concat([factors, industries], axis=1)
joined['Mkt_RF'] = joined['Mkt-RF']
mod = RollingOLS.from_formula('HiTec ~ Mkt_RF + SMB + HML', data=joined, window=60)
rres = mod.fit()
print(rres.params.tail())
%timeit rols.fit()
%timeit rols.fit(params_only=True)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pandas-datareader is used to download data from
Step2: The first model estimated is a rolling version of the CAPM that regresses
Step3: We next plot the market loading along with a 95% point-wise confidence interval.
Step4: Next, the model is expanded to include all three factors, the excess market, the size factor
Step5: Formulas
Step6: RollingWLS
|
89 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Let's grab some libraries to help us manipulate symbolic equations
from __future__ import print_function
from __future__ import division
import numpy as np
import sympy
from sympy import symbols, sin, cos, pi, simplify
def makeT(a, alpha, d, theta):
# create a modified DH homogenious matrix
return np.array([
[ cos(theta), -sin(theta), 0, a],
[sin(theta)*cos(alpha), cos(theta)*cos(alpha), -sin(alpha), -d*sin(alpha)],
[sin(theta)*sin(alpha), cos(theta)*sin(alpha), cos(alpha), d*cos(alpha)],
[ 0, 0, 0, 1]
])
def simplifyT(tt):
This goes through each element of a matrix and tries to simplify it.
for i, row in enumerate(tt):
for j, col in enumerate(row):
tt[i,j] = simplify(col)
return tt
# craig puma
t1,t2,t3,t4,t5,t6 = symbols('t1 t2 t3 t4 t5 t6')
a2, a3, d3, d4 = symbols('a2 a3 d3 d4')
T1 = makeT(0,0,0,t1)
T2 = makeT(0,-pi/2,0,t2)
T3 = makeT(a2,0,d3,t3)
T4 = makeT(a3,-pi/2,d4,t4)
T5 = makeT(0,pi/2,0,t5)
T6 = makeT(0,-pi/2,0,t6)
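# chaining the six modified-DH link transforms below gives the wrist frame
# expressed in the base frame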
ans = np.eye(4)
for T in [T1, T2, T3, T4, T5, T6]:
ans = ans.dot(T)
print(ans)
ans = simplifyT(ans)
print(ans)
print('position x: {}'.format(ans[0,3]))
print('position y: {}'.format(ans[1,3]))
print('position z: {}'.format(ans[2,3]))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Puma Example
Step2: Puma
|
90 | <ASSISTANT_TASK:>
Python Code:
import os
import requests
from bs4 import BeautifulSoup
import re
import json
import time
import praw
import dominate
from dominate.tags import *
from time import gmtime, strftime
#import nose
#import unittest
import numpy as np
import pandas as pd
from pandas import *
from PIL import Image
from pprint import pprint
#import pyttsx
import shutil
import getpass
hosnam = getpass.getuser()
gtsdrndir = ('/home/' + hosnam + '/getsdrawndotcom/')
gtsdrndir
if os.path.isdir(gtsdrndir) == True:
print 'its true'
else:
print 'its false'
os.mkdir(gtsdrndir)
os.chdir(gtsdrndir)
r = praw.Reddit(user_agent='getsdrawndotcom')
#getmin = r.get_redditor('itwillbemine')
#mincom = getmin.get_comments()
#engine = pyttsx.init()
#engine.say('The quick brown fox jumped over the lazy dog.')
#engine.runAndWait()
#shtweet = []
#for mi in mincom:
# print mi
# shtweet.append(mi)
bodycom = []
bodyicv = dict()
#beginz = pyttsx.init()
#for shtz in shtweet:
# print shtz.downs
# print shtz.ups
# print shtz.body
# print shtz.replies
#beginz.say(shtz.author)
#beginz.say(shtz.body)
#beginz.runAndWait()
# bodycom.append(shtz.body)
#bodyic
#bodycom
getnewr = r.get_subreddit('redditgetsdrawn')
rdnew = getnewr.get_new()
lisrgc = []
lisauth = []
for uz in rdnew:
#print uz
lisrgc.append(uz)
gtdrndic = dict()
imgdir = (gtsdrndir + 'imgs')
imgdir
if os.path.isdir(imgdir) == True:
print 'its true'
else:
print 'its false'
os.mkdir(imgdir)
artlist = os.listdir(imgdir)
from time import time
yearz = strftime("%y", gmtime())
monthz = strftime("%m", gmtime())
dayz = strftime("%d", gmtime())
#strftime("%y %m %d", gmtime())
yrzpat = (imgdir + yearz)
monzpath = (yrzpat + '/' + monthz)
dayzpath = (monzpath + '/' + dayz)
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
repathz = (imgdir + '/' + yearz + '/' + monthz + '/' + dayz + '/')
repathz
imgzdays
repathz
def ospacheck():
if os.path.isdir(imgdir + yearz) == True:
print 'its true'
else:
print 'its false'
os.mkdir(imgdir + yearz)
ospacheck()
#if os.path.isdir(imgzdir + yearz) == True:
# print 'its true'
#else:
# print 'its false'
# os.mkdir(imgzdir + yearz)
lizmon = ['monzpath', 'dayzpath', 'imgzdays', 'rmgzdays', 'metzdays']
if os.path.isdir(monzpath) == True:
print 'its true'
else:
print 'its false'
#os.mkdir('/home/wcmckee/getsdrawndotcom/' + monzpath)
if os.path.isdir(dayzpath) == True:
print 'its true'
else:
print 'its false'
os.mkdir(dayzpath)
if os.path.isdir(imgzdays) == True:
print 'its true'
else:
print 'its false'
os.mkdir(imgzdays)
if os.path.isdir(rmgzdays) == True:
print 'its true'
else:
print 'its false'
os.mkdir(rmgzdays)
if os.path.isdir(metzdays) == True:
print 'its true'
else:
print 'its false'
os.mkdir(metzdays)
#for liz in lizmon:
# if os.path.isdir(liz) == True:
## print 'its true'
# else:
# print 'its false'
# os.mkdir(liz)
fullhom = ('/home/wcmckee/getsdrawndotcom/')
#artlist
httpad = ('http://getsdrawn.com/imgs')
#im = Image.new("RGB", (512, 512), "white")
#im.save(file + ".thumbnail", "JPEG")
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
os.chdir(metzdays)
metadict = dict()
for lisz in lisrgc:
metadict.update({'up': lisz.ups})
metadict.update({'down': lisz.downs})
metadict.update({'title': lisz.title})
metadict.update({'created': lisz.created})
#metadict.update({'createdutc': lisz.created_utc})
#print lisz.ups
#print lisz.downs
#print lisz.created
#print lisz.comments
metadict
for lisr in lisrgc:
gtdrndic.update({'title': lisr.title})
lisauth.append(str(lisr.author))
for osliz in os.listdir(metzdays):
with open(str(lisr.author) + '.meta', "w") as f:
rstrin = lisr.title.encode('ascii', 'ignore').decode('ascii')
#print matdict
#metadict = dict()
#for lisz in lisrgc:
# metadict.update({'up': lisz.ups})
# metadict.update({'down': lisz.downs})
# metadict.update({'title': lisz.title})
# metadict.update({'created': lisz.created})
f.write(rstrin)
#matdict
#os.listdir(dayzpath)
#for lisa in lisauth:
# #print lisa + '-line.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-line.png')
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-bw.png')
#print lisa + '-bw.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-colour.png')
#print lisa + '-colour.png'
os.listdir('/home/wcmckee/getsdrawndotcom/imgs')
#lisauth
#lisr.author
namlis = []
opsinz = open('/home/wcmckee/visignsys/index.meta', 'r')
panz = opsinz.read()
os.chdir(rmgzdays)
from imgurpython import ImgurClient
opps = open('/home/wcmckee/ps.txt', 'r')
opzs = open('/home/wcmckee/ps2.txt', 'r')
oprd = opps.read()
opzrd = opzs.read()
client = ImgurClient(oprd, opzrd)
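# authenticate against the imgur API with the client id/secret read from the two local files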
# Example request
#items = client.gallery()
#for item in items:
# print(item.link)
#itz = client.get_album_images()
galim = client.get_image('SBaV275')
galim.size
gelim = client.get_album_images('LTDJ9')
gelim
from urlparse import urlparse
linklis = []
pathlis = []
for rdz in lisrgc:
if 'http://imgur.com/' in rdz.url:
print rdz.url
parsed = urlparse(rdz.url)
print parsed.path.strip('/')
pathlis.append(parsed.path.strip('/'))
#for pared in parsed.path:
# print pared.strip('/')
#itgar = client.gallery_item(parsed.path.strip('/'))
#itz = client.get_album_images(parsed.path.strip('a/'))
# reimg = requests.get(rdz.url)
## retxt = reimg.text
# souptxt = BeautifulSoup(''.join(retxt))
# soupurz = souptxt.findAll('img')
# for soupuz in soupurz:
# imgurl = soupuz['src']
# print imgurl
# linklis.append(imgurl)
#try:
# imzdata = requests.get(imgurl)
pathlis
noalis = []
for pathl in pathlis:
if 'a/' in pathl:
print 'a found'
else:
noalis.append(pathl)
#if 'a/' in pathlis:
# print 'a found'
#else:
# noalis.append(pathlis)
for noaz in noalis:
print noaz
#itgar = client.gallery_item()
linklis
if '.jpg' in linklis:
print 'yes'
else:
print 'no'
#panz()
for rdz in lisrgc:
(rdz.title)
#a(rdz.url)
if 'http://i.imgur.com' in rdz.url:
#print rdz.url
print (rdz.url)
url = rdz.url
response = requests.get(url, stream=True)
with open(str(rdz.author) + '-reference.png', 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
apsize = []
aptype = []
basewidth = 600
imgdict = dict()
for rmglis in os.listdir(rmgzdays):
#print rmglis
im = Image.open(rmglis)
#print im.size
imgdict.update({rmglis : im.size})
#im.thumbnail(size, Image.ANTIALIAS)
#im.save(file + ".thumbnail", "JPEG")
apsize.append(im.size)
aptype.append(rmglis)
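# apsize and aptype now hold the (width, height) and filename of every reference image;
# imgdict maps each filename to its size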
#for imdva in imgdict.values():
#print imdva
#for deva in imdva:
#print deva
# if deva < 1000:
# print 'omg less than 1000'
# else:
# print 'omg more than 1000'
# print deva / 2
#print imgdict.values
# Needs to update imgdict.values with this new number. Must halve height also.
#basewidth = 300
#img = Image.open('somepic.jpg')
#wpercent = (basewidth/float(img.size[0]))
#hsize = int((float(img.size[1])*float(wpercent)))
#img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
#img.save('sompic.jpg')
#os.chdir(metzdays)
#for numz in apsize:
# print numz[0]
# if numz[0] > 800:
# print ('greater than 800')
# else:
# print ('less than 800!')
reliz = []
for refls in os.listdir(rmgzdays):
#print rmgzdays + refls
reliz.append(rmgzdays + '/' + refls)
reliz
aptype
opad = open('/home/wcmckee/ad.html', 'r')
opred = opad.read()
str2 = opred.replace("\n", "")
str2
doc = dominate.document(title='GetsDrawn')
with doc.head:
link(rel='stylesheet', href='style.css')
script(type ='text/javascript', src='script.js')
str(str2)
with div():
attr(cls='header')
h1('GetsDrawn')
p(img('imgs/getsdrawn-bw.png', src='imgs/getsdrawn-bw.png'))
#p(img('imgs/15/01/02/ReptileLover82-reference.png', src= 'imgs/15/01/02/ReptileLover82-reference.png'))
h1('Updated ', strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
p(panz)
p(bodycom)
with doc:
with div(id='body').add(ol()):
for rdz in reliz:
#h1(rdz.title)
#a(rdz.url)
#p(img(rdz, src='%s' % rdz))
#print rdz
p(img(rdz, src = rdz))
p(rdz)
#print rdz.url
#if '.jpg' in rdz.url:
# img(rdz.urlz)
#else:
# a(rdz.urlz)
#h1(str(rdz.author))
#li(img(i.lower(), src='%s' % i))
with div():
attr(cls='body')
p('GetsDrawn is open source')
a('https://github.com/getsdrawn/getsdrawndotcom')
a('https://reddit.com/r/redditgetsdrawn')
#print doc
docre = doc.render()
#s = docre.decode('ascii', 'ignore')
yourstring = docre.encode('ascii', 'ignore').decode('ascii')
indfil = ('/home/wcmckee/getsdrawndotcom/index.html')
mkind = open(indfil, 'w')
mkind.write(yourstring)
mkind.close()
#os.system('scp -r /home/wcmckee/getsdrawndotcom/ wcmckee@getsdrawn.com:/home/wcmckee/getsdrawndotcom')
#rsync -azP source destination
#updatehtm = raw_input('Update index? Y/n')
#updateref = raw_input('Update reference? Y/n')
#if 'y' or '' in updatehtm:
# os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html wcmckee@getsdrawn.com:/home/wcmckee/getsdrawndotcom/index.html')
#elif 'n' in updatehtm:
# print 'not uploading'
#if 'y' or '' in updateref:
# os.system('rsync -azP /home/wcmckee/getsdrawndotcom/ wcmckee@getsdrawn.com:/home/wcmckee/getsdrawndotcom/')
os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html wcmckee@getsdrawn.com:/home/wcmckee/getsdrawndotcom/index.html')
#os.system('scp -r /home/wcmckee/getsdrawndotcom/style.css wcmckee@getsdrawn.com:/home/wcmckee/getsdrawndotcom/style.css')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Something is wrong with the script and it's no longer creating these dirs in the correct folder. How did this break?
Step2: If I save the data to the file, how am I going to get it to update as the post is archived, such as up and down votes?
Step3: Need to save json object.
Step4: I have it creating a meta folder and creating/writing username.meta files. It wrote 'test' in each folder, but now it writes the post title for each photo author, i.e. the username/image data. It should be writing more than the author and title: maybe upvotes/downvotes, subreddit, time published, etc.
Step5: Instead of creating these white images, why not download the art replies to the reference photo?
Step6: I want to save the list of usernames that submit images as png files in a dir.
Step7: Filter the non-jpeg/png links. Need to perform a request or use the imgur API to get the jpeg/png files from the link. Hey, maybe bs4?
Step8: I need to get the image ids from each url. Strip the http
|
91 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
from IPython.core.display import HTML
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
Image(url= "https://cdn-images-1.medium.com/max/1600/1*UkI9za9zTR-HL8uM15Wmzw.png")
#hyperparams
num_epochs = 100
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length//batch_size//truncated_backprop_length
#Step 1 - Collect data
#Now generate the training data,
#the input is basically a random binary vector. The output will be the
#“echo” of the input, shifted echo_step steps to the right.
#Notice the reshaping of the data into a matrix with batch_size rows.
#Neural networks are trained by approximating the gradient of loss function
#with respect to the neuron-weights, by looking at only a small subset of the data,
#also known as a mini-batch.The reshaping takes the whole dataset and puts it into
#a matrix, that later will be sliced up into these mini-batches.
def generateData():
#0,1, 50K samples, 50% chance each chosen
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
#shift 3 steps to the left
y = np.roll(x, echo_step)
#padd beginning 3 values with 0
y[0:echo_step] = 0
#Gives a new shape to an array without changing its data.
#The reshaping takes the whole dataset and puts it into a matrix,
#that later will be sliced up into these mini-batches.
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
data = generateData()
print(data)
#Schematic of the reshaped data-matrix, arrow curves shows adjacent time-steps that ended up on different rows.
#Light-gray rectangle represent a “zero” and dark-gray a “one”.
Image(url= "https://cdn-images-1.medium.com/max/1600/1*aFtwuFsboLV8z5PkEzNLXA.png")
#TensorFlow works by first building up a computational graph, that
#specifies what operations will be done. The input and output of this graph
#is typically multidimensional arrays, also known as tensors.
#The graph, or parts of it can then be executed iteratively in a
#session, this can either be done on the CPU, GPU or even a resource
#on a remote server.
#operations and tensors
#The two basic TensorFlow data-structures that will be used in this
#example are placeholders and variables. On each run the batch data
#is fed to the placeholders, which are “starting nodes” of the
#computational graph. Also the RNN-state is supplied in a placeholder,
#which is saved from the output of the previous run.
#Step 2 - Build the Model
#datatype, shape (5, 15) 2D array or matrix, batch size shape for later
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
#and one for the RNN state, 5,4
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
#The weights and biases of the network are declared as TensorFlow variables,
#which makes them persistent across runs and enables them to be updated
#incrementally for each batch.
#3 layer recurrent net, one hidden state
#randomly initialize weights
W = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)
#anchor, improves convergance, matrix of 0s
b = tf.Variable(np.zeros((1,state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32)
Image(url= "https://cdn-images-1.medium.com/max/1600/1*n45uYnAfTDrBvG87J-poCA.jpeg")
#Now it’s time to build the part of the graph that resembles the actual RNN computation,
#first we want to split the batch data into adjacent time-steps.
# Unpack columns
#Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
#so a bunch of arrays, 1 batch per time step
inputs_series = tf.unpack(batchX_placeholder, axis=1)
labels_series = tf.unpack(batchY_placeholder, axis=1)
Image(url= "https://cdn-images-1.medium.com/max/1600/1*f2iL4zOkBUBGOpVE7kyajg.png")
#Schematic of the current batch split into columns, the order index is shown on each data-point
#and arrows show adjacent time-steps.
#Forward pass
#state placeholder
current_state = init_state
#series of states through time
states_series = []
#for each set of inputs
#forward pass through the network to get new state value
#store all states in memory
for current_input in inputs_series:
#format input
current_input = tf.reshape(current_input, [batch_size, 1])
#mix both state and input data
input_and_state_concatenated = tf.concat(1, [current_input, current_state]) # Increasing number of columns
#perform matrix multiplication between weights and input, add bias
#squash with a nonlinearity, for probabiolity value
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
#store the state in memory
states_series.append(next_state)
#set current state to next one
current_state = next_state
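# states_series now holds one hidden-state tensor per truncated time step;
# current_state carries the last state into the next run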
Image(url= "https://cdn-images-1.medium.com/max/1600/1*fdwNNJ5UOE3Sx0R_Cyfmyg.png")
#calculate loss
#second part of forward pass
#logits short for logistic transform
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
#apply softmax nonlinearity for output probability
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
#measure loss, calculate softmax again on logits, then compute cross entropy
#measures the difference between two probability distributions
#this will return A Tensor of the same shape as labels and of the same type as logits
#with the softmax cross entropy loss.
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels) for logits, labels in zip(logits_series,labels_series)]
#computes average, one value
total_loss = tf.reduce_mean(losses)
#use adagrad to minimize with .3 learning rate
#minimize it with adagrad, not SGD
#One downside of SGD is that it is sensitive to
#the learning rate hyper-parameter. When the data are sparse and features have
#different frequencies, a single learning rate for every weight update can have
#exponential regret.
#Some features can be extremely useful and informative to an optimization problem but
#they may not show up in most of the training instances or data. If, when they do show up,
#they are weighted equally in terms of learning rate as a feature that has shown up hundreds
#of times we are practically saying that the influence of such features means nothing in the
#overall optimization. it's impact per step in the stochastic gradient descent will be so small
#that it can practically be discounted). To counter this, AdaGrad makes it such that features
#that are more sparse in the data have a higher learning rate which translates into a larger
#update for that feature
#sparse features can be very useful.
#Each feature has a different learning rate which is adaptable.
#gives voice to the little guy who matters a lot
#weights that receive high gradients will have their effective learning rate reduced,
#while weights that receive small or infrequent updates will have their effective learning rate increased.
#great paper http://seed.ucsd.edu/mediawiki/images/6/6a/Adagrad.pdf
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
#visualizer
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color="red")
plt.bar(left_offset, single_output_series * 0.3, width=1, color="green")
plt.draw()
plt.pause(0.0001)
#Step 3 Training the network
with tf.Session() as sess:
#we stupidly have to do this everytime, it should just know
#that we initialized these vars. v2 guys, v2..
sess.run(tf.initialize_all_variables())
#interactive mode
plt.ion()
#initialize the figure
plt.figure()
#show the graph
plt.show()
#to show the loss decrease
loss_list = []
for epoch_idx in range(num_epochs):
#generate data at eveery epoch, batches run in epochs
x,y = generateData()
#initialize an empty hidden state
_current_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
#each batch
for batch_idx in range(num_batches):
#starting and ending point per batch
#since weights reoccuer at every layer through time
#These layers will not be unrolled to the beginning of time,
#that would be too computationally expensive, and are therefore truncated
#at a limited number of time-steps
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
#run the computation graph, give it the values
#we calculated earlier
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state
})
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show()
Image(url= "https://cdn-images-1.medium.com/max/1600/1*uKuUKp_m55zAPCzaIemucA.png")
Image(url= "https://cdn-images-1.medium.com/max/1600/1*ytquMdmGMJo0-3kxMCi1Gg.png")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The figure below shows the input data-matrix, and the current batch batchX_placeholder
Step2: As you can see in the picture below, this is done by unpacking the columns (axis = 1) of the batch into a Python list. The RNN will simultaneously be training on different parts of the time-series: steps 4 to 6, 16 to 18 and 28 to 30 in the current batch example. The reason for using the variable names “plural”_”series” is to emphasize that the variable is a list that represents a time-series with multiple entries at each step.
Step3: The fact that the training is done at three places simultaneously in our time-series requires us to save three instances of the state when propagating forward. That has already been accounted for, as you can see that the init_state placeholder has batch_size rows.
Step4: Notice the concatenation on line 6: what we actually want to do is calculate the sum of two affine transforms, current_input * Wa + current_state * Wb, in the figure below. By concatenating those two tensors you only use one matrix multiplication. The addition of the bias b is broadcast over all samples in the batch.
Step5: You may wonder the variable name truncated_backprop_length is supposed to mean. When a RNN is trained, it is actually treated as a deep neural network with reoccurring weights in every layer. These layers will not be unrolled to the beginning of time, that would be too computationally expensive, and are therefore truncated at a limited number of time-steps. In our sample schematics above, the error is backpropagated three steps in our batch
Step6: The last line is adding the training functionality, TensorFlow will perform back-propagation for us automatically — the computation graph is executed once for each mini-batch and the network-weights are updated incrementally.
Step7: There is a visualization function so we can se what’s going on in the network as we train. It will plot the loss over the time, show training input, training output and the current predictions by the network on different sample series in a training batch.
Step8: You can see that we are moving truncated_backprop_length steps forward on each iteration (line 15–19), but it is possible have different strides. This subject is further elaborated in this article. The downside with doing this is that truncated_backprop_length need to be significantly larger than the time dependencies (three steps in our case) in order to encapsulate the relevant training data. Otherwise there might a lot of “misses”, as you can see on the figure below.
Step9: Time series of squares, the elevated black square symbolizes an echo-output, which is activated three steps from the echo input (black square). The sliding batch window is also striding three steps at each run, which in our sample case means that no batch will encapsulate the dependency, so it can not train.
|
92 | <ASSISTANT_TASK:>
Python Code:
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/'
emp_data = 'SIP-core_unk_trm'
emp_data_preFrac = 'bulk-core_trm'
import os
import sys
%load_ext rpy2.ipython
%load_ext pushnote
if not os.path.isdir(workDir):
os.makedirs(workDir)
%cd $workDir
!/home/nick/notebook/SIPSim/lib/R/shannon_calc.r -l \
$emp_data > SIP-core_unk_shan.txt
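# the R helper above writes a per-fraction Shannon diversity table to SIP-core_unk_shan.txt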
!echo; head SIP-core_unk_shan.txt
!/home/nick/notebook/SIPSim/lib/R/BD_span_calc.r -l \
$emp_data $emp_data_preFrac > SIP-core_unk_trm_BD-span.txt
!echo; head SIP-core_unk_trm_BD-span.txt
!/home/nick/notebook/SIPSim/lib/R/correlogram_make.r -l \
$emp_data > SIP-core_unk_trm_corr.txt
!echo; head SIP-core_unk_trm_corr.txt
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Init
Step2: Making a table of the Shannon index for each fraction community
Step3: Making a table of variance in BD spans
Step4: Making a community ~ BD correlogram
|
93 | <ASSISTANT_TASK:>
Python Code:
print "Hello world"
s="Hello world"
print s
print s.upper()
print s.replace("o","O")
2
-7897
3.4
-7213.6241
2.66e-23
'Ovo je niz znakova.'
"Ovo je isto niz znakova."
"Ovo je 'niz znakova' u kojem se nalazi 'kombinacija' navodnika."
''
""
'3.14'
3.14
'Ovo je niz.'[0]
niz='Ovo je niz.'
niz[0]
niz[4:6]
niz[4:]
niz[:6]
niz[-1]
niz[7:-1]
niz[7:-5]
print 'Prvi red niza.\nDrugi red niza.'
3<7
4.5>9
broj1=9
print broj1
broj2=-16.85
print broj2
niz1='Ovo je niz'
print niz1
prazan_niz1=''
print prazan_niz1
prazan_niz2=""
print prazan_niz2
lista=[]
print lista
rjecnik={}
print rjecnik
var=None
print var
a=20
b=10
print a+b
a=20
b=10
print a-b+3
a=20
b=10
print a*b
a=20
b=10
print a/b
c=7
d=2
print c/d
e=7
f=2.0
print e/f
c=7
d=2
print float(c)/d
print c/float(d)
c=7
d=2
print c%d
b=10
d=2
print b**2
a=20
b=10
print a==b
c=7
print c==7
a=20
b=10
print a!=b
c=7
print c!=7
a=20
b=10
print a>b
print b>a
a=20
b=10
print a<b
print b<a
a=20
b=10
print a>=b
print b>=a
c=7
print c>=7
a=20
b=10
print a<=b
print b<=a
c=7
print c<=7
c=7
print c
a=20
b=10
a+=b
print a
print b
a=20
c=7
a-=c
print a
print c
a=20
b=10
a*=b
print a
print b
a=20
b=10
a/=b
print a
print b
c=7
d=2
c%=d
print c
print d
c=7
d=2
c**=d
print c
print d
'a' in 'Niz znakova.'
'y' in "Niz znakova."
'a' not in 'Niz znakova.'
'y' not in 'Niz znakova.'
2*5+2>=2**3*2
int(3.7)
int('3')
print float(2)
print 7/2
print float(7)/2
print 7/float(2)
print float(7/2)
str(7.6)
type('niz')
type(2)
type(5.8)
type(False)
print 'dar'+'ovi'
niz1='Ana i Ivo'
niz2='Iva i Jan'
print niz1[:3]+niz2[-3:]
niz1='Ana i Ivo'
niz2='Iva i Jan'
print niz1[:6]+niz2[-3:]
niz1='Ana i Ivo'
niz2='Iva i Jan'
print niz1[:3]+' i '+niz2[-3:]
ana=2000
luka=1995
print 'Ana je mlađa od Luke '+str(ana-luka)+' godina.'
s='Ana i Ivo'
print s.upper()
print s
s="Ana i Ivo"
print s.lower()
print s
niz='ananas, banana, jabuka, jagoda, mandarina'
print niz.replace('a','A')
print niz.replace('a','A',5)
print niz
niz='ananas, banana, jabuka, jagoda, mandarina'
print niz.count('an')
print niz.count('an',5)
print niz.count('an',5,10)
niz='ananas, banana, jabuka, jagoda, mandarina'
print niz.find('an')
print niz.find('an',2)
print niz.find('an',10,40)
print niz.find('ga')
niz=' ananas, banana, jabuka, jagoda, mandarina '
print niz
print niz.strip()
print niz.strip(' an')
print niz.strip('g')
niz='ananas, banana, jabuka, jagoda, mandarina'
print niz.split()
print niz.split('ja')
print niz.split('ja',1)
niz='ananas, banana, jabuka, jagoda, mandarina'
lista=niz.split(', ')
print lista
print '-'.join(lista)
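# join() glues the list elements back into one string, using '-' as the separator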
print ['banana',5,98.95]
lista1=['banana',5,98.95]
lista2=[3,1,5,4,2]
lista3=['a','b','c','d','d']
print lista1
print lista2
print lista3
prazna_lista=[]
print prazna_lista
lista=['banana',5,98.95]
print lista[0]
print lista[-1]
print lista[:2]
print lista[-2:]
print lista[-2:1]
lista4=['banana',5,98.95,[1,'dva',2]]
print lista4[0][-1]
print lista4[-1][1][0]
lista1=['banana',5,98.95]
lista2=[3,1,5,4,2]
print lista1+lista2
lista1=['banana',5,98.95]
lista2=[3,1,5,4,2]
lista3=['a','b','c','d','d']
lista4=['banana',5,98.95,[1,'dva',2]]
print len(lista1)
print len(lista2)
print len(lista3)
print len(lista4)
print len(lista4[-1])
lista=['banana',5,98.95]
lista[0]='jabuka'
print lista
lista=['banana',5,98.95]
lista.append('tekst')
print lista
lista=['banana',5,98.95]
lista.insert(2,'tekst')
print lista
lista5=['banana',5,98.95,'tekst',5]
lista5.remove(5)
print lista5
lista5=['banana',5,98.95,'tekst',5]
del lista5[-1]
print lista5
a=3
if a>5:
print 'Broj je veći od 5.'
b=10
if b>5:
print 'Broj je veći od 5.'
a=3
if a%2==0:
print 'Broj je paran.'
else:
print 'Broj je neparan.'
b=10
if b%2==0:
print 'Broj je paran.'
else:
print 'Broj je neparan.'
c=6
if c%2==0:
if c<10:
c*=3
else:
c*=5
print c
d=11
if d%2==0:
if d<10:
d*=3
else:
d*=5
print d
c=6
if c%2==0 and c<10:
c*=3
else:
c*=5
print c
d=11
if d%2==0 and c<10:
d*=3
else:
d*=5
print d
e=10
if e%2==0 or e%3==0:
e+=4
else:
e-=8
print e
f=5
if f%2==0 or f%3==0:
f+=4
else:
f-=8
print f
g=25
if g%2==0:
print 'Broj je djeljiv s 2.'
elif g%3==0:
print 'Broj je djeljiv s 3.'
elif g%5==0:
print 'Broj je djeljiv s 5.'
elif g%7==0:
print 'Broj je djeljiv sa 7.'
else:
print 'Broj nije djeljiv s 2, 3, 5 ni 7.'
h=6
if h%2==0:
print 'Broj je djeljiv s 2.'
elif h%3==0:
print 'Broj je djeljiv s 3.'
elif h%5==0:
print 'Broj je djeljiv s 5.'
elif h%7==0:
print 'Broj je djeljiv sa 7.'
else:
print 'Broj nije djeljiv s 2, 3, 5 ni 7.'
i=11
if i%2==0:
print 'Broj je djeljiv s 2.'
elif i%3==0:
print 'Broj je djeljiv s 3.'
elif i%5==0:
print 'Broj je djeljiv s 5.'
elif i%7==0:
print 'Broj je djeljiv sa 7.'
else:
print 'Broj nije djeljiv s 2, 3, 5 ni 7.'
for znak in 'neki niz':
print 'Pomoćna varijabla "znak" poprima vrijednost: '+znak
for znak in 'neki niz':
if znak in 'aeiou':
print znak
broj_samoglasnika=0
niz="Ovo je niz znakova."
for znak in niz:
if znak in 'aeiou':
broj_samoglasnika+=1
print broj_samoglasnika
broj_samoglasnika=0
niz="Ovo je niz znakova."
for znak in niz.lower():
if znak in 'aeiou':
broj_samoglasnika+=1
print broj_samoglasnika
samoglasnici=''
niz="Ovo je niz znakova."
for znak in niz.lower():
if znak in 'aeiou':
samoglasnici+=znak
print samoglasnici
for element in [1, 2, 'tri', [4,'pet'], 6.0]:
print element
for element in [1, 2, 'tri', [4,'pet'], 6.0]:
print 'Element je sljedećeg tipa podatka: '+ str(type(element))
for element in ['jedan', 'dva', 'tri', 'cetiri', 'pet']:
if len(element)>3:
print element
for element in ['jedan', 'dva', 'tri', 'cetiri', 'pet']:
print element[0]
duljina_znakova=0
lista=['jedan', 'dva', 'tri', 'cetiri']
for element in lista:
duljina_znakova+=len(element)
print float(duljina_znakova)/len(lista)
print duljina_znakova/len(lista)
i=0
lista=[1,2,3,4,5]
for element in lista:
i+=element
print i
i=0
lista=[1,2,3,4,5]
for element in lista:
i+=element
print i
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ime varijable je s, a vrijednost varijable je Hello world. Navedeno ime varijable s navedenom vrijednosti te varijable je instanca klase.
Step2: Cijeli brojevi mogu se prikazati od -9 223 372 036 854 775 808 do 9 223 372 036 854 775 807. U slučaju da postoji potreba za brojevima izvan navedenog raspona, mogu se pohraniti u cijele brojeve u dugom zapisu s oznakom L. U tom slučaju je ograničenje broja jedino količina radne memorije računala.
Step3: Realni brojevi mogu se prikazati od 2,2250738585072014e-308 do 1,7976931348623157e+308. Kad je rezultat operacije nad brojevima prevelik za prikaz u Pythonu, prikaže se vrijednost inf. Slično tome, kad je negativan broj manji od najmanjeg broja koji se može prikazati u Pythonu, prikaže se vrijednost 0.0.
Step4: An empty string can be written by putting two single or double quotation marks in a row. The value of that string is an empty string of length 0
Step5: Python distinguishes digits stored as strings
Step6: from numbers stored as, in this case, a real (floating-point) value
Step7: 1.5.3.1. Indexing characters
Step8: If a string is stored in a variable, it can be indexed through the variable's name
Step9: Several characters can also be indexed by giving a range of indices: the starting index, a colon, and the end index at which the substring ends
Step10: If the end index is omitted when giving an index range, Python processes the statement as if the last index of the string had been given in place of the omitted one
Step11: Likewise, if the starting index is omitted, the program processes the statement as if the first index of the string had been given in place of the omitted one
Step12: The last character can also be indexed with a negative integer index value
Step13: When indexing, positive and negative index values can be combined, as long as the starting index comes at a position before the end index. Otherwise the program returns an empty string.
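(A short sketch of the indexing rules above, assuming Python 2; the variable name niz is used only for illustration.)
niz = 'Python'
print niz[0]     # P
print niz[1:4]   # yth
print niz[2:]    # thon  (end index omitted -> up to the last index)
print niz[:3]    # Pyt   (start index omitted -> from the first index)
print niz[-1]    # n
print niz[1:-2]  # yth   (positive and negative indices combined)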
Step14: 1.5.3.2. Special characters
Step15: Besides the special newline character, Python has several other useful character sequences with special meaning, written using the backslash (\)
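(A few of these escapes in use - an illustrative sketch only, assuming Python 2.)
print 'prvi red\ndrugi red'          # \n starts a new line
print 'stupac1\tstupac2'             # \t inserts a tab
print 'navodnik \' i kosa crta \\'   # escaped quote and backslash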
Step16: You can find the exercises here
Step17: The real value -16.85 is stored in a variable named broj2 as follows
Step18: The string Ovo je niz is stored in a variable named niz1 as follows
Step19: A variable can also be declared by giving it an empty value, which defines the variable's data type while its current contents are empty. A string is declared by writing two single or double quotation marks in a row. In this way we initialize a variable whose value is empty and whose data type is a string
Step20: or
Step21: Two data types covered later, the list and the dictionary, can also be declared or initialized without containing any values.
Step22: If we do not know which value and data type a variable will hold, we declare it with the constant None
Step23: 1.7. Operators
Step24: 1.7.1.2. Subtraction operator
Step25: 1.7.1.3. Multiplication operator
Step26: 1.7.1.4. Division operator
Step27: Real division is performed in two ways. One way is to write one of the operands as a real (decimal) value rather than an integer.
Step28: The other way is to use the float() function, which converts an integer into a real number.
Step29: 1.7.1.5. Remainder of integer division (modulo) operator
Step30: 1.7.1.6. Exponent
Step31: 1.7.2. Comparison operators
Step32: 1.7.2.2. The != operator
Step33: 1.7.2.3. The > operator
Step34: 1.7.2.4. The < operator
Step35: 1.7.2.5. The >= operator
Step36: 1.7.2.6. The <= operator
Step37: 1.7.3. Assignment operators
Step38: 1.7.3.2. The += operator
Step39: 1.7.3.3. The -= operator
Step40: 1.7.3.4. The *= operator
Step41: 1.7.3.5. The /= operator
Step42: 1.7.3.6. The %= operator
Step43: 1.7.3.7. The **= operator
Step44: 1.7.4. Membership operators
Step45: 1.7.4.2. The not in operator
Step46: 1.7.5. Compound expressions
Step47: In that case it is necessary to know in which order the operators are evaluated, i.e. the operator precedence. Below is the precedence list of the operators covered, from highest to lowest
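(The precedence list itself is not reproduced here; as a small illustration of why the order matters, with arbitrary numbers, assuming Python 2:)
print 2 + 3 * 4 ** 2    # 50 - ** is evaluated first, then *, then +
print (2 + 3) * 4 ** 2  # 80 - parentheses override the default order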
Step48: 1.8.2. The float() function
Step49: If we want to keep the fractional part, the dividend or the divisor must first be converted from an integer value to the real data type and only then divided. The last example shows that the quotient is converted to the real data type only after the integer division has been performed, in which case (as in that example) it comes out as 0 because the fractional part has already been discarded.
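(A sketch of that point, assuming Python 2 integer division: convert before dividing, not after.)
print float(2) / 5   # 0.4
print 2 / float(5)   # 0.4
print float(2 / 5)   # 0.0 - the integer division has already discarded the fraction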
Step50: 1.8.4. The type() function
Step51: 1.9. Working with strings
Step52: Concatenation only joins strings together. If, continuing the previous example, we want to add a space, a conjunction and another space, that has to be stated explicitly.
Step53: or
Step54: When strings and numeric values are to be printed together, the numeric values must first be converted to strings with the str function.
Step55: 1.9.2. Selected string methods
Step56: 1.9.2.2. The lower() method
Step57: 1.9.2.3. The replace() method
Step58: 1.9.2.4. The count() method
Step59: 1.9.2.5. The find() method
Step60: 1.9.2.6. The strip() method
Step61: 1.9.2.7. The split() method
Step62: 1.9.2.8. The join() method
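(A minimal join() sketch, assuming Python 2: the separator string is placed between the list elements.)
lista = ['jedan', 'dva', 'tri']
print '-'.join(lista)   # jedan-dva-tri
print ' '.join(lista)   # jedan dva tri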
Step63: You can find the exercises here
Step64: A list value is assigned to a variable with the assignment operator =.
Step65: A list can be initialized without containing any values
Step66: Lists can be indexed, just like strings
Step67: Additionally, with lists it is possible to index inside list elements whose data types themselves support indexing (e.g. strings and lists), as follows
Step68: Lists can be concatenated as follows
Step69: The len() function computes the length of a list, i.e. it returns the number of elements in the list as an integer.
Step70: Lists are mutable data types, so the value of an existing list element can be changed by indexing that element and assigning it a new value
Step71: 1.10.1. Selected list methods
Step72: 1.10.1.2. The insert() method
Step73: 1.10.1.3. The remove() method
Step74: Besides this method, list elements can also be deleted with the del statement together with the element's index
Step75: You can find the exercises here
Step76: In the check with the number 3, the logical condition is not met (i.e. the result of the logical expression is FALSE) because 3 is not greater than 5, so the given text is not printed. In the check with the number 10, the logical condition is met (i.e. the result of the logical expression is TRUE), so the given text is printed.
Step77: In this example we check whether the remainder of division by 2 equals 0 (if a%2==0). If the condition is met (i.e. the result of the logical expression is TRUE), "Broj je paran." ("The number is even.") is printed (print 'Broj je paran.'). In all other cases (else), "Broj je neparan." ("The number is odd.") is printed (print 'Broj je neparan.').
Step78: In this example we check whether the number is divisible by 2. If that condition is met, we check whether the number is less than 10. If that condition is also met, the number is multiplied by 3. If the number is not divisible by 2, it is multiplied by 5.
Step79: In this example we check whether the number is divisible by 2 and (logical operator and) whether it is less than 10. Only when both conditions are met is the number multiplied by 3. If the condition is not met, the number is multiplied by 5.
Step80: In this example we check whether the number is divisible by 2 or (logical operator or) divisible by 3. Only one of the conditions needs to hold for 4 to be added to the number. If neither condition is met, 8 is subtracted.
Step81: 1.11.2. The for loop
Step82: The example shows that the helper variable znak takes the value of each character of the string neki niz that we iterate over, from the first to the last. In the first iteration it takes the value n, in the second iteration e, in the third iteration k, and so on up to the last character of the string. As many iterations are performed as there are characters in the string. The name of the helper variable znak is arbitrary.
Step83: In this example we iterate over neki niz. In the first iteration znak takes the value n. It is then checked whether that value appears in the string 'aeiou'. We use the membership operator in, which tests whether the value of the helper variable appears in the given string and returns a Boolean. Since n does not appear, the first iteration of the for loop ends and the second one begins. In the second iteration znak takes the value e; since the condition is met, the current value of znak is printed, in this case e. Here the second iteration ends and the third begins, in which znak takes the value k; since k does not appear in aeiou, the third iteration ends and the fourth begins. In the fourth iteration znak takes the value i; since the condition is met, the current value of znak is printed. The fourth iteration ends and the fifth begins, and so on until the last character of neki niz. The loop still performs as many iterations as there are characters in the string; the additional condition only means that the characters satisfying it are the ones printed.
Step84: The variable niz contains 7 vowels, not 6. The logical condition in the if statement checks whether the helper variable znak appears in the string aeiou. Since the condition is met only when znak takes the value of a lowercase vowel, a condition set up this way skips the first character of niz, the capital letter O. The problem is solved with a trick: the string is converted to lowercase with the lower() method, as follows
Step85: All vowels from the given string can be stored in a new string samoglasnici. First an empty string must be initialized, and then we iterate over the string, check whether the current character is a vowel, and append it to samoglasnici if the logical condition is met.
Step86: 1.11.2.2. Iterating over a list
Step87: In this example we iterate over the list [1, 2, 'tri', [4,'pet'], 6.0]. In the first iteration the helper variable element takes the value 1. In the second iteration it takes the value 2. In the third iteration it takes the value 'tri'. In the fourth iteration it takes the value [4, 'pet']. In the fifth and last iteration it takes the value of the last list element, 6.0.
Step88: The example shows that the first two list elements are integer values, the third element is a string, the fourth element is a list, and the fifth element is a real (floating-point) value.
Step89: In this example we iterate over the list ['jedan', 'dva', 'tri', 'cetiri', 'pet'] and check whether the length of each element is greater than 3; if it is, the element is printed. In the first iteration the helper variable element takes the value 'jedan'. It is then checked whether the length of the current value is greater than 3; since jedan has more than 3 characters, the condition is met and the current value of element is printed. The second iteration begins, in which element takes the value 'dva'. Since dva does not have more than three characters (it has exactly three, but not more), the condition is not met; the second iteration ends and the third begins. And so on until the last element of the list.
Step90: We can compute the average character length of the list elements.
Step91: In this example we first set the variable that will accumulate the character count to 0. Then we stored the list we will iterate over in the variable lista. For each element we computed its length (len(element)) and added the value to the variable duljina_znakova. In the first iteration duljina_znakova took the value 5, because jedan has 5 characters. In the second iteration it took the value 8, because dva adds another 3 characters, and 5+3 equals 8. In the third iteration it took the value 11, because tri adds another 3 characters, and 5+3+3 equals 11. In the fourth iteration it took the value 17, because cetiri adds another 6 characters, and 5+3+3+6 equals 17. When the iteration finished, we converted the integer value of duljina_znakova to the real data type and divided it by the length of the list (i.e. the number of elements in the list). Since both the value of duljina_znakova and the value of the expression len(lista) are integer data types, one of the values has to be converted to the real data type, because otherwise we would not get the fractional part.
Step92: The values that the counter i takes in each iteration can be tracked by printing the value of i inside the for loop
|
94 | <ASSISTANT_TASK:>
Python Code:
import requests #to handle http requests to the API
from psycopg2 import connect
stationid = 3
#We'll find out the full range of possible stations further down.
lineid = 1
#[1,2,4]
# The url for the request
base_url = "http://www.ttc.ca/Subway/loadNtas.action"
# Our query parameters for this API request
payload = {#"subwayLine":lineid,
"stationId":stationid,
"searchCriteria":''} #The value in the search box
#it has to be included otherwise the query fails
#"_":request_epoch} #Great job naming variables...
# subwayLine and _ are redundant variables.
# We thought we could query historical data using the "_" parameter
# But it seems no
r = requests.get(base_url, params = payload)
r.json()
data = r.json()
data['ntasData'][0]['createDate']
#Testing whether have to be explicit about line numbers for stations with multiple lines
payload = {#"subwayLine":lineid,
"stationId":10, #St. George, Line 1
"searchCriteria":''}
r = requests.get(base_url, params = payload)
r.json()
#Testing whether have to be explicit about line numbers for stations with multiple lines
payload = {#"subwayLine":lineid,
"stationId":48, #St. George, Line 2
"searchCriteria":''}
r = requests.get(base_url, params = payload)
r.json()
data = r.json()
data['ntasData'][0]['createDate'].replace('T',' ')
lines = {1: range(1, 33), #max value must be 1 greater
2: range(33, 64), # west to east (Kipling onwards)
3: range(64, 68)} # also west to east (Sheppard onwards)
def get_API_response(*args):
baseurl = "http://www.ttc.ca/Subway/loadNtas.action"
if len(args) > 1:
line_id = args[0]
        station_id = args[1]
payload = {"subwayLine":line_id,
"stationId":station_id,
"searchCriteria":''}
else:
station_id = args[0]
payload = {"stationId":station_id,
"searchCriteria":''}
r = requests.get(baseurl, params = payload)
return r.json()
def insert_request_info(con, data, line_id, station_id):
request_row = {}
request_row['data_'] = data['data']
request_row['stationid'] = station_id
request_row['lineid'] = line_id
request_row['all_stations'] = data['allStations']
request_row['create_date'] = data['ntasData'][0]['createDate'].replace( 'T', ' ')
cursor = con.cursor()
cursor.execute("INSERT INTO public.requests(data_, stationid, lineid, all_stations, create_date)"
"VALUES(%(data_)s, %(stationid)s, %(lineid)s, %(all_stations)s, %(create_date)s)"
"RETURNING requestid", request_row)
request_id = cursor.fetchone()[0]
con.commit()
return request_id
def insert_ntas_data(con, ntas_data, request_id):
cursor = con.cursor()
    sql = """INSERT INTO public.ntas_data(
             requestid, id, station_char, subwayline, system_message_type,
             timint, traindirection, trainid, train_message)
             VALUES (%(requestid)s, %(id)s, %(station_char)s, %(subwayline)s, %(system_message_type)s,
             %(timint)s, %(traindirection)s, %(trainid)s, %(train_message)s);"""
for record in ntas_data:
record_row ={}
record_row['requestid'] = request_id
record_row['id'] = record['id']
record_row['station_char'] = record['stationId']
record_row['subwayline'] = record['subwayLine']
record_row['system_message_type'] = record['systemMessageType']
record_row['timint'] = record['timeInt']
record_row['traindirection'] = record['trainDirection']
record_row['trainid'] = record['trainId']
record_row['train_message'] = record['trainMessage']
cursor.execute(sql, record_row)
con.commit()
cursor.close()
def query_stations(con, lines):
data = {}
for line_id, stations in lines.items():
for station_id in stations:
data = get_API_response(station_id)
request_id = insert_request_info(con, data, line_id, station_id)
insert_ntas_data(con, data['ntasData'], request_id)
return data, request_id
dbsettings = {'database':'ttc',
'user':'postgres'}
# 'host':'localhost'}
con = connect(database = dbsettings['database'],
user = dbsettings['user'])
#host = dbsettings['host'])
data = query_stations(con, lines) # be patient, this command can take a while to complete
data
lines = {1: [3]}# station_id = 3 (St. George), line_id = 1 (YUS)
data, request_id = query_stations(con, lines)
data
cursor = con.cursor()
cursor.execute('''SELECT timint FROM ntas_data WHERE requestid = ''' + str(request_id) + ''' limit 10''')
rows = cursor.fetchall()
print(rows)
import numpy
print(numpy.mean(rows)) # Average (expected) wait time at St. George. Note this is not the true wait time.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So now we've just received our first request from the API and the response is stored in the requests object r. From previous examination of the API we know that the response to an API request is in JSON format. So the below code will pretty print out the response so we can have a look at the variables.
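(One possible way to pretty-print the payload outside the notebook's default rendering, using the standard json module; this is a hedged sketch, not part of the original cells.)
import json
print(json.dumps(r.json(), indent=2))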
Step3: Building a scraping script
Step4: Database schema
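(The schema itself is not shown in the cells above; the following DDL is a hypothetical sketch inferred from the INSERT statements, and the column types are guesses rather than the project's actual definitions. It assumes the con connection created above.)
schema_sql = '''
CREATE TABLE IF NOT EXISTS public.requests (
    requestid    SERIAL PRIMARY KEY,
    data_        TEXT,
    stationid    INTEGER,
    lineid       INTEGER,
    all_stations TEXT,
    create_date  TIMESTAMP
);
CREATE TABLE IF NOT EXISTS public.ntas_data (
    requestid           INTEGER REFERENCES public.requests (requestid),
    id                  BIGINT,
    station_char        TEXT,
    subwayline          TEXT,
    system_message_type TEXT,
    timint              DOUBLE PRECISION,
    traindirection      TEXT,
    trainid             INTEGER,
    train_message       TEXT
);
'''
cur = con.cursor()   # assumes the connection created earlier in the code
cur.execute(schema_sql)
con.commit()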
Step5: Querying data from database
|
95 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import PolynomialFeatures
estimators = [('reduce_dim', PCA()), ('poly', PolynomialFeatures()), ('svm', SVC())]
clf = Pipeline(estimators)
clf.steps.pop(-1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
96 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncar', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
97 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
from IPython.html import widgets
from IPython.display import Image
assert True # leave this to grade the import statements
Image(url='http://talklikeaphysicist.com/wp-content/uploads/2008/05/tesla-coil.jpg',embed=True, width=600, height=600)
assert True # leave this to grade the image display
%%html
<table>
<caption>Quarks</caption>
<tr>
<th>Name</th>
<th>Symbol</th>
<th>Antiparticle</th>
<th>Charge ($e$)</th>
<th>Mass ($MeV/c^2$)</th>
</tr>
<tr>
<td>up</td>
<td>$u$</td>
<td>$\bar{u}$</td>
<td>$+\frac{2}{3}$</td>
<td>1.5-3.3</td>
</tr>
<tr>
<td>down</td>
<td>$d$</td>
<td>$\bar{d}$</td>
<td>$-\frac{1}{3}$</td>
<td>3.5-6.0</td>
</tr>
<tr>
<td>charm</td>
<td>$c$</td>
<td>$\bar{c}$</td>
<td>$+\frac{2}{3}$</td>
<td>1,160-1,340</td>
</tr>
<tr>
<td>strange</td>
<td>$s$</td>
<td>$\bar{s}$</td>
<td>$-\frac{1}{3}$</td>
<td>70-130</td>
</tr>
<tr>
<td>top</td>
<td>$t$</td>
<td>$\bar{t}$</td>
<td>$+\frac{2}{3}$</td>
<td>169,100-173,300</td>
</tr>
<tr>
<td>bottom</td>
<td>$b$</td>
<td>$\bar{b}$</td>
<td>$-\frac{1}{3}$</td>
<td>4,130-4,370</td>
</tr>
</table>
assert True # leave this here to grade the quark table
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic rich display
Step2: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn about how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
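(A minimal sketch of the approach described above - building an HTML string and handing it to IPython's HTML display object. The two-column table here is only a placeholder, not the graded quark table.)
from IPython.display import HTML
HTML('<table><tr><th>Name</th><th>Symbol</th></tr>'
     '<tr><td>up</td><td>$u$</td></tr></table>')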
|
98 | <ASSISTANT_TASK:>
Python Code:
# Imports implied by the calls below (not shown in the original cell)
import os
import boto3
import numpy as np
import pandas as pd
from botocore.handlers import disable_signing
s3_client = boto3.client('s3')
resource = boto3.resource('s3')
# Disable signing for anonymous requests to public bucket
resource.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
def file_list(client, bucket, prefix=''):
paginator = client.get_paginator('list_objects')
for result in client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter='/')['Contents']:
yield result.get('Key')
gen_s3_files = list(file_list(s3_client, 'nexrad-etl', prefix='test-aug3/'))
for i, f in enumerate(gen_s3_files):
s3_client.download_file('nexrad-etl',f,'test-aug3/nexrad{}.csv'.format(i))
folder_files = os.listdir(os.path.join(os.getcwd(), 'test-aug3'))
nexrad_df_list = list()
for f in folder_files:
if f.endswith('.csv'):
try:
nexrad_df_list.append(pd.read_csv('test-aug3/{}'.format(f)))
except:
#print(f)
pass
print(len(nexrad_df_list))
merged_nexrad = pd.concat(nexrad_df_list)
merged_nexrad['timestamp'] = pd.to_datetime(((merged_nexrad['timestamp'] / 1000) - (5*3600*1000)), unit='ms')
#merged_nexrad['timestamp'] = pd.to_datetime(merged_nexrad['timestamp'] / 1000, unit='ms')
merged_nexrad = merged_nexrad.set_index(pd.DatetimeIndex(merged_nexrad['timestamp']))
merged_nexrad = merged_nexrad.sort_values('timestamp')
merged_nexrad = merged_nexrad.fillna(0.0)
# Get diff between previous two reads
merged_nexrad['diff'] = merged_nexrad['timestamp'].diff()
merged_nexrad = merged_nexrad[1:]
print(merged_nexrad.shape)
merged_nexrad.index.min()
merged_nexrad['diff'] = (merged_nexrad['diff'] / np.timedelta64(1, 'm')).astype(float) / 60
merged_nexrad.head()
aug_day_ohare = merged_nexrad['2016-08-12'][['timestamp','60666','diff']]
aug_day_ohare.head()
aug_day_ohare['60666'] = (aug_day_ohare['60666']*aug_day_ohare['diff'])/25.4
aug_day_ohare.head()
# Checking against Weather Underground read for O'Hare on this day
print(aug_day_ohare['60666'].sum())
aug_day_ohare['60666'].plot()
wunderground = pd.read_csv('test-aug3/aug-12.csv')
wunderground['PrecipitationIn'] = wunderground['PrecipitationIn'].fillna(0.0)
wunderground['TimeCDT'] = pd.to_datetime(wunderground['TimeCDT'])
wunderground = wunderground.set_index(pd.DatetimeIndex(wunderground['TimeCDT']))
wund_hour = wunderground['PrecipitationIn'].resample('1H').max()
print(wund_hour.sum())
wund_hour.plot()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NEXRAD at O'Hare Zip 60666
Step2: Wunderground
|
99 | <ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
from pylab import *
from __future__ import print_function
import sys, os
from ipywidgets import interact,interact_manual, Image
from ptha_paths import data_dir, events_dir
# Read in topography data:
fixed_grid_file = os.path.join(data_dir, 'MapsTopo', 'fixedgrid_xyB_small.npy')
d=load(fixed_grid_file)
x=d[:,0]
y=d[:,1]
B=d[:,2]
topo = reshape(B, (250,250), order='F')
X = reshape(x, (250,250), order='F')
Y = reshape(y, (250,250), order='F')
def plot_topo():
fig = figure(figsize=(6,6))
ax = axes()
topo_clines = arange(0,20,2)
contour(X,Y,topo,topo_clines,colors='k')
CClatitude = 41.75 # to rescale longitude
ax.set_aspect(1. / cos(pi*CClatitude/180.))
ax.ticklabel_format(format='plain',useOffset=False)
return fig
CCmap = imread('%s/MapsTopo/CCimage.png' % data_dir)
extent = (235.79781, 235.82087, 41.739671,41.762726) #small region
def plot_CCmap():
fig = figure(figsize=(6,6))
ax = axes()
imshow(CCmap,extent=extent)
CClatitude = 41.75 # to rescale longitude
ax.set_aspect(1. / cos(pi*CClatitude/180.))
ax.ticklabel_format(format='plain',useOffset=False)
axis(extent)
return fig
# these levels were used in original study:
#zeta = hstack((arange(0,2.,.1), arange(2.0,12.5,.5)))
# you get nicer looking curves by using a denser set of exceedance values:
zeta = linspace(0,12,121)
nzeta = len(zeta)
print('%i exceedance values, \nzeta = %s' % (nzeta,zeta))
all_events = ['AASZa', 'AASZb', 'AASZc', 'AASZd', 'CSZa', 'CSZb', 'CSZc', 'CSZd', 'CSZe', \
'CSZf', 'KmSZa', 'KrSZa', 'SChSZa', 'TOHa']
event_prob = {}
event_prob['AASZa'] = 1./394.
event_prob['AASZb'] = 1./750.
event_prob['AASZc'] = 1./563.
event_prob['AASZd'] = 1./324.
event_prob['CSZa'] = 1./250. * .0125
event_prob['CSZb'] = 1./250. * .0125
event_prob['CSZc'] = 1./250. * .0750
event_prob['CSZd'] = 1./250. * .5000
event_prob['CSZe'] = 1./250. * .1750
event_prob['CSZf'] = 1./250. * .2250
event_prob['KmSZa'] = 1./50.
event_prob['KrSZa'] = 1./167.
event_prob['SChSZa'] = 1./300.
event_prob['TOHa'] = 1./103.
print("Annual probability of each event is set to:")
print(event_prob)
def combine_prob(p1,p2):
    """Returns the probability that event 1 or 2 happens"""
return 1. - (1-p1)*(1-p2)
events = all_events
# Instead, to use a subset of the events, specify a list such as:
#events = ['AASZa', 'AASZb', 'AASZc']
nx, ny = X.shape # note that X is a 2d array of longitude values at each point
exceed_prob = zeros((nx,ny,nzeta)) # initialize to zero
# loop over all events and update exceed_prob at each grid point by combining
# current value with the probability Pk of this event:
for event in events:
event_dir = os.path.join(events_dir, event)
hmax_file = os.path.join(event_dir, 'h_eta_small.npy')
hmax = load(hmax_file)
Hmax = hmax.reshape((nx,ny),order='F')
for k in range(nzeta):
Pk = exceed_prob[:,:,k] # probabilities at all points for one exceedance value zeta_k
exceed_prob[:,:,k] = where(Hmax > zeta[k], combine_prob(event_prob[event],Pk), Pk)
print("Computed exceedance probabilities. \nMaximum over all grid points is %g" % exceed_prob.max())
dx = X[1,0] - X[0,0]
dy = Y[0,1] - Y[0,0]
nx, ny = X.shape
xmin = X.min(); xmax = X.max()
ymin = Y.min(); ymax = Y.max()
def plot_hcurve(longitude, latitude):
i = int(round((longitude - X[0,0]) / dx))
j = int(round((latitude - Y[0,0]) / dy))
if (i<0) or (i>=nx) or (j<0) or (j>=ny):
print("out of domain")
return
fig = figure(figsize=(10,5))
subplot(1,2,1)
p = maximum(exceed_prob[i,j,:], 1e-10)
semilogy(zeta, p, 'b')
ylim(1e-5,1)
xlabel('zeta in meters')
ylabel('annual probability')
title('Hazard Curve')
# Also plot the CC image with a red dot showing the location:
ax = subplot(1,2,2)
imshow(CCmap,extent=extent)
CClatitude = 41.75 # to rescale longitude
ax.set_aspect(1. / cos(pi*CClatitude/180.))
ax.ticklabel_format(format='plain',useOffset=False)
xticks(rotation=20)
plot([longitude], [latitude], 'ro')
xlim(xmin,xmax)
ylim(ymin,ymax)
title('Location')
#return fig
fig = plot_hcurve(235.805, 41.75)
dx = X[1,0] - X[0,0]
dy = Y[0,1] - Y[0,0]
nx, ny = X.shape
xmin = X.min(); xmax = X.max()
ymin = Y.min(); ymax = Y.max()
xmean = X.mean()
ymean = Y.mean()
def p_haz_curve(longitude,latitude):
i = int(round((longitude - X[0,0]) / dx))
j = int(round((latitude - Y[0,0]) / dy))
if (i<0) or (i>=nx) or (j<0) or (j>=ny):
print("out of domain")
p = maximum(exceed_prob[i,j,:], 1e-10)
return p
fig = figure(figsize=(10,5))
ax1 = subplot(1,2,1)
p = p_haz_curve(xmean, ymean)
line = semilogy(zeta, p, 'b')
ylim(1e-5,1)
xlabel('zeta in meters')
ylabel('annual probability')
title('Hazard Curve')
# Also plot the CC image with a red dot showing the location:
ax2 = subplot(1,2,2)
imshow(CCmap,extent=extent)
CClatitude = 41.75 # to rescale longitude
ax2.set_aspect(1. / cos(pi*CClatitude/180.))
ax2.ticklabel_format(format='plain',useOffset=False)
xticks(rotation=20)
point = ax2.plot([xmean], [ymean], 'ro')
xlim(xmin,xmax)
ylim(ymin,ymax)
title("Click on a point below")
def onclick(event):
point[0].set_data([[event.xdata],[event.ydata]])
line[0].set_data([zeta, p_haz_curve(event.xdata, event.ydata)])
ax1.set_title("Hazard curve at \nx = %6.3f, y = %6.3f" % (event.xdata, event.ydata))
draw()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
interact_manual(plot_hcurve, longitude=(xmin,xmax,.001),latitude=(ymin,ymax,0.001));
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in the topography data and define a function to make a contour plot
Step2: Read in image of Crescent City as background for plots
Step3: Set the exceedance values
Step4: Set the desired annual probability for each event
Step6: Define a function to combine two events
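(A quick numeric check of the combination rule used above, P(A or B) = 1 - (1-pA)(1-pB) for independent events; the two probabilities here are made up for illustration.)
print(combine_prob(0.01, 0.004))   # 0.01396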
Step7: Specify the set of events to include in computing hazard curves
Step8: Compute the combined probability of exceeding each exceedance value
Step9: Plot hazard curves
Step10: Plot the hazard curve for one location
Step11: Version that allows clicking on a point
Step12: Interactive viewer to move the point around
|