Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (string, 67 to 124k chars, ⌀ = null) | cleaned_prompt (string, 168 to 30.3k chars, ⌀ = null)
---|---|---|
500 | <ASSISTANT_TASK:>
Python Code:
from google.cloud import storage
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
# Public bucket holding the census data
bucket = storage.Client().bucket('cloud-samples-data')
# Path to the data inside the public bucket
blob = bucket.blob('ml-engine/sonar/sonar.all-data')
# Download the data
blob.download_to_filename('sonar.all-data')
# Define the dataset to be used by PyTorch
class SonarDataset(Dataset):
def __init__(self, csv_file):
self.dataframe = pd.read_csv(csv_file, header=None)
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
# When iterating through the dataset get the features and targets
features = self.dataframe.iloc[idx, :-1].values.astype(dtype='float64')
# Convert the targets to binary values:
# R = rock --> 0
# M = mine --> 1
target = self.dataframe.iloc[idx, -1:].values
if target[0] == 'R':
target[0] = 0
elif target[0] == 'M':
target[0] = 1
target = target.astype(dtype='float64')
# Load the data as a tensor
data = {'features': torch.from_numpy(features),
'target': target}
return data
# Load the data
sonar_dataset = SonarDataset('./sonar.all-data')
# Create indices for the split
dataset_size = len(sonar_dataset)
test_size = int(0.2 * dataset_size) # Use a test_split of 0.2
train_size = dataset_size - test_size
# Split the dataset
train_dataset, test_dataset = random_split(sonar_dataset,
[train_size, test_size])
# Create our Dataloaders for training and test data
train_loader = DataLoader(
train_dataset,  # use the training split produced by random_split, not the full dataset
batch_size=4,
shuffle=True)
test_loader = DataLoader(
test_dataset,  # use the held-out test split
batch_size=4,
shuffle=True)
torch.manual_seed(42)
# Create the Deep Neural Network
class SonarDNN(nn.Module):
def __init__(self):
super(SonarDNN, self).__init__()
self.net = nn.Sequential(
nn.Linear(60, 60),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Linear(60, 30),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Linear(30, 1),
nn.Sigmoid()
)
def forward(self, x):
return self.net(x)
# Create the model
net = SonarDNN().double()
optimizer = optim.SGD(net.parameters(),
lr=0.01,
momentum=0.5,
nesterov=False)
def train(net, train_loader, optimizer, epoch):
"""Create the training loop"""
net.train()
criterion = nn.BCELoss()
running_loss = 0.0
for batch_index, data in enumerate(train_loader):
features = data['features']
target = data['target']
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(features)
loss = criterion(outputs, target)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if batch_index % 6 == 5: # print every 6 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch, batch_index + 1, running_loss / 6))
running_loss = 0.0
def test(net, test_loader):
"""Test the DNN"""
isp = False
net.eval()
criterion = nn.BCELoss() # https://pytorch.org/docs/stable/nn.html#bceloss
test_loss = 0
correct = 0
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
features = data['features']
target = data['target']
if not isp:
isp = True
print(features)
print(target)
output = net(features)
test_loss += criterion(output, target) # sum up batch loss (computed before binarizing)
# Binarize the output (apply_ modifies the tensor in place, so do this after the loss)
pred = output.apply_(lambda x: 0.0 if x < 0.5 else 1.0)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
correct,
(len(test_loader) * test_loader.batch_size),
100. * correct / (len(test_loader) * test_loader.batch_size)))
epochs = 10
for epoch in range(1, epochs + 1):
train(net, train_loader, optimizer, epoch)
test(net, test_loader)
torch.save(net.state_dict(), 'model.pth')
! ls -al model.pth
rock_feature = torch.tensor([[3.6800e-02, 4.0300e-02, 3.1700e-02, 2.9300e-02, 8.2000e-02, 1.3420e-01,
1.1610e-01, 6.6300e-02, 1.5500e-02, 5.0600e-02, 9.0600e-02, 2.5450e-01,
1.4640e-01, 1.2720e-01, 1.2230e-01, 1.6690e-01, 1.4240e-01, 1.2850e-01,
1.8570e-01, 1.1360e-01, 2.0690e-01, 2.1900e-02, 2.4000e-01, 2.5470e-01,
2.4000e-02, 1.9230e-01, 4.7530e-01, 7.0030e-01, 6.8250e-01, 6.4430e-01,
7.0630e-01, 5.3730e-01, 6.6010e-01, 8.7080e-01, 9.5180e-01, 9.6050e-01,
7.7120e-01, 6.7720e-01, 6.4310e-01, 6.7200e-01, 6.0350e-01, 5.1550e-01,
3.8020e-01, 2.2780e-01, 1.5220e-01, 8.0100e-02, 8.0400e-02, 7.5200e-02,
5.6600e-02, 1.7500e-02, 5.8000e-03, 9.1000e-03, 1.6000e-02, 1.6000e-02,
8.1000e-03, 7.0000e-03, 1.3500e-02, 6.7000e-03, 7.8000e-03, 6.8000e-03]], dtype=torch.float64)
rock_prediction = net(rock_feature)
mine_feature = torch.tensor([[5.9900e-02, 4.7400e-02, 4.9800e-02, 3.8700e-02, 1.0260e-01, 7.7300e-02,
8.5300e-02, 4.4700e-02, 1.0940e-01, 3.5100e-02, 1.5820e-01, 2.0230e-01,
2.2680e-01, 2.8290e-01, 3.8190e-01, 4.6650e-01, 6.6870e-01, 8.6470e-01,
9.3610e-01, 9.3670e-01, 9.1440e-01, 9.1620e-01, 9.3110e-01, 8.6040e-01,
7.3270e-01, 5.7630e-01, 4.1620e-01, 4.1130e-01, 4.1460e-01, 3.1490e-01,
2.9360e-01, 3.1690e-01, 3.1490e-01, 4.1320e-01, 3.9940e-01, 4.1950e-01,
4.5320e-01, 4.4190e-01, 4.7370e-01, 3.4310e-01, 3.1940e-01, 3.3700e-01,
2.4930e-01, 2.6500e-01, 1.7480e-01, 9.3200e-02, 5.3000e-02, 8.1000e-03,
3.4200e-02, 1.3700e-02, 2.8000e-03, 1.3000e-03, 5.0000e-04, 2.2700e-02,
2.0900e-02, 8.1000e-03, 1.1700e-02, 1.1400e-02, 1.1200e-02, 1.0000e-02]], dtype=torch.float64)
mine_prediction = net(mine_feature)
# Note: Try increasing the number of epochs above to see more accurate results.
print('Result Values: (Rock: 0) - (Mine: 1)')
print('Rock Prediction:\n\t{} - {}'.format('Rock' if rock_prediction <= 0.5 else 'Mine', rock_prediction.item()))
print('Mine Prediction:\n\t{} - {}'.format('Rock' if mine_prediction <= 0.5 else 'Mine', mine_prediction.item()))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add code to download the data from GCS (in this case, using the publicly hosted data). You will then be able to use the data when training your model.
Step2: Read in the data
Step3: This is where your model code would go. Below is an example model using the census dataset.
Step5: Define the training loop
Step7: Define the testing loop
Step8: Train / Test the model
Step9: Export the trained model
Step10: Run a simple prediction with set values
|
501 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import seaborn as snb
import numpy as np
import matplotlib.pyplot as plt
def create_plot():
x = np.arange(0.0, 10.0, 0.1)
plt.plot(x, x**2)
plt.xlabel("$x$")
plt.ylabel("$y=x^2$")
create_plot()
plt.show()
def save_to_file(filename, fig=None):
"""Save to @filename with a custom set of file formats.
By default, this function takes the most recent figure,
but a @fig can also be passed to this function as an argument.
"""
formats = [
"pdf",
"eps",
"png",
"pgf",
]
if fig is None:
for form in formats:
plt.savefig("%s.%s"%(filename, form))
else:
for form in formats:
fig.savefig("%s.%s"%(filename, form))
create_plot()
save_to_file("simple_plot")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating a quatratic plot
Step3: Save the figure
Step4: And it can be easily saved with
|
502 | <ASSISTANT_TASK:>
Python Code:
import math
import datetime ## to deal with dates
from IPython.display import Image
# will return True if a year is a leap year on Mars
def is_leap_year_mars(year):
if year % 3000 == 0:
return False
elif year % 1000 == 0:
return True
elif year % 100 == 0:
return False
elif (year % 2 != 0) or (year % 10 == 0):
return True
else:
return False
def total_days(year):
return 668 + is_leap_year_mars(year)
print total_days(10)
print total_days(11)
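# Walking through the leap-year rules above: year 10 is divisible by 10, so it
# is a leap year (669 sols); year 11 is odd, so it is also a leap year
# (669 sols); year 100 is divisible by 100 but not by 1000, so it would be a
# common year (668 sols).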
def month_total_days(year,month):
if is_leap_year_mars(year):
if month == 24:
return 28
else:
return 27 + ((month)%6!=0)
if not is_leap_year_mars(year):
return 27 + ((month)%6!=0)
print month_total_days(1,24)
# returns the month and date given a number of dates.
# calendar_date(a number of days, which year we are calculating)
def calendar_date(days, year):
if days > total_days(year):
return "Error! Given number of days are more than the overall possible days of the year."
else:
for month in range(24):
month = month + 1.0
if month == 24: # the last month is special because it might change with year
return tuple([int(month),int(days)]) # using tuple to avoid modification
elif ((month) < 24) & ( (days) < (27 + 1 + (month%6!=0))): # the condition for days makes sure that days is never larger than days in that month
return tuple([int(month),int(days)])
break
elif ((month) < 24) & ((days) > (27 + (month%6!=0))):
days = days - 27 - ((month)%6!=0)
continue
else:
return "Unhandled in calendar_date(days, year) function! (inner for loop) "
break
print "the 669th day of the tenth year is (month:{}, day:{})".format(calendar_date(669,10)[0],calendar_date(669,10)[1])
print "the 669th day of year one is (month:{}, day:{})".format(calendar_date(669,1)[0],calendar_date(669,1)[1]) # should return error message
print "the 1000th day of year one is (month:{}, day:{})".format(calendar_date(1000,1)[0],calendar_date(1000,1)[1]) # should return error message
print "the 30th day of year one is (month:{}, day:{})".format(calendar_date(30,1)[0],calendar_date(30,1)[1])
calendar_date(1,1)
m2e = 88775.0/86400 # the ratio of mars sol day and earth 24-hour day or we can use 1.027491252
print m2e
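# For reference, the figures behind this ratio: one Martian sol is
# 24h 39m 35.244s = 88775.244 s and one Earth day is 86400 s, so the precise
# ratio is 88775.244 / 86400 ≈ 1.027491252 (the alternative value mentioned
# above), while the rounded 88775.0 / 86400 used here gives ≈ 1.0274884.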
mars_sol_day_coor = math.floor(10846/m2e)
print mars_sol_day_coor
calendar_date(534,16)
Image(url="https://raw.githubusercontent.com/InterImm/marsCalendar/master/py/assets/timeCalibration.png")
earth_day_coor = mars_sol_day_coor * m2e
print earth_day_coor
def julian_days(cdate): # Input is calendar date with the list format [year like 2014, month like 07, day like 24, hours like 12, min]
y = cdate[0]
m = cdate[1]
d = cdate[2]
h = cdate[3] + cdate[4]/60.0
return 367.0 * y - math.floor(7.0 * ( y * 1.0 + math.floor( ( m*1.0 + 9.0 )/12.0 ) )/4.0 ) - math.floor(3.0*(math.floor( (y*1.0 + (m*1.0 - 9.0)/7.0 )/100.0 ) + 1.0 )/4.0 ) + math.floor( 275.0 * m / 9.0 ) + d*1.0 + 1721028.5 + h / 24.0 ## Using an equation from http://scienceworld.wolfram.com/astronomy/JulianDate.html
julian_days([1970,1,1,0,0])
julian_coor_earth = julian_days([1970.0,4.0,28.0,21.0,17.0 + 30.0/60]) # 1970.0,4.0,28.0,20.0 + 38.0/60 + 55.0/3600
print julian_coor_earth
def julian_diff(datelist):
return julian_days(datelist) - julian_coor_earth
julian_diff([1970.0,4.0,28.0,21,17])
def mars_days_origin(datelist):
return math.floor(julian_diff(datelist)/m2e)
mars_days_origin([1970.0,4.0,28.0,21,17])
def mars_days_origin_rem(datelist):
return julian_diff(datelist)/m2e - math.floor(julian_diff(datelist)/m2e)
def earth2mars_calendar(datelist):
year = 1
md = mars_days_origin(datelist)
while (md > (668.0 + is_leap_year_mars(year) ) ):
year = year + 1
md = md - 668 - is_leap_year_mars(year)
return [year,calendar_date(md+1,year-1)]
earth2mars_calendar([1970,4,29,20,0])
earth2mars_calendar([1971,4,30,23,23])
earth2mars_calendar([2000,1,6,0,0])
24 * 3600 + 39 * 60 + 35.244
def mday2time(remainder): # input a number smaller than 1.
if remainder > 1:
return "Input should a fraction of a martian day, i.e., a number smaller than 1."
else:
total_sec = 24.0 * 3600 + 39.0 * 60 + 35.244 # total seconds in a martian day
seconds = remainder * total_sec # total seconds of the given input
hour = int(math.floor(seconds / 3600.0)) # hour
min = int(math.floor( (seconds - 3600.0 * hour)/60 ) ) # min
sec = int(seconds - hour * 3600.0 - min * 60.0)
return [hour, min, sec]
mday2time(0.3)
def earth2mars_calendar_time(datelist):
return mday2time(mars_days_origin_rem(datelist))
earth2mars_calendar_time([1971,4,30,23,59])
earth2mars_calendar_time([2000,1,6,0,0])
mars_days_origin_rem([2000,1,6,0,0])
10846/m2e
earth2mars_calendar_time([1970.0,4.0,28.0,20,55])
month_name = ('春分','清明','谷雨','立夏','小满','芒种','夏至','小暑','大暑','立秋','处暑','白露','秋分','寒露','霜降','立冬','小雪','大雪','冬至','小寒','大寒','立春','雨水','惊蛰')
week_name = ('六','日','一','二','三','四','五')
def chinese_character(mars_calendar,mars_time):
calendar_str = "火历"+str(mars_calendar[0])+"年 "+str(month_name[mars_calendar[1][0]-1])+"月"+str(int(mars_calendar[1][1]))+"日"+"星期"+str(week_name[int(mars_calendar[1][1])%7])
if mars_time[0] < 24.0:
if mars_time[2] < 10:
return calendar_str+" "+str(int(mars_time[0]))+":"+str(int(mars_time[1]))+":0"+str(int(mars_time[2]))
else:
return calendar_str+" "+str(int(mars_time[0]))+":"+str(int(mars_time[1]))+":"+str(int(mars_time[2]))
else:
if mars_time[2] < 10:
return calendar_str+" +"+str(int(mars_time[1]))+":0"+str(int(mars_time[2]))
else:
return calendar_str+" +"+str(int(mars_time[1]))+":"+str(int(mars_time[2]))
print chinese_character([16, (20, 4.0)],[24.0, 17.0, 5.240289495923207])
print chinese_character([1, (1, 1.0)],[7.0, 10.0, 15.070938780310826])
print chinese_character(earth2mars_calendar([2014,10,13,23,52]),earth2mars_calendar_time([2014,10,13,23,52]))
earth2mars_calendar_time([2000,1,6,0,0])
earth2mars_calendar_time([2014,10,13,21,1])
test11 = (12.0 + 46.0/60)/24 * (24.0*m2e)
print str(test11) + "h" + "=" + str(int(math.floor(test11))) + ":" + str(int((test11 - math.floor(test11))*60))
earth2mars_calendar_time([2020,1,1,0,0])
test12 = (16.0 + 23.0/60) * m2e
print str(test12) + "h" + "=" + str(int(math.floor(test12))) + ":" + str(int((test12 - math.floor(test12))*60))
def mars242iic(hour):
print str(hour) + "h" + "=" + str(int(math.floor(hour))) + ":" + str(int((hour - math.floor(hour))*60))
mars242iic(test12)
earth2mars_calendar_time([2030,1,1,0,0])
mars242iic((22.0+40.0/60)*m2e)
earth2mars_calendar_time([2030,1,1,0,0])[0] + earth2mars_calendar_time([2030,1,1,0,0])[1]/60 - (22.0+40.0/60)*m2e
def mars24_iic_diff(datelist,result):
print earth2mars_calendar_time(datelist)[0] + earth2mars_calendar_time(datelist)[1]/60 - (result[0]*1.0 + result[1]*1.0/60.0)*m2e
mars24_iic_diff([2020,1,1,0,0],[16,23])
mars24_iic_diff([2021,1,1,0,0],[21,22])
mars24_iic_diff([2022,1,1,0,0],[2,29])
mars24_iic_diff([2023,1,1,0,0],[8,26])
mars24_iic_diff([2030,1,1,0,0],[22,40])
mars24_iic_diff([2040,1,1,0,0],[5,35])
mars24_iic_diff([2050,1,1,0,0],[11,51])
mars24_iic_diff([2021,2,7,0,0],[21,36])
mars24_iic_diff([2022,12,26,0,0],[12,28])
mars24_iic_diff([2021,8,25,0,0],[13,49])
mars24_iic_diff([2023,7,12,0,0],[5,19])
mars24_iic_diff([2022,2,24,0,0],[16,19])
mars24_iic_diff([2024,1,12,0,0],[7,10])
mars24_iic_diff([2022,7,21,0,0],[17,55])
mars24_iic_diff([2024,6,7,0,0],[8,46])
def mars_date2days(datelist): # given a date, calculate the days since the first calendar day. [martian year, month in number, day, hour, min]
y = datelist[0]
m = datelist[1]
d = datelist[2]
h = datelist[3] * 1.0
min = datelist[4] * 1.0
year = 1
days = 0
while year < y:
days = days + 668 + is_leap_year_mars(year)
year = year + 1
month = 1
while month < m:
days = days + 27 + (month%6!=0)
month = month + 1
days = days + d + ( h + min/60.0 )/(24*m2e)
return days - 1
mars_date2days([1,1,1,0,0])
mars_date2days([1,1,1,24,39])
mars_date2days([12,24,28,24,39.5])
mars_date2days([1,1,1,24,39]) * m2e + julian_coor_earth
def ipart(x):
"""Return integer part of given number."""
return math.modf(x)[1]
def jd2gcal(jd1,jd2):
"""Julian date to Gregorian calendar date and time of day.
The input and output are for the proleptic Gregorian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
jd1, jd2: int
Sum of the two numbers is taken as the given Julian date. For
example `jd1` can be the zero point of MJD (MJD_0) and `jd2`
can be the MJD of the date and time. But any combination will
work.
Returns
-------
y, m, d, f : int, int, int, float
Four element tuple containing year, month, day and the
fractional part of the day in the Gregorian calendar. The first
three are integers, and the last part is a float.
Examples
--------
>>> jd2gcal(*gcal2jd(2000,1,1))
(2000, 1, 1, 0.0)
>>> jd2gcal(*gcal2jd(1950,1,1))
(1950, 1, 1, 0.0)
Out of range months and days are carried over to the next/previous
year or next/previous month. See gcal2jd for more examples.
>>> jd2gcal(*gcal2jd(1999,10,12))
(1999, 10, 12, 0.0)
>>> jd2gcal(*gcal2jd(2000,2,30))
(2000, 3, 1, 0.0)
>>> jd2gcal(*gcal2jd(-1999,10,12))
(-1999, 10, 12, 0.0)
>>> jd2gcal(*gcal2jd(2000, -2, -4))
(1999, 9, 26, 0.0)
>>> gcal2jd(2000,1,1)
(2400000.5, 51544.0)
>>> jd2gcal(2400000.5, 51544.0)
(2000, 1, 1, 0.0)
>>> jd2gcal(2400000.5, 51544.5)
(2000, 1, 1, 0.5)
>>> jd2gcal(2400000.5, 51544.245)
(2000, 1, 1, 0.24500000000261934)
>>> jd2gcal(2400000.5, 51544.1)
(2000, 1, 1, 0.099999999998544808)
>>> jd2gcal(2400000.5, 51544.75)
(2000, 1, 1, 0.75)
Notes
-----
The last element of the tuple is the same as
(hh + mm / 60.0 + ss / 3600.0) / 24.0
where hh, mm, and ss are the hour, minute and second of the day.
See Also
--------
gcal2jd
"""
from math import modf
jd1_f, jd1_i = modf(jd1)
jd2_f, jd2_i = modf(jd2)
jd_i = jd1_i + jd2_i
f = jd1_f + jd2_f
# Set JD to noon of the current date. Fractional part is the
# fraction from midnight of the current date.
if -0.5 < f < 0.5:
f += 0.5
elif f >= 0.5:
jd_i += 1
f -= 0.5
elif f <= -0.5:
jd_i -= 1
f += 1.5
l = jd_i + 68569
n = ipart((4 * l) / 146097.0)
l -= ipart(((146097 * n) + 3) / 4.0)
i = ipart((4000 * (l + 1)) / 1461001)
l -= ipart((1461 * i) / 4.0) - 31
j = ipart((80 * l) / 2447.0)
day = l - ipart((2447 * j) / 80.0)
l = ipart(j / 11.0)
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l
return int(year), int(month), int(day), f
def julian_days2gcal(jd): # jd is the julian day difference from julian_coor_earth
tmp = jd2gcal(julian_coor_earth,jd)
return (tmp[0],tmp[1],tmp[2],math.floor(tmp[3]*24.0),math.floor((tmp[3]*24.0 - math.floor(tmp[3]*24.0))*60.0))
julian_days2gcal(0)
def mars2earth_calendar(datelist): # given a mars calendar date list [year, month, day, hour, min], calculate the corresponding earth g calendar
jd_diff = mars_date2days(datelist) * m2e # julian days difference from the beginning of martian calendar: julian_coor_earth
return julian_days2gcal(jd_diff)
mars2earth_calendar([1,1,1,0,0])
zoneDegree = 15/m2e
def mars_time_zones(longitude): # given a longitude on Mars, return the time zone
if not(0 <= longitude < 360):
return "Wrong input(0 <= longitude < 360)"
else:
n = longitude/zoneDegree
if math.floor(n) < 24:
return int(n)
else:
return 24
mars_time_zones(351)
time_zones_list = []
for n in range(1,25):
time_zones_list += [15/m2e*n]
time_zones_list
zoneDegree25 = 360 - time_zones_list[23]
print zoneDegree25
def days2mars_date(days):
year = 1
month = 1
hour = 0
minute = 0
while days >= total_days(year):
days -= total_days(year)
year += 1
while days >= month_total_days(year,month):
days -= month_total_days(year,month)
month += 1
remainder = days - math.floor(days)
day = int(math.floor(days)) + 1
time = remainder * 86400 * m2e
while time >= 3600:
time -= 3600
hour += 1
while time >= 60:
time -=60
minute += 1
return [year,month,day,hour,minute]
mars_date2days([12,12,12,24,39])
days2mars_date(7673.999605744861)
def mars_time_zones_convert(datelist,timezones):
time = mars_date2days(datelist)
time += timezones / (24.0 * m2e)
return days2mars_date(time)
mars_time_zones_convert([12, 12, 12, 24, 39],24)
mars_time_zones_convert([1, 1, 1, 0, 0],1)
def earth_time_zones_convert(datelist,timezones):
time = julian_days([datelist[0],datelist[1],datelist[2],datelist[3],datelist[4]])
time -= timezones / 24.0
timelist = jd2gcal(2400000.5,time - 2400000.5)
hour = math.floor(timelist[3]*24)
minute = round((timelist[3]*24 - hour)*60)
return [timelist[0],timelist[1],timelist[2],int(hour),int(minute)]
earth_time_zones_convert([1970,1,1,5,5],+8.5)
def earth2mars_timezones(datelist,earthTimezones,marsTimezones):
Earthdate = earth_time_zones_convert(datelist,earthTimezones)
calendar = earth2mars_calendar(Earthdate)
time = earth2mars_calendar_time(Earthdate)
Marsdate = [calendar[0],calendar[1][0],calendar[1][1],time[0],time[1]]
return mars_time_zones_convert(Marsdate,marsTimezones)
print chinese_character(earth2mars_calendar([2014,10,16,23,50]),earth2mars_calendar_time([2014,10,16,23,50]))
earth2mars_timezones([2014,10,16,23,50],0,0)
earth2mars_timezones([2014,10,17,0,0],+8,mars_time_zones(137.4))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Martian calendar
Step2: So the total number of days in a year can be expressed as
Step3: Test these two functions.
Step4: Calculating Martian months and days
Step5: Test this function
Step6: We can see the function correctly returns the month and day as well as the error message.
Step7: Martian year one and the calibration date
Step8: Note that the division inside floor() does not divide evenly; this is because Martian year one does not start exactly at 00:00 on 1970-04-28, so we discard the trailing decimal part here.
Step9: In other words, it is day 5 of the 20th month of the Martian calendar, i.e. day 5 of the Xiaohan (Minor Cold) month.
Step10: Since our calculation only considered mars_sol_day_coor = 10555.0 Martian sols, we now convert back to Earth days and count backwards from 2000-01-06 (inclusive) on the Earth calendar to find the corresponding Earth date and time.
Step11: Simple arithmetic tells us the Earth calendar date corresponding to year one is
Step12: As a test, we know that UNIX systems use 1970-01-01, 00
Step13: This agrees with the known result. Note that when using this calculation, minutes and seconds must first be converted into fractional hours.
Step14: Therefore we can define a function to compute the number of Earth days
Step15: Similarly we can define a function that converts to the number of Martian sols
Step16: Test this function
Step17: To compute the time of day, we need the fractional part that was discarded by floor.
Step18: Then we can use the calendar_date() function to give the corresponding Martian calendar date.
Step19: Then the Earth date of midnight, 2000-01-06, given by this function must correspond to day 05 of the Xiaohan month of Martian year 0016. The test is below
Step20: Martian time of day
Step21: The mars_days_origin_rem() function can be used to compute the Martian time of day at the Martian prime meridian (0° longitude) corresponding to an Earth UTC date.
Step22: Test the conversion from the Earth calendar to the Martian calendar
Step23: The AMT given by Mars24 is 12
Step24: So the difference is about 21 minutes.
Step25: For convenience of testing, define a function that converts between Mars24's 24-hour timekeeping and our own timekeeping.
Step26: Test several years to see whether the "error" grows over time. If it does, it is likely that some constant is wrong or that some variable lacks precision.
Step27: Mars24 gives 22
Step28: The difference is
Step29: Looking at it this way, the trend does not seem to keep growing; it keeps fluctuating instead. We therefore suspect this is because Mars24 AMT uses the local apparent solar time while we use the mean solar day. How can we verify this? We can look over the period of one Martian year.
Step30: This shows the difference is not caused by the variation of the solar day, but all fluctuations stay within one hour.
Step31: The Julian days corresponding to Martian sols can be converted directly by multiplying by m2e.
Step34: Once we have the Julian day, the next step is just to find the corresponding date.
Step35: Test this function
Step36: This result corresponds exactly to the starting moment of our Martian year one.
Step37: Time zone division
Step38: For later plotting convenience, build a list of time zones
Step39: The longitude range of the last zone is:
Step40: Use another function to do the time conversion:
Step41: Define another simple Earth time-zone conversion function:
Step42: Finally, define a function that converts a time in any Earth time zone to a time in any Martian time zone:
Step43: After repeated testing, the minutes from earth2mars_timezones() and earth2mars_calendar_time() sometimes differ by 1 minute
|
503 | <ASSISTANT_TASK:>
Python Code:
!git clone https://bitbucket.org/luisfernando/html2pdf.git
%%!
echo "Install Xvfd:"
sudo apt-get install xvfb
echo "Install Fonts:"
sudo apt-get install xfonts-100dpi xfonts-75dpi xfonts-scalable xfonts-cyrillic
echo "Install wkhtmltopdf:"
sudo apt-get install wkhtmltopdf
%%!
source activate GISpark
pip install html2pdf
from wkhtmltopdf import HTMLURLToPDF
make_pdf = HTMLURLToPDF(
url='http://www.example.com',
output_file='~/example.pdf',
)
make_pdf.render()
from html2pdf import HTMLToPDF
HTML = """
<!DOCTYPE html>
<html>
<body>
<h1>Hello World</h1>
</body>
</html>
"""
h = HTMLToPDF(HTML, 'hello_world.pdf')  # any writable output path works here
!wkhtmltopdf
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: python-wkhtmltopdf (Any Platform)
Step2: Render a URL
Step4: Render a HTML string
|
504 | <ASSISTANT_TASK:>
Python Code:
import larch, numpy, pandas, os
from larch import P, X
larch.__version__
hh, pp, tour, skims, emp = larch.example(200, ['hh', 'pp', 'tour', 'skims', 'emp'])
logsums_file = larch.example(202, output_file='logsums.pkl.gz')
logsums = pandas.read_pickle(logsums_file)
raw = tour.merge(hh, on='HHID').merge(pp, on=('HHID', 'PERSONID'))
raw["HOMETAZi"] = raw["HOMETAZ"] - 1
raw["DTAZi"] = raw["DTAZ"] - 1
raw = raw[raw.TOURPURP == 1]
raw.index.name = 'CASE_ID'
distance = pandas.DataFrame(
data=skims.AUTO_DIST[:][raw["HOMETAZi"], :],
index=raw.index,
columns=skims.TAZ_ID,
)
ca = pandas.concat([
distance.stack().rename("distance"),
logsums.stack().rename("logsum"),
], axis=1).rename_axis(index=('caseid', 'TAZ_ID'))
ca.info()
emp.info()
ca.index.names
ca = ca.join(emp, on='TAZ_ID')
dfs = larch.DataFrames(
co=raw,
ca=ca,
alt_codes=skims.TAZ_ID,
alt_names=[f'TAZ{i}' for i in skims.TAZ_ID],
ch_name='DTAZ',
av=1,
)
dfs.info(1)
m = larch.Model(dataservice=dfs)
m.title = "Exampville Work Tour Destination Choice v1"
m.quantity_ca = (
+ P.EmpRetail_HighInc * X('RETAIL_EMP * (INCOME>50000)')
+ P.EmpNonRetail_HighInc * X('NONRETAIL_EMP') * X("INCOME>50000")
+ P.EmpRetail_LowInc * X('RETAIL_EMP') * X("INCOME<=50000")
+ P.EmpNonRetail_LowInc * X('NONRETAIL_EMP') * X("INCOME<=50000")
)
m.quantity_scale = P.Theta
m.utility_ca = (
+ P.logsum * X.logsum
+ P.distance * X.distance
)
m.lock_values(
EmpRetail_HighInc=0,
EmpRetail_LowInc=0,
)
m.load_data()
m.loglike()
m.maximize_loglike()
m.calculate_parameter_covariance()
m.distribution_on_idca_variable('distance')
m.distribution_on_idca_variable('distance', bins=40, range=(0,10))
m.distribution_on_idca_variable(
'distance',
style='kde',
)
m.distribution_on_idca_variable(
'distance',
xlabel="Distance (miles)",
bins=26,
subselector='INCOME<10000',
range=(0,13),
header='Destination Distance, Very Low Income (<$10k) Households',
)
report = larch.Reporter(title=m.title)
report << '# Parameter Summary' << m.parameter_summary()
report << "# Estimation Statistics" << m.estimation_statistics()
report << "# Utility Functions" << m.utility_functions()
figure = m.distribution_on_idca_variable(
'distance',
xlabel="Distance (miles)",
style='kde',
header='Destination Distance',
)
report << "# Visualization"
report << figure
report.save(
'exampville_dest_choice.html',
overwrite=True,
metadata=m,
)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this example notebook, we will walk through the estimation of a tour
Step2: For this destination choice model, we'll want to use the mode choice
Step3: Preprocessing
Step4: The alternatives in
Step5: This command pulls the relevant row, identified by the "HOMETAZi" column
Step6: For our destination choice model, we'll also want to use employment data.
Step7: To make this work with the computational
Step8: Knowing the name on the alternatives portion of the idca data lets us
Step9: Then we bundle the raw data along with this newly organized idca data,
Step10: Model Definition
Step11: Model Estimation
Step12: Model Visualization
Step13: The distribution_on_idca_variable has a variety of options,
Step14: Alternatively, the histogram style can be swapped out for a smoothed kernel density
Step15: Subsets of the observations can be pulled out, to observe the
Step16: Save and Report Model
Step17: The figures shown above can also be inserted directly into reports.
|
505 | <ASSISTANT_TASK:>
Python Code:
# scientific python
import numpy as np
import scipy as sp
# interactive plotting
%pylab inline
# Create a random array of size 3 x 5
X = np.random.random((3, 5))
# Create an array of zeros of size 3 x 5
np.zeros((3, 5))
# Create an array of ones of size 3 x 5
np.ones((3, 5))
# Create the identity matrix of size 4 x 4
np.eye(4)
# Visualize X
print X
# The dimensions of X are accessible via
print X.shape
# The total number of elements of X are accessible via
print X.size
# Get a single element: X[0,1]
print X[0, 1]
# Get a row
print X[0, :]
print X[0]
print "shape of a row vector:", X[0].shape
# Get a column
print X[:, 3]
# Transposing an array
print X.T
# Applying the same transformation to all entries in an array
# Multiply all entries of X by 2:
print 2*X
# Add 1 to all entries of x
# Compute the array that has as entries the logarithm (base 2) of the entries of X
# Square all entries of X
# Compute the array that has as entries the logarithm (base 10) of the entries of X
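# One possible set of answers to the four exercises above:
print X + 1 # add 1 to every entry
print np.log2(X) # element-wise base-2 logarithm
print X**2 # element-wise square
print np.log10(X) # element-wise base-10 logarithm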
# Element-wise matrix multiplication
print X*X
# Matrix multiplication
print np.dot(X, X.T)
print X.dot(X.T)
# Create a random array B of size 5 x 4
# Multiply X by B
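# One possible answer to the two exercises above:
B = np.random.random((5, 4))
print X.dot(B) # (3 x 5) times (5 x 4) gives a 3 x 4 array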
# Get the diagonal of X. Note that X is not square.
np.diag(X)
# Compute the trace of X
np.trace(X)
# Compute the determinant of X'X
np.linalg.det(X.dot(X.T))
# Compute the eigenvalues and eigenvectors of X'X
np.linalg.eig(X.dot(X.T))
# Compute the inverse of X'X
np.linalg.inv(X.dot(X.T))
# Plotting a sinusoide
# create an array of 100 equally-spaced points between 0 and 10 (to serve as x coordinates)
x = np.linspace(0, 10, 100)
# create the y coordinates
y = np.sin(x)
plt.plot(x, y)
# Tweak some options
plt.plot(x, y, color='orange', linestyle='--', linewidth=3)
# Plot the individual points
plt.plot(x, y, color='orange', marker='x', linestyle='')
# Plot multiple lines
plt.plot(x, y, color='orange', linewidth=2, label='sine')
plt.plot(x, np.cos(x), color='blue', linewidth=2, label='cosine')
plt.legend()
# Add a title and caption and label the axes
plt.plot(x, y, color='orange', linewidth=2, label='sine')
plt.plot(x, np.cos(x), color='blue', linewidth=2, label='cosine')
plt.legend(loc='lower left', fontsize=14)
plt.title("Sinusoides", fontsize=14)
plt.xlabel("$f(x)$", fontsize=16)
plt.ylabel("$sin(x)$", fontsize=16)
# Save the plot
plt.plot(x, y, color='orange', linewidth=2, label='sine')
plt.plot(x, np.cos(x), color='blue', linewidth=2, label='cosine')
plt.legend(loc='lower left', fontsize=14)
plt.title("Sinusoides", fontsize=14)
plt.xlabel("$x$", fontsize=16)
plt.ylabel("$f(x)$", fontsize=16)
plt.savefig("my_sinusoide.png")
# Add to the previous plot a sinusoide of half the amplitude and twice the frequency of the sine one.
# Plot the line in green and give each line a different line style.
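# A possible solution for the exercise above:
plt.plot(x, y, color='orange', linewidth=2, label='sine')
plt.plot(x, np.cos(x), color='blue', linewidth=2, linestyle='--', label='cosine')
plt.plot(x, 0.5*np.sin(2*x), color='green', linewidth=2, linestyle=':',
         label='half amplitude, twice the frequency')
plt.legend()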
# Create 500 points with random (x, y) coordinates
x = np.random.normal(size=500)
y = np.random.normal(size=500)
# Plot them
plt.scatter(x, y)
# Use the same ranges for both axes
plt.scatter(x, y)
plt.xlim([-4, 4])
plt.ylim([-4, 4])
# Add a title and axis captions to the previous plot. Change the marker style and color.
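# For example, the exercise above could be done as:
plt.scatter(x, y, color='purple', marker='+')
plt.xlim([-4, 4])
plt.ylim([-4, 4])
plt.title("Normally distributed points")
plt.xlabel("$x$")
plt.ylabel("$y$")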
# Create a random 50 x 100 array
X = np.random.random((50, 100))
heatmap = plt.pcolor(X, cmap=plt.cm.Blues)
plt.colorbar(heatmap)
# Create a random vector (normally distributed) of size 5000
X = np.random.normal(size=(5000,))
# Plot the histogram of its values over 50 bins
h = plt.hist(X, bins=50, color='orange', histtype='stepfilled')
# create an image
x = np.linspace(1, 12, 100)
# transform an array of shape (100,) into an array of shape (100, 1)
y = x[:, np.newaxis]
y = y * np.cos(y)
# Create an image matrix: image[i,j] = y cos(y)[i] * sin(x)[j]
image = y * np.sin(x)
# show the image (the origin is, by default, at the top-left corner!)
plt.imshow(image, cmap=plt.cm.prism)
# Contour plot - note that origin here is at the bottom-left by default!
# A contour line or isoline of a function of two variables is a curve along which the function has a constant value.
contours = plt.contour(image, cmap=plt.cm.prism)
plt.clabel(contours, inline=1, fontsize=10)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The previous command is one of the "magics" of Jupyter. As indicated by the message you have gotten, it imports numpy and matplotlib.
Step2: Accessing elements, rows, and columns of arrays
Step3: Array manipulation
Step4: More complex linear algebra operations are available via numpy.linalg
Step5: For more on arrays, you can refer to http
Step6: Scatterplots
Step7: Heatmaps
Step8: Histograms
Step9: Images
|
506 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import lsst.sims.maf.db as db
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles
from lsst.sims.maf.metrics import BaseMetric
class Coaddm5Metric(BaseMetric):
"""Calculate the coadded m5 value at this gridpoint."""
def __init__(self, m5Col = 'fiveSigmaDepth', metricName='CoaddM5', **kwargs):
"""Instantiate metric.
m5Col = the column name of the individual visit m5 data.
"""
self.m5col = m5Col
super(Coaddm5Metric, self).__init__(col=m5Col, metricName=metricName, **kwargs)
def run(self, dataSlice, slicePoint=None):
return 1.25 * np.log10(np.sum(10.**(.8*dataSlice[self.m5col])))
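# A quick sanity check of the coadded-depth formula: coadding N visits of equal
# depth m5 gives 1.25 * log10(N * 10**(0.8*m5)) = m5 + 1.25*log10(N), so two
# visits at m5 = 24.5 coadd to roughly 24.5 + 1.25*log10(2) ≈ 24.88.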
# Import BaseMetric, or have it available to inherit from
from lsst.sims.maf.metrics import BaseMetric
# Define our class, inheriting from BaseMetric
class OurPercentileMetric(BaseMetric):
# Add a doc string to describe the metric.
"""Calculate the percentile value of a data column"""
# Add our "__init__" method to instantiate the class.
# We will make the 'percentile' value an additional value to be set by the user.
# **kwargs allows additional values to be passed to the BaseMetric that you
# may not have been using here and don't want to bother with.
def __init__(self, colname, percentile, **kwargs):
# Set the values we want to keep for our class.
self.colname = colname
self.percentile = percentile
# Now we have to call the BaseMetric's __init__ method, to get the "framework" part set up.
# We currently do this using 'super', which just calls BaseMetric's method.
# The call to super just basically looks like this .. you must pass the columns you need, and the kwargs.
super(OurPercentileMetric, self).__init__(col=colname, **kwargs)
# Now write out "run" method, the part that does the metric calculation.
def run(self, dataSlice, slicePoint=None):
# for this calculation, I'll just call numpy's percentile function.
result = np.percentile(dataSlice[self.colname], self.percentile)
return result
metric = OurPercentileMetric('airmass', 20)
slicer = slicers.HealpixSlicer(nside=64)
sqlconstraint = 'filter = "r" and night<365'
myBundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint)
opsdb = db.OpsimDatabase('minion_1016_sqlite.db')
bgroup = metricBundles.MetricBundleGroup({0: myBundle}, opsdb, outDir='newmetric_test', resultsDb=None)
bgroup.runAll()
myBundle.setPlotDict({'colorMin':1.0, 'colorMax':1.8})
bgroup.plotAll(closefigs=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Writing a new metric
Step5: To understand this, you need to know a little bit about "classes" and "inheritance".
Step6: So then how do we use this new metric? Just as before, although you may have to adjust the namespace.
|
507 | <ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import \
eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
# set default size of plots
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
"""returns relative error"""
return np.max(np.abs(x - y) \
/ (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print('{}:'.format(k), v.shape)
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3),
{'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in xrange(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print('dx difference:', rel_error(dx1, dx2))
print('dgamma difference:', rel_error(dgamma1, dgamma2))
print('dbeta difference:', rel_error(dbeta1, dbeta2))
print('speedup: {:.2f}x'.format((t2 - t1) / (t3 - t2)))
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg =', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2,
dtype=np.float64, use_batchnorm=True)
loss, grads = model.loss(X, y)
print('Initial loss:', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name],
verbose=False, h=1e-5)
print('{} relative error: {:.2e}'.format(
name, rel_error(grad_num, grads[name])))
if reg == 0:
print('')
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale,
use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale,
use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
print('With batch normalization...')
bn_solver.train()
print()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
print('Without batch normalization...')
solver.train()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale {} / {}'.format(
i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims,
weight_scale=weight_scale,
use_batchnorm=True)
model = FullyConnectedNet(hidden_dims,
weight_scale=weight_scale,
use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(
bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batch Normalization
Step2: Batch normalization
Step3: Batch Normalization
Step4: Batch Normalization
Step5: Fully Connected Nets with Batch Normalization
Step6: Batchnorm for deep networks
Step7: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
Step8: Batch normalization and initialization
|
508 | <ASSISTANT_TASK:>
Python Code:
df.dtypes #dtype: Data type for data or columns
print("The data type is",(type(df['Plate ID'][0])))
df['Vehicle Year'] = df['Vehicle Year'].replace("0", float("nan"))  # pandas Series.replace(to_replace, value)
df.head()
# Function to use for converting a sequence of string columns to an array of datetime instances: dateutil.parser.parser
type(df['Issue Date'][0])
def to_dates(date):
yourdate = dateutil.parser.parse(date)
return yourdate
df['Issue Date Converted'] = df['Issue Date'].apply(to_dates) #DataFrame.apply(func):apply function to apply to each column/row
df['Issue Date Converted'].head()
df['Date First Observed'].tail()
import numpy as np #numpy object
def pydate(num):
num = str(num) #to work with dateutil.parser.parse():it has to be a string
print(num)
if num == "0":
print("replacing 0")
return np.NaN #if number==0,replace 0 with NaN
else:
print("parsing date")
yourdate = dateutil.parser.parse(num)#recognize the string as a time object
strf = yourdate.strftime("%Y-%B-%d")#strftime turns a time object into a date and time format
print(strf)
return strf
df['Date First Observed Converted'] = df['Date First Observed'].apply(pydate)
df['Violation Time'].head()
type(df['Violation Time'][0])
def str_to_time(time_str):
s = str(time_str).replace("P"," PM").replace("A"," AM") #str(time_str) because str.replace()
x = s[:2] + ":" + s[2:]
return x
str_to_time("1239P")
df['Violation Time Converted'] = df['Violation Time'].apply(str_to_time)
df['Violation Time Converted']
df['Vehicle Color'].value_counts()
def to_color(color):
if color=='BLK'or color=='BK'or color=='BLCK':
return "BLACK"
if color=='WH' or color=='WHT'or color=='WT':
return "WHITE"
if color=='GY'or color=='GREY'or color=='GRY':
return "GRAY"
if color=='BL'or color=='BLE' or color=='B LUE' or color=='BU':
return "BLUE"
if color=='BR' or color == 'BRN' or color == 'BRWN':
return "BROWN"
if color== 'SILV' or color == 'SIL' or color == 'SILVR' or color == 'SILVE' or color == 'SL':
return "SILVER"
else:
return color
df['Vehicle Color'] = df['Vehicle Color'].apply(to_color)
df2=pd.read_csv("DOF_Parking_Violation_Codes.csv")
df2.head()
df2.iloc[[38]]#how to select a row from dataframe
df2['CODE'].value_counts() #code 37-38
def many_digits_to_single_digit(x):
try:
single_digit_code = x[0:2]
return single_digit_code
except:
return None
one_code = df2['CODE'].apply(many_digits_to_single_digit)
new_df['All Other Areas clean'].value_counts().head(5).plot(kind='barh')
plt.ylabel('Number of Fines')
plt.xlabel('Fine in $')
time_of_day.value_counts().head(5).plot(kind='barh')
plt.style.use('seaborn-poster')
graph_daily_tickets= new_df['Issue Date'].value_counts(sort=False).plot(kind='bar')
plt.ylabel('Number of tickets')
plt.xlabel('Days')
graph_daily_tickets.axes.get_xaxis().set_ticks([])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN.
Step2: 3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.
Step3: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN.
Step4: 5. "Violation time" is... not a time. Make it a time.
Step5: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK", "WT" and "WHITE", and any other combinations that you notice.
Step6: 7. Join the data with the Parking Violations Code dataset from the NYC Open Data site.
Step7: 8. How much money did NYC make off of parking violations?
Step8: 12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm, 6pm-12am.
Step9: 13. What's the average ticket cost in NYC?
|
509 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
IMG_SIZE = 28
class Model(tf.Module):
def __init__(self):
self.model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(IMG_SIZE, IMG_SIZE), name='flatten'),
tf.keras.layers.Dense(128, activation='relu', name='dense_1'),
tf.keras.layers.Dense(10, name='dense_2')
])
self.model.compile(
optimizer='sgd',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True))
# The `train` function takes a batch of input images and labels.
@tf.function(input_signature=[
tf.TensorSpec([None, IMG_SIZE, IMG_SIZE], tf.float32),
tf.TensorSpec([None, 10], tf.float32),
])
def train(self, x, y):
with tf.GradientTape() as tape:
prediction = self.model(x)
loss = self.model.loss(y, prediction)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimizer.apply_gradients(
zip(gradients, self.model.trainable_variables))
result = {"loss": loss}
return result
@tf.function(input_signature=[
tf.TensorSpec([None, IMG_SIZE, IMG_SIZE], tf.float32),
])
def infer(self, x):
logits = self.model(x)
probabilities = tf.nn.softmax(logits, axis=-1)
return {
"output": probabilities,
"logits": logits
}
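# Note: tf.nn.softmax turns the raw logits into a probability distribution over
# the 10 classes (non-negative values that sum to 1); both the probabilities and
# the raw logits are returned so the results can be inspected and compared later.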
@tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)])
def save(self, checkpoint_path):
tensor_names = [weight.name for weight in self.model.weights]
tensors_to_save = [weight.read_value() for weight in self.model.weights]
tf.raw_ops.Save(
filename=checkpoint_path, tensor_names=tensor_names,
data=tensors_to_save, name='save')
return {
"checkpoint_path": checkpoint_path
}
@tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)])
def restore(self, checkpoint_path):
restored_tensors = {}
for var in self.model.weights:
restored = tf.raw_ops.Restore(
file_pattern=checkpoint_path, tensor_name=var.name, dt=var.dtype,
name='restore')
var.assign(restored)
restored_tensors[var.name] = restored
return restored_tensors
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = (train_images / 255.0).astype(np.float32)
test_images = (test_images / 255.0).astype(np.float32)
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)
NUM_EPOCHS = 100
BATCH_SIZE = 100
epochs = np.arange(1, NUM_EPOCHS + 1, 1)
losses = np.zeros([NUM_EPOCHS])
m = Model()
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
train_ds = train_ds.batch(BATCH_SIZE)
for i in range(NUM_EPOCHS):
for x,y in train_ds:
result = m.train(x, y)
losses[i] = result['loss']
if (i + 1) % 10 == 0:
print(f"Finished {i+1} epochs")
print(f" loss: {losses[i]:.3f}")
# Save the trained weights to a checkpoint.
m.save('/tmp/model.ckpt')
plt.plot(epochs, losses, label='Pre-training')
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
plt.ylabel('Loss [Cross Entropy]')
plt.legend();
SAVED_MODEL_DIR = "saved_model"
tf.saved_model.save(
m,
SAVED_MODEL_DIR,
signatures={
'train':
m.train.get_concrete_function(),
'infer':
m.infer.get_concrete_function(),
'save':
m.save.get_concrete_function(),
'restore':
m.restore.get_concrete_function(),
})
# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
converter.experimental_enable_resource_variables = True
tflite_model = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
infer = interpreter.get_signature_runner("infer")
logits_original = m.infer(x=train_images[:1])['logits'][0]
logits_lite = infer(x=train_images[:1])['logits'][0]
#@title
def compare_logits(logits):
width = 0.35
offset = width/2
assert len(logits)==2
keys = list(logits.keys())
plt.bar(x = np.arange(len(logits[keys[0]]))-offset,
height=logits[keys[0]], width=0.35, label=keys[0])
plt.bar(x = np.arange(len(logits[keys[1]]))+offset,
height=logits[keys[1]], width=0.35, label=keys[1])
plt.legend()
plt.grid(True)
plt.ylabel('Logit')
plt.xlabel('ClassID')
delta = np.sum(np.abs(logits[keys[0]] - logits[keys[1]]))
plt.title(f"Total difference: {delta:.3g}")
compare_logits({'Original': logits_original, 'Lite': logits_lite})
train = interpreter.get_signature_runner("train")
NUM_EPOCHS = 50
BATCH_SIZE = 100
more_epochs = np.arange(epochs[-1]+1, epochs[-1] + NUM_EPOCHS + 1, 1)
more_losses = np.zeros([NUM_EPOCHS])
for i in range(NUM_EPOCHS):
for x,y in train_ds:
result = train(x=x, y=y)
more_losses[i] = result['loss']
if (i + 1) % 10 == 0:
print(f"Finished {i+1} epochs")
print(f" loss: {more_losses[i]:.3f}")
plt.plot(epochs, losses, label='Pre-training')
plt.plot(more_epochs, more_losses, label='On device')
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
plt.ylabel('Loss [Cross Entropy]')
plt.legend();
save = interpreter.get_signature_runner("save")
save(checkpoint_path=np.array("/tmp/model.ckpt", dtype=np.string_))
another_interpreter = tf.lite.Interpreter(model_content=tflite_model)
another_interpreter.allocate_tensors()
infer = another_interpreter.get_signature_runner("infer")
restore = another_interpreter.get_signature_runner("restore")
logits_before = infer(x=train_images[:1])['logits'][0]
# Restore the trained weights from /tmp/model.ckpt
restore(checkpoint_path=np.array("/tmp/model.ckpt", dtype=np.string_))
logits_after = infer(x=train_images[:1])['logits'][0]
compare_logits({'Before': logits_before, 'After': logits_after})
infer = another_interpreter.get_signature_runner("infer")
result = infer(x=test_images)
predictions = np.argmax(result["output"], axis=1)
true_labels = np.argmax(test_labels, axis=1)
result['output'].shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def plot(images, predictions, true_labels):
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i], cmap=plt.cm.binary)
color = 'b' if predictions[i] == true_labels[i] else 'r'
plt.xlabel(class_names[predictions[i]], color=color)
plt.show()
plot(test_images, predictions, true_labels)
predictions.shape
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: On-Device Training with TensorFlow Lite
Step2: Note
Step3: The train function in the code above uses the GradientTape class to record operations for automatic differentiation. For more information on how to use this class, see the Introduction to gradients and automatic differentiation.
Step4: Preprocess the dataset
Step5: Convert the data labels to categorical values by performing one-hot encoding.
Step6: Note
Step7: Note
Step8: Setup the TensorFlow Lite signatures
Step9: Compare the output of the original model, and the converted lite model
Step10: Above, you can see that the behavior of the model is not changed by the conversion to TFLite.
Step11: Above you can see that the on-device training picks up exactly where the pretraining stopped.
Step12: In your Android application, you can store the generated weights as a checkpoint file in the internal storage space allocated for your app.
Step13: The checkpoint was generated by training and saving with TFLite. Above you can see that applying the checkpoint updates the behavior of the model.
Step14: Plot the predicted labels.
|
510 | <ASSISTANT_TASK:>
Python Code:
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
# to a name in the local scope.
import numpy as np
import pandas as pd
# Import matplotlib to visualize the model
import matplotlib.pyplot as plt
# Seaborn is a Python data visualization library based on matplotlib
import seaborn as sns
# %matplotlib inline sets the backend of matplotlib to the `inline` backend
%matplotlib inline
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
print("TensorFlow version: ",tf.version.VERSION)
URL = 'https://storage.googleapis.com/download.tensorflow.org/data/heart.csv'
# Read a comma-separated values (csv) file into a DataFrame using the read_csv() function
dataframe = pd.read_csv(URL)
# Get the first five rows using the head() method
dataframe.head()
# Get a concise summary of a DataFrame
dataframe.info()
# TODO 1a
# Create test, validation and train samples from one dataframe with pandas.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) # TODO 2a
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
# A small batch size is used for demonstration purposes
batch_size = 5
# TODO 2b
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
# If you don't use take(1), all elements will eventually be fetched
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of ages:', feature_batch['age'])
print('A batch of targets:', label_batch)
# We will use this batch to demonstrate several types of feature columns
example_batch = next(iter(train_ds))[0]
# A utility method to create a feature column
# and to transform a batch of data
def demo(feature_column):
feature_layer = layers.DenseFeatures(feature_column)
print(feature_layer(example_batch).numpy())
# Create a numeric feature column out of `age`
age = feature_column.numeric_column("age")
tf.feature_column.numeric_column
print(age)
# Demo of a numeric feature column out of `age`
demo(age)
# Create a bucketized feature column out of `age` with the following boundaries and demo it.
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets) # TODO 3a
# Create a categorical vocabulary column out of the
# above mentioned categories with the key specified as `thal`.
thal = tf.feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
# Create an indicator column out of the created categorical column.
thal_one_hot = tf.feature_column.indicator_column(thal)
demo(thal_one_hot)
# Notice the input to the embedding column is the categorical column
# we previously created
# Set the size of the embedding to 8, by using the dimension parameter
thal_embedding = tf.feature_column.embedding_column(thal, dimension=8)
demo(thal_embedding)
# Create a hashed feature column with `thal` as the key and 1000 hash buckets.
thal_hashed = tf.feature_column.categorical_column_with_hash_bucket(
'thal', hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(thal_hashed))
# Create a crossed column using the bucketized column (age_buckets)
# the categorical vocabulary column (thal), and 1000 hash buckets.
crossed_feature = tf.feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(crossed_feature))
feature_columns = []
# numeric cols
# Create a feature column out of the header using a numeric column.
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
feature_columns.append(feature_column.numeric_column(header))
# bucketized cols
# Create a bucketized feature column out of the age column using the following boundaries.
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# indicator cols
# Create a categorical vocabulary column out of the below categories with the key specified as `thal`.
thal = feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
# embedding cols
# Create an embedding column out of the categorical vocabulary
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# crossed cols
# Create a crossed column using the bucketized column (age_buckets),
# the categorical vocabulary column (thal), and 1000 hash buckets.
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
# Create a Keras DenseFeatures layer and pass the feature_columns
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
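# DenseFeatures applies every feature-column transformation defined above (numeric,
# bucketized, one-hot, embedding and crossed columns) and concatenates the results
# into a single dense vector that the Dense layers of the model can consume.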
batch_size = 32
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
# `Sequential` provides training and inference features on this model.
model = tf.keras.Sequential([
feature_layer,
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(1)
])
# `Compile` configures the model for training.
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
# `Fit` trains the model for a fixed number of epochs
history = model.fit(train_ds,
validation_data=val_ds,
epochs=5)
# `Evaluate` returns the loss value & metrics values for the model in test mode.
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'accuracy'])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab Task 1
Step2: Split the dataframe into train, validation, and test
Step3: Lab Task 2
Step4: Understand the input pipeline
Step5: Lab Task 3
Step6: Numeric columns
Step7: Let's have a look at the output
Step8: Bucketized columns
Step9: Categorical columns
Step10: In a more complex dataset, many columns would be categorical (e.g. strings). Feature columns are most valuable when working with categorical data. Although there is only one categorical column in this dataset, we will use it to demonstrate several important types of feature columns that you could use when working with other datasets.
Step11: Hashed feature columns
Step12: Crossed feature columns
Step13: Choose which columns to use
Step14: How to Input Feature Columns to a Keras Model
Step15: Earlier, we used a small batch size to demonstrate how feature columns worked. We create a new input pipeline with a larger batch size.
Step16: Create, compile, and train the model
Step17: Visualize the model loss curve
|
511 | <ASSISTANT_TASK:>
Python Code:
from pylab import *
from copy import deepcopy
from matplotlib import animation, rc
from IPython.display import HTML
%matplotlib inline
rc('text', usetex=True)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 15}
matplotlib.rc('font', **font)
E1, E2, E3 = 0., 20., 0.
V12, V23 = 1., 1.
psi0 = array([1, 0, 0], dtype='complex')
Nt = int(1e4)
psi = zeros((Nt, 3), dtype='complex')
psi[0, :] = psi0
for E2, tf in zip(arange(4) * 20, [20, 200, 200, 200]):
times = linspace(0, tf, Nt)
H = array([[E1, V12, 0],
[V12, E2, V23],
[0, V23, E3]])
lambd, Q = eigh(H)
Q_inv = Q.T.conj()
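    # Propagate by spectral decomposition: express psi0 in the eigenbasis of H,
    # multiply each component by exp(-i * lambda * t), then transform back.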
for it in range(1, Nt):
psi[it, :] = Q_inv @ psi0
psi[it, :] = diag(np.exp(-1j * lambd * times[it])) @ psi[it, :]
psi[it, :] = Q @ psi[it, :]
plot(times, abs(psi) ** 2)
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(['$\|\Psi(t)_1\|^2$', '$\|\Psi(t)_2\|^2$', '$\|\Psi(t)_3\|^2$'], loc=1)
figure()
y = cos(V12 ** 2 / E2 * times) ** 2
plot(times, y)
y = sin(V12 ** 2 / E2 * times) ** 2
plot(times, y)
plot(times, abs(psi[:, 0]) ** 2, label='$\|\Psi(t)_1\|^2$')
plot(times, abs(psi[:, 2]) ** 2, label='$\|\Psi(t)_3\|^2$')
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(loc=1)
figure()
plot(times, abs(psi[:, 0]) ** 2, label='$\|\Psi(t)_1\|^2$')
y = cos(V12 ** 2 / E2 * times) ** 2
plot(times, y, label=r'$\cos(V_{12}^2 / (E_2 t)^2)$')
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(loc=1)
ylim([.99, 1.01])
xlim([-.3, 3]);
def V(x, Z=1):
return -Z / sqrt(2 / Z ** 2 + x ** 2)
N = 2 ** 10
x0, x1 = -25, 25
x = linspace(x0, x1, N)
dx = (x1 - x0) / (N - 1)
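# Build the Hamiltonian on the grid: the kinetic term is the standard three-point
# finite-difference Laplacian, and the soft-core potential V(x) sits on the diagonal.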
H = diag(ones(N - 1), -1) - 2 * diag(ones(N)) + diag(ones(N - 1), 1)
H *= -1 / (2 * dx**2)
H += diag(V(x))
E, Psi_tot = eigh(H)
E_bound=E[E<0]
for k, E_ in enumerate(sorted(E_bound)[:3]):
print('E_{' + str(k) + '} = ' + "{:1.4f}".format(E_))
plot(x, Psi_tot[:, 0] / sqrt(dx), label=r'$\Psi_0(x)$')
plot(x, Psi_tot[:, 1] / sqrt(dx), label=r'$\Psi_1(x)$')
plot(x, Psi_tot[:, 2] / sqrt(dx), label=r'$\Psi_2(x)$')
legend(loc=1)
xlabel('x')
ylabel('$\Psi(t)$')
figure()
plot(x, V(x))
plot(x, E_bound * ones_like(x)[:, newaxis])
legend([r'$V(x)$', r'$E_0$', r'$E_1$', r'$E_2$'])
xlabel('x')
ylabel('Energy')
def E(t, E0, omega, n):
t_ = maximum(omega * t, 0)
t_ = minimum(t_, 2 * np.pi * n)
return E0 * sin(t_) * sin(t_ / (2 * n))
def A(t, E0, omega, n):
pref = -E0 / omega
t_ = maximum(omega * t, 0.)
t_ = minimum(t_, 2 * np.pi * n)
return pref * (cos(t_) * (n * n * cos(t_ / n) - n * n + 1) + n * sin(t_) *
sin(t_ / n) - 1) / (2 * (n * n - 1))
def vanish(V0, x, x0, x1):
V0 *= 2
xs, xe = x[0], x[-1]
potential = np.maximum(0, (V0 * (x - x0) / (xs - x0)))
return np.maximum(potential, (V0 * (x - x1) / (xe - x1)))
omega = .02
n = 3 / 2
E0 = .05
Z = 1
x0, x1 = -15, 15
dx = .1
x = arange(x0, x1, dx)
N = len(x)
p = fftfreq(N, d=dx / (2 * pi))
dt = 0.5
ts = np.arange(- pi / omega, 2 * np.pi * (n + .5) / omega, dt)
plot(ts, E(ts, E0, omega, n))
title('Electric field for n = 3/2')
xlabel('t')
ylabel('E(t)')
figure()
t_star = np.arange(-pi / omega, 2 * np.pi * (5 + .5) / omega, 0.01)
plot(t_star, E(t_star, E0, omega, 5))
xlabel('t')
ylabel('E(t)')
title('Electric field for n = 5')
figure()
plot(ts, A(ts, E0, omega, n))
xlabel('t')
ylabel('A(t)')
title('Magnetic potential field for n = 3/2')
figure()
plot(t_star, A(t_star, E0, omega, 5))
xlabel('t')
ylabel('A(t)')
title('Magnetic potential field for n = 5')
omega = .02
n = 3 / 2
E0 = .05
Z = 1
x0, x1 = -15, 15
xl, xr = -10, 10
d = x1 - xr
t_temp = np.linspace(0, 2 * np.pi * (n + .5) / omega, 1000)
A_max = max(A(t_temp, E0, omega, n)) # the maximum momentum is equal to the
# maximum value of the magnetic potential
p_tilde = n**2 * E0 /(n**2 - 1) / omega
print('dx using the approximation ',\
"{:1.4f}".format(pi / p_tilde), 'a.u.')
print('dx using the maximum of the momentum calculated numerically',\
"{:1.4f}".format(pi / A_max), 'a.u.')
print('dt using the approximation ',\
"{:1.4f}".format(2 * pi / p_tilde ** 2), 'a.u.')
print('dt using the maximum of the momentum calculated numerically',\
"{:1.4f}".format(2 * pi / A_max ** 2), 'a.u.')
print("{:1.4f}".format(p_tilde / (8 * d)), 'a.u. < tilde_V <',\
"{:1.4f}".format(p_tilde ** 3 / 2 ** 4), 'a.u.')
V_tilde = 5.
dx = pi / p_tilde
x = arange(x0, x1, dx)
N = len(x)
p = fftfreq(N, d=dx / (2 * pi))
dt = 2 * pi / p_tilde ** 2
ts = np.arange(0, 2 * np.pi * (n + .5) / omega, dt)
H = diag(ones(N - 1), -1) - 2 * diag(ones(N)) + diag(ones(N - 1), 1)
H *= -1 / (2 * dx ** 2)
H += diag(V(x, Z))
U_2 = exp(-1j * 0.5 * p ** 2 * dt)
_, Psi_tot = eigh(H)
Psi = Psi_tot[:, 0].astype('complex')
Psi /= np.sqrt(sum(abs(Psi) ** 2 * dx))
psi0 = deepcopy(Psi)
norm = np.zeros(len(ts))
overlap = np.zeros(len(ts))
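# Split-operator propagation: half a step with the potential (including the laser
# coupling -x*E(t) and the imaginary absorbing potential from vanish(), which removes
# the ionized part of the wavefunction near the box edges), a full kinetic step in
# momentum space via FFT, then another half potential step.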
for k, t in enumerate(ts):
U_1 = exp(-0.5 * 1j * (V(x, 1) - 1j *
vanish(V_tilde, x, xl, xr) - x * E(t, E0, omega, n)) * dt)
Psi *= U_1
Psi = fft(Psi)
Psi *= U_2
Psi = ifft(Psi) # go to real space
Psi *= U_1
norm[k] = sum(abs(Psi) ** 2 * dx)
overlap[k] = abs(vdot(Psi, psi0)) * dx
N_e = 20
ionizs = np.zeros(N_e)
norms = np.zeros((N_e, len(ts)))
for j, E0 in enumerate(np.linspace(0, .05, N_e)):
Psi = deepcopy(psi0)
for k, t in enumerate(ts):
U_1 = exp(-0.5 * 1j * (V(x, 1) - 1j *
vanish(V_tilde, x, -10, 10)
- x * E(t, E0, omega, n)) * dt)
Psi *= U_1
Psi = fft(Psi)
Psi *= U_2
Psi = ifft(Psi) # go to real space
Psi *= U_1
norms[j, k] = sum(abs(Psi) ** 2 * dx)
ionizs[j] = 1 - sum(abs(Psi) ** 2 * dx)
title('Ionization probabilies in time')
plot(ts, 1 - norms.T[:, ::-1])
legend([r'$E_0 = 0.05$ a.u.'])
xlabel(r't')
ylabel(r'$1 - |<\Psi(t)|\Psi(t)>|^2$')
figure()
title(r'ionization probabilies at $t_{end}$')
ylabel(r'$1 - |<\Psi(t_{end})|\Psi(t_{end})>|^2$')
xlabel(r'$E_0$')
plot(np.linspace(0, .05, N_e), ionizs)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Superexchange in a three-level system.
Step2: (b)
Step3: 2. The one-dimensional soft-core potential.
Step4: 3. Ionization from a one-dimensional soft-core potential.
Step5: (b)
Step6: (f)
Step7: We see, as expected, that the lower the amplitude of the electric field, the lower the final ionization probability will be.
|
512 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
pd.options.display.max_columns = 999
%matplotlib inline
matplotlib.rcParams['savefig.dpi'] = 1.5 * matplotlib.rcParams['savefig.dpi']
# Read in the data:
loan2011 = pd.read_csv('merged_summary_2011.csv')
loan2010 = pd.read_csv('merged_summary_2010.csv')
loan2012 = pd.read_csv('merged_summary_2012.csv')
loan2011.head()
loan2011['Monthly.Rpt.Prd'] = pd.to_datetime(loan2011['Monthly.Rpt.Prd'], format = '%m/%d/%Y')
loan2011['ORIG_DTE'] = pd.to_datetime(loan2011['ORIG_DTE'], format = '%m/%Y')
loan = {'2011':loan2011, '2012': loan2012, '2010': loan2010}
years = []
prepaid = {}
default = {}
loan_num = {}
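# Zero.Bal.Code describes how each loan left the pool: below, the group with code 1
# is counted as prepaid, and everything outside the first two groups (current and
# prepaid) is counted as a default-type outcome (e.g. codes 3, 6 and 9, which are
# also used for the state-level breakdown further down).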
for key in loan.keys():
temp_size =len(loan[key])
temp_count = loan[key].groupby('Zero.Bal.Code')['LOAN_ID'].count()
loan_num[key] = temp_size
prepaid[key] = temp_count[1]/temp_size
default[key] = 1 - sum(temp_count[0:2])/temp_size
def plot_dict_bar(val_dict):
x_axis = sorted(list(val_dict.keys()))
y_axis = [val_dict[key] for key in x_axis]
x = np.arange(3) - 0.175
y = np.array(y_axis)*100
fig, ax = plt.subplots()
ax.bar(x,y,0.35)
ax.set_xticks(np.arange(3))
ax.set_xticklabels(x_axis)
ax.set_xlabel("Year")
ax.set_ylabel("Percentage (%)")
ax.set_title("Prepaid Ratio")
plt.show()
plot_dict_bar(prepaid)
def plot_dict_bar(val_dict):
x_axis = sorted(list(val_dict.keys()))
y_axis = [val_dict[key] for key in x_axis]
x = np.arange(3) - 0.175
y = np.array(y_axis)*100
fig, ax = plt.subplots()
ax.bar(x,y,0.35)
ax.set_xticks(np.arange(3))
ax.set_xticklabels(x_axis)
ax.set_xlabel("Year")
ax.set_ylabel("Percentage (%)")
ax.set_title("Default Ratio")
plt.show()
plot_dict_bar(default)
def plot_dict_bar(val_dict):
x_axis = sorted(list(val_dict.keys()))
y_axis = [val_dict[key] for key in x_axis]
x = np.arange(3) - 0.175
y = np.array(y_axis)
fig, ax = plt.subplots()
ax.bar(x,y,0.35)
ax.set_xticks(np.arange(3))
ax.set_xticklabels(x_axis)
ax.set_xlabel("Year")
ax.set_ylabel("Percentage (%)")
ax.set_title("Loan Initiation Amount")
plt.show()
plot_dict_bar(loan_num)
years = []
loan_to_value = {}
for key in loan.keys():
temp_ltv = {}
temp_mean = loan[key].groupby('Zero.Bal.Code')['OLTV'].mean()
temp_ltv['Current'] = temp_mean[0]
temp_ltv['Prepaid'] = temp_mean[1]
temp_ltv['Default'] = np.mean(temp_mean[2:])
loan_to_value[key] = temp_ltv
result = pd.DataFrame.from_dict(loan_to_value)
result.head()
years = []
credit_score = {}
for key in loan.keys():
temp_cscore = {}
temp_mean = loan[key].groupby('Zero.Bal.Code')['CSCORE_C'].mean()
temp_cscore['Current'] = temp_mean[0]
temp_cscore['Prepaid'] = temp_mean[1]
temp_cscore['Default'] = np.mean(temp_mean[2:])
credit_score[key] = temp_cscore
result2 = pd.DataFrame.from_dict(credit_score)
result2.head()
# Last one, default by states...
years = []
state_res = {}
for key in loan.keys():
temp_state_count = loan[key][(loan[key]['Zero.Bal.Code'] == 3) | (loan[key]['Zero.Bal.Code'] == 6) | (loan[key]['Zero.Bal.Code'] == 9)].groupby('STATE')['LOAN_ID'].count()
states = list(temp_state_count.index)
for state in states:
if state_res.get(state) == None:
state_res[state] = temp_state_count[state]
else:
state_res[state] = state_res[state] + temp_state_count[state]
total_state_res = pd.DataFrame.from_dict(state_res, orient='index')
total_state_res.columns=['Default']
total_state_res.sort_values(by='Default', ascending=False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Underwriting
Step2: Let's take a peek at the data.
Step3: How many mortgages have been prepaid in these three years?
Step4: Remember that prepayment includes the common case of moving house: if you switch to another house, you sell the one you currently live in, and that counts as a prepayment too.
Step5: The default rate is not very high in these three years. The general trend is downward, which makes sense because the economy was getting better.
Step6: It is very interesting to find that "default" loans tend to have a slightly higher loan-to-value (LTV) ratio on average, meaning they tend to have lower down payments.
Step7: Again, it is very interesting to find that default loans tend to have slightly lower credit scores, on average.
|
513 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('Sample-HRM-p50-genotyping.csv')
plt.plot(df.iloc[:, 0],df.iloc[:,1:])
plt.show()
df_melt=df.loc[(df.iloc[:,0]>75) & (df.iloc[:,0]<88)]
df_data=df_melt.iloc[:,1:]
plt.plot(df_melt.iloc[:, 0],df_data)
plt.show()
df_norm= (df_data - df_data.min()) / (df_data.max()-df_data.min())*100
plt.plot(df_melt.iloc[:, 0],df_norm)
plt.show()
dfdt = df_norm.diff()
plt.plot(df_melt.iloc[:,[0]],dfdt)
plt.show()
dfdtWithTemp = pd.concat([df_melt.iloc[:,[0]],dfdt],axis=1)
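# The melting temperature of each sample is taken as the temperature at which the
# normalized fluorescence drops fastest, i.e. where the first difference is most
# negative (idxmin below).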
meltTempList = dfdtWithTemp.set_index('Temperature').idxmin()
dfdif = df_norm.sub(df_norm['J14'],axis=0)
plt.plot(df_melt.iloc[:, 0],dfdif)
plt.show()
import sklearn.cluster as sc
from IPython.display import display
mat = dfdif.T
hc = sc.KMeans(n_clusters=3)
hc.fit(mat)
labels = hc.labels_
results = pd.DataFrame([dfdif.T.index,labels])
display(results.loc[:0,results.iloc[1]==0])
display(results.loc[:0,results.iloc[1]==1])
display(results.loc[:0,results.iloc[1]==2])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read and Plot Melting Data
Step2: Select melting range
Step3: Normalizing
Step4: Melting Temp
Step5: Calculate and Show Diff Plot
Step6: Clustering
|
514 | <ASSISTANT_TASK:>
Python Code:
import autofig
import numpy as np
#autofig.inline()
t = np.linspace(0,10,31)
x = np.random.rand(31)
y = np.random.rand(31)
z = np.random.rand(31)
autofig.reset()
autofig.plot(x, y, z, i=t,
xlabel='x', ylabel='y', zlabel='z')
mplfig = autofig.draw()
autofig.reset()
autofig.plot(x, y, z, i=t,
xlabel='x', ylabel='y', zlabel='z',
projection='3d')
mplfig = autofig.draw()
autofig.reset()
autofig.plot(x, y, z, i=t,
xlabel='x', ylabel='y', zlabel='z',
projection='3d', elev=0, azim=0)
mplfig = autofig.draw()
autofig.reset()
autofig.plot(x, y, z, i=t,
xlabel='x', ylabel='y', zlabel='z',
projection='3d', elev=0, azim=[0, 180])
mplfig = autofig.draw(i=3)
anim = autofig.animate(i=t,
save='3d_azim_2.gif', save_kwargs={'writer': 'imagemagick'})
autofig.reset()
autofig.plot(x, y, z, i=t,
xlabel='x', ylabel='y', zlabel='z',
projection='3d', elev=0, azim=[0, 20, 30, 50, 150, 180])
anim = autofig.animate(i=t,
save='3d_azim_6.gif', save_kwargs={'writer': 'imagemagick'})
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: By default, autofig uses the z dimension just to assign z-order (so that positive z appears "on top")
Step2: To instead plot using a projected 3d axes, simply pass projection='3d'
Step3: If the projection is set to 3d, you can also set the elevation ('elev') and azimuth ('azim') of the viewing angle. These are provided in degrees and can be either a float (fixed) or a list (changes as a function of the current value of i).
Step4: When provided as an array, the set viewing angle is determined as follows
Step5: We can then achieve an "accelerating" rotation by passing finer detail on the azimuth as a function of 'i'.
|
515 | <ASSISTANT_TASK:>
Python Code:
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
import cirq
qubit = cirq.NamedQubit("myqubit")
# creates an equal superposition of |0> and |1> when simulated
circuit = cirq.Circuit(cirq.H(qubit))
# see the "myqubit" identifier at the left of the circuit
print(circuit)
# run simulation
result = cirq.Simulator().simulate(circuit)
print("result:")
print(result)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A qubit is the basic unit of quantum information, a quantum bit
|
516 | <ASSISTANT_TASK:>
Python Code:
# numpy is generally imported as 'np'
import numpy as np
print(np)
print(np.__version__)
# an explicit list of numbers
anarray = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23])
# an array of zeros of shape(3, 4)
zeroarray = np.zeros((3, 4))
# a range from 0 to n-1
rangearray = np.arange(12)
# a range from 0 to n-1, reshaped to (2, 3, 5)
shapedarray = np.arange(30).reshape(2, 3, 5)
arr = np.ones((3, 2, 4))
print("Array shape:", arr.shape)
print("Array element dtype:", arr.dtype)
arr = np.array([1, 2, 3, 4, 5, 6])
print("arr --", arr)
print("arr[2] --", arr[2])
print("arr[2:5] --", arr[2:5])
print("arr[::2] --", arr[::2])
lst_2d = [[1, 2, 3], [4, 5, 6]]
arr_2d = np.array(lst_2d)
print("2D list:")
print(lst_2d)
print('')
print("2D array:")
print(arr_2d)
print('')
print("Single array element:")
print(arr_2d[1, 2])
print('')
print("Single row:")
print(arr_2d[1])
print('')
print("First two columns:")
print(arr_2d[:, :2])
print(arr_2d[arr_2d % 2 == 0])
print(lst_2d[0:2][1])
print(arr_2d[0:2, 1])
arr1 = np.arange(4)
arr2 = np.arange(4)
print('{} + {} = {}'.format(arr1, arr2, arr1 + arr2))
arr = np.arange(4)
const = 5
print("Original array: {}".format(arr))
print("")
print("Array + const: {}".format(arr + const))
daily_records = np.array([[12, 14, 11], [11, 12, 15]])
print('raw data:')
print(daily_records)
offset = np.array([2, 1, 4])
corrected_records = daily_records - offset
print('corrected values:')
print(corrected_records)
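# Broadcasting rule, for reference: trailing dimensions are compared one by one and
# are compatible when they are equal or when one of them is 1; missing leading
# dimensions count as 1. That is why the (2, 3) daily_records array broadcast
# against the length-3 offset above.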
arr1 = np.ones((2, 3))
arr2 = np.ones((2, 1))
# (arr1 + arr2).shape
arr1 = np.ones((2, 3))
arr2 = np.ones(3)
# (arr1 + arr2).shape
arr1 = np.ones((1, 3))
arr2 = np.ones((2, 1))
# (arr1 + arr2).shape
arr1 = np.ones((1, 3))
arr2 = np.ones((1, 2))
# (arr1 + arr2).shape
days_adjust = np.array([1.5, 3.7])
adjusted = daily_records - days_adjust
a = np.arange(12).reshape((3, 4))
mean = np.mean(a)
print(a)
print(mean)
daily_records = np.array([[12, 14, 11], [11, 12, 15]])
masked_data = np.ma.masked_array(daily_records)
masked_data[0, 1] = np.ma.masked
print('masked data:')
print(masked_data)
print('unmasked average = ', np.mean(daily_records))
print('masked average = ', np.ma.mean(masked_data))
%%timeit
x = range(500)
%%timeit -n 100 -r 5
x = range(500)
rands = np.random.random(1000000).reshape(100, 100, 100)
%%timeit -n 10 -r 5
overPointEightLoop = 0
for i in range(100):
for j in range(100):
for k in range(100):
if rands[i, j, k] > 0.8:
overPointEightLoop +=1
%%timeit -n 10 -r 5
overPointEightWhere = rands[rands > 0.8].size
try:
a = np.ones((11, 13, 17, 23, 29, 37, 47))
except MemoryError:
print('this would have been a memory error')
arr = np.arange(8)
arr_view = arr.reshape(2, 4)
# Print the "view" array from reshape.
print('Before\n', arr_view)
# Update the first element of the original array.
arr[0] = 1000
# Print the "view" array from reshape again,
# noticing the first value has changed.
print('After\n', arr_view)
arr = np.arange(8)
arr_view = arr.reshape(2, 4).copy()
# Print the "view" array from reshape.
print('Before\n', arr_view)
# Update the first element of the original array.
arr[0] = 1000
# Print the "view" array from reshape again,
# noticing the first value has changed.
print('After\n', arr_view)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Documentation
Step2: Experiment
Step3: Exercise
Step4: You can also index multidimensional arrays in a logical way using an enhanced indexing syntax. Remember that Python uses zero-based indexing!
Step5: Numpy provides syntax to index conditionally, based on the data in the array.
Step6: Exercise
Step7: The result we just received points to an important piece of learning, which is that in most cases NumPy arrays behave very differently to Python lists. Let's explore the differences (and some similarities) between the two.
Step8: Exercise
Step9: Broadcasting
Step10: Each station is known to overstate the maximum recorded temperature by a different known constant value. You wish to subtract the appropriate offset from each station's values.
Step11: NumPy allows you to do this easily using a powerful piece of functionality called broadcasting.
Step12: Reshaping arrays to aid broadcasting
Step13: but that results in a ValueError
Step14: Exercise
Step15: The statistics of the masked data version are different
Step16: The np.ma.masked_array() function seen above is a simple creation method for masked data.
Step17: Repeat that, specifying only 100 loops and fastest time of 5 runs
Step18: This gives us an easy way to evaluate performance for implementations ...
Step19: Clearly this is a trivial example, so let us explore a more complicated case.
Step20: Views on Arrays
Step21: What this means is that if one array (arr) is modified, the other (arr_view) will also be updated
|
517 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import chap01soln
resp = chap01soln.ReadFemResp()
resp.columns
import thinkstats2
hist = thinkstats2.Hist(resp.totincr)
import thinkplot
thinkplot.Hist(hist, label='totincr')
thinkplot.Show()
hist = thinkstats2.Hist(resp.age_r)
thinkplot.Hist(hist, label='age_r')
thinkplot.Show()
hist = thinkstats2.Hist(resp.numfmhh)
thinkplot.Hist(hist, label='numfmhh')
thinkplot.Show()
hist = thinkstats2.Hist(resp.parity)
thinkplot.Hist(hist, label='parity')
thinkplot.Show()
print('The largest parity is ...', hist.Largest(10))
resp.totincr.value_counts() ## count the frequency of each total-income code
rich = resp[resp.totincr == 14]
hist = thinkstats2.Hist(rich.parity)
thinkplot.Hist(hist, label='rich parity')
thinkplot.Show()
hist.Largest(10)
rich = resp[resp.totincr == 14]
poor = resp[resp.totincr < 14]
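# The analysis treats totincr == 14 as the highest income bracket; boolean indexing
# splits the respondents into that group and everyone else.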
print('Rich mean value is: ', rich.parity.mean())
print('Poor mean value is: ', poor.parity.mean())
hist = thinkstats2.Hist(resp.fmarno)
thinkplot.Hist(hist, label='famrno')
thinkplot.Show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a histogram of <tt>totincr</tt>, the total income for the respondent's family. To interpret the codes, look at the codebook.
Step2: Display the histogram.
Step3: Make a histogram of <tt>age_r</tt>, the respondent's age at the time of the interview.
Step4: Make a histogram of <tt>numfmhh</tt>, the number of people in the respondent's household.
Step5: Make a histogram of <tt>parity</tt>, the number of children borne by the respondent. How would you describe this distribution?
Step6: Use Hist.Largest to find the largest values of <tt>parity</tt>.
Step7: Use <tt>totincr</tt> to select the respondents with the highest income. Compute the distribution of <tt>parity</tt> for just the high-income respondents.
Step8: Find the largest parities for the high-income respondents.
Step9: Compare the mean <tt>parity</tt> for the high-income respondents and everyone else.
Step10: Investigate any other variables that look interesting.
|
518 | <ASSISTANT_TASK:>
Python Code:
products = pd.read_csv('../../data/amazon_baby_subset.csv')
products['sentiment']
products['sentiment'].size
products.head(10).name
print ('# of positive reviews =', len(products[products['sentiment']==1]))
print ('# of negative reviews =', len(products[products['sentiment']==-1]))
# The same feature processing (same as the previous assignments)
# ---------------------------------------------------------------
import json
with open('../../data/important_words.json', 'r') as f: # Reads the list of most frequent words
important_words = json.load(f)
important_words = [str(s) for s in important_words]
def remove_punctuation(text):
import string
translator = str.maketrans('', '', string.punctuation)
return str(text).translate(translator)
# Remove punctuation.
products['review_clean'] = products['review'].apply(remove_punctuation)
# Split out the words into individual columns
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
with open('../../data/module-4-assignment-train-idx.json', 'r') as f:
train_idx = json.load(f)
train_data = products.ix[train_idx]
with open ('../../data/module-4-assignment-validation-idx.json', 'r') as f:
v_idx = json.load(f)
validation_data = products.ix[v_idx]
import numpy as np
def get_numpy_data(data_frame, features, label):
data_frame['intercept'] = 1
features = ['intercept'] + features
features_frame = data_frame[features]
feature_matrix = features_frame.as_matrix()
label_array = data_frame[label]
return(feature_matrix, label_array)
feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')
feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment')
def prediction(score):
return (1 / (1 + np.exp(-score)))
'''
produces probabilistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
# Take dot product of feature_matrix and coefficients
scores = np.dot(feature_matrix, coefficients)
# Compute P(y_i = +1 | x_i, w) using the link function
predictions = np.apply_along_axis(prediction, 0, scores)
# return predictions
return predictions
def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant):
# Compute the dot product of errors and feature
derivative = np.dot(feature, errors)
# add L2 penalty term for any feature that isn't the intercept.
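    # We perform gradient ascent on the penalized log likelihood, whose penalty term
    # -l2_penalty * sum(w_j^2) contributes -2 * l2_penalty * w_j to each derivative;
    # the intercept is excluded from the penalty.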
if not feature_is_constant:
derivative = derivative - 2 * l2_penalty * coefficient
return derivative
def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty):
indicator = (sentiment==+1)
scores = np.dot(feature_matrix, coefficients)
lp = np.sum((indicator-1)*scores - np.log(1. + np.exp(-scores))) - l2_penalty*np.sum(coefficients[1:]**2)
return lp
from math import sqrt
def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter):
coefficients = np.array(initial_coefficients) # make sure it's a numpy array
for itr in range(max_iter):
# Predict P(y_i = +1|x_i,w) using your predict_probability() function
# YOUR CODE HERE
predictions = predict_probability(feature_matrix, coefficients)
# Compute indicator value for (y_i = +1)
indicator = (sentiment==+1)
# Compute the errors as indicator - predictions
errors = indicator - predictions
for j in range(len(coefficients)): # loop over each coefficient
# Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].
# Compute the derivative for coefficients[j]. Save it in a variable called derivative
# YOUR CODE HERE
derivative = feature_derivative_with_L2(errors, feature_matrix[:, j], coefficients[j], l2_penalty, j == 0)
# add the step size times the derivative to the current coefficient
coefficients[j] += (step_size * derivative)
# Checking whether log likelihood is increasing
if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty)
print ('iteration %*d: log likelihood of observed labels = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, lp))
return coefficients
# run with L2 = 0
coefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=0, max_iter=501)
# run with L2 = 4
coefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=4, max_iter=501)
# run with L2 = 10
coefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=10, max_iter=501)
# run with L2 = 1e2
coefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=1e2, max_iter=501)
# run with L2 = 1e3
coefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=1e3, max_iter=501)
# run with L2 = 1e5
coefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
initial_coefficients=np.zeros(194),
step_size=5e-6, l2_penalty=1e5, max_iter=501)
important_words.insert(0, 'intercept')
data = np.array(important_words)
table = pd.DataFrame(columns = ['words'], data = data)
def add_coefficients_to_table(coefficients, column_name):
table[column_name] = coefficients
return table
important_words.remove('intercept')
add_coefficients_to_table(coefficients_0_penalty, 'coefficients [L2=0]')
add_coefficients_to_table(coefficients_4_penalty, 'coefficients [L2=4]')
add_coefficients_to_table(coefficients_10_penalty, 'coefficients [L2=10]')
add_coefficients_to_table(coefficients_1e2_penalty, 'coefficients [L2=1e2]')
add_coefficients_to_table(coefficients_1e3_penalty, 'coefficients [L2=1e3]')
add_coefficients_to_table(coefficients_1e5_penalty, 'coefficients [L2=1e5]')
def make_tuple(column_name):
word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip( table['words'], table[column_name])]
return word_coefficient_tuples
positive_words = list(map(lambda x: x[0], sorted(make_tuple('coefficients [L2=0]'), key=lambda x:x[1], reverse=True)[:5]))
negative_words = list(map(lambda x: x[0], sorted(make_tuple('coefficients [L2=0]'), key=lambda x:x[1], reverse=False)[:5]))
positive_words
negative_words
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
cmap_positive = plt.get_cmap('Reds')
cmap_negative = plt.get_cmap('Blues')
xx = l2_penalty_list
plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')
table_positive_words = table[table['words'].isin(positive_words)]
table_negative_words = table[table['words'].isin(negative_words)]
del table_positive_words['words']
del table_negative_words['words']
for i in range(len(positive_words)):
color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))
plt.plot(xx, table_positive_words[i:i+1].as_matrix().flatten(),
'-', label=positive_words[i], linewidth=4.0, color=color)
for i in range(len(negative_words)):
color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))
plt.plot(xx, table_negative_words[i:i+1].as_matrix().flatten(),
'-', label=negative_words[i], linewidth=4.0, color=color)
plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)
plt.axis([1, 1e5, -1, 2])
plt.title('Coefficient path')
plt.xlabel('L2 penalty ($\lambda$)')
plt.ylabel('Coefficient value')
plt.xscale('log')
plt.rcParams.update({'font.size': 18})
plt.tight_layout()
make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list=[0, 4, 10, 1e2, 1e3, 1e5])
def get_classification_accuracy(feature_matrix, sentiment, coefficients):
scores = np.dot(feature_matrix, coefficients)
apply_threshold = np.vectorize(lambda x: 1. if x > 0 else -1.)
predictions = apply_threshold(scores)
num_correct = (predictions == sentiment).sum()
accuracy = num_correct / len(feature_matrix)
return accuracy
train_accuracy = {}
train_accuracy[0] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_0_penalty)
train_accuracy[4] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_4_penalty)
train_accuracy[10] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_10_penalty)
train_accuracy[1e2] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e2_penalty)
train_accuracy[1e3] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e3_penalty)
train_accuracy[1e5] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e5_penalty)
validation_accuracy = {}
validation_accuracy[0] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_0_penalty)
validation_accuracy[4] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_4_penalty)
validation_accuracy[10] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_10_penalty)
validation_accuracy[1e2] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e2_penalty)
validation_accuracy[1e3] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e3_penalty)
validation_accuracy[1e5] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e5_penalty)
# Build a simple report
for key in sorted(validation_accuracy.keys()):
print("L2 penalty = %g" % key)
print("train accuracy = %s, validation_accuracy = %s" % (train_accuracy[key], validation_accuracy[key]))
print("--------------------------------------------------------------------------------")
# Optional. Plot accuracy on training and validation sets over choice of L2 penalty.
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
sorted_list = sorted(train_accuracy.items(), key=lambda x:x[0])
plt.plot([p[0] for p in sorted_list], [p[1] for p in sorted_list], 'bo-', linewidth=4, label='Training accuracy')
sorted_list = sorted(validation_accuracy.items(), key=lambda x:x[0])
plt.plot([p[0] for p in sorted_list], [p[1] for p in sorted_list], 'ro-', linewidth=4, label='Validation accuracy')
plt.xscale('symlog')
plt.axis([0, 1e3, 0.78, 0.786])
plt.legend(loc='lower left')
plt.rcParams.update({'font.size': 18})
plt.tight_layout()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train-Validation split
Step2: Convert Frame to NumPy array
Step3: Building on logistic regression with no L2 penalty assignment
Step4: Adding L2 penalty
Step5: Quiz Question
Step6: Quiz Question
Step7: Explore effects of L2 regularization
Step8: Compare coefficients
Step9: Using the coefficients trained with L2 penalty 0, find the 5 most positive words (with largest positive coefficients). Save them to positive_words. Similarly, find the 5 most negative words (with largest negative coefficients) and save them to negative_words.
Step10: Let us observe the effect of increasing L2 penalty on the 10 words just selected. We provide you with a utility function to plot the coefficient path.
Step11: Quiz Question
Step12: Below, we compare the accuracy on the training data and validation data for all the models that were trained in this assignment. We first calculate the accuracy values and then build a simple report summarizing the performance for the various models.
|
519 | <ASSISTANT_TASK:>
Python Code:
import os
import fiona
import matplotlib.pyplot as plt
from planet import api
import rasterio
from rasterio import features as rfeatures
from rasterio.enums import Resampling
from rasterio.plot import show
import shapely
from shapely.geometry import shape as sshape
# if your Planet API Key is not set as an environment variable, you can paste it below
API_KEY = os.environ.get('PL_API_KEY', 'PASTE_YOUR_KEY_HERE')
analytics_client = api.ClientV1(api_key=API_KEY)
# This ID is for a subscription for monthly road detection in Kirazli, Turkey
SUBSCRIPTION_ID = 'f184516c-b948-406f-b257-deaa66c3f38a'
results = analytics_client.list_collection_features(SUBSCRIPTION_ID).get()
features = results['features']
print('{} features in collection'.format(len(features)))
# sort features by acquisition date
features.sort(key=lambda k: k['properties']['first_acquired'])
feature = features[-1]
print(feature['properties']['first_acquired'])
RESOURCE_TYPE = 'target-quad'
def create_save_dir(root_dir='data'):
save_dir = root_dir
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
return save_dir
dest = 'data'
create_save_dir(dest)
from planet.api.models import Body
from planet.api.utils import write_to_file
def download_feature(feature, subscription_id, resource_type, dest=dest):
print('{}: acquired {}'.format(feature['id'], get_date(feature)))
resource = analytics_client.get_associated_resource_for_analytic_feature(subscription_id,
feature['id'],
resource_type)
named_resource = NamedBody(resource, get_name(feature))
filename = download_resource(named_resource, dest)
return filename
def get_date(feature):
feature_acquired = feature['properties']['first_acquired']
return feature_acquired.split('T',1)[0]
def get_name(feature):
return feature['properties']['target_quad_id'] + '_' + get_date(feature) + '.tif'
def download_resource(resource, dest, overwrite=False):
writer = write_to_file(dest, overwrite=overwrite)
writer(resource)
filename = os.path.join(dest, resource.name)
print('file saved to: {}'.format(filename))
return filename
class NamedBody(Body):
def __init__(self, body, name):
super(NamedBody, self).__init__(body._request, body.response, body._dispatcher)
self._name = name
@property
def name(self):
return self._name
filename = download_feature(feature, SUBSCRIPTION_ID, RESOURCE_TYPE)
def _open(filename, factor=1):
with rasterio.open(filename) as dataset:
height = int(dataset.height / factor)
width = int(dataset.width / factor)
data = dataset.read(
out_shape=(dataset.count, height, width)
)
return data
def open_bool(filename, factor=1):
data = _open(filename, factor=factor)
return data[0,:,:]
def get_figsize(factor):
return tuple(2 * [int(25/factor)])
factor = 1
figsize = (15, 15)
roads = open_bool(filename, factor=factor)
fig = plt.figure(figsize=figsize)
show(roads, title="roads", cmap="binary")
gdal_output_filename = os.path.join('data', 'test_gdal.shp')
!gdal_polygonize.py $filename $gdal_output_filename
def roads_as_vectors(filename):
with rasterio.open(filename) as dataset:
roads = dataset.read(1)
road_mask = roads == 255 # mask non-road pixels
# transforms roads features to image crs
road_shapes = rfeatures.shapes(roads, mask=road_mask, connectivity=8, transform=dataset.transform)
road_geometries = (r for r, _ in road_shapes)
crs = dataset.crs
return (road_geometries, crs)
def save_as_shapefile(output_filename, geometries, crs):
driver='ESRI Shapefile'
schema = {'geometry': 'Polygon', 'properties': []}
with fiona.open(output_filename, mode='w', driver=driver, schema=schema, crs=crs) as c:
count = 0
for g in geometries:
count += 1;
c.write({'geometry': g, 'properties': {}})
print('wrote {} geometries to {}'.format(count, output_filename))
road_geometries, crs = roads_as_vectors(filename)
output_filename = os.path.join('data', 'test.shp')
save_as_shapefile(output_filename, road_geometries, crs)
def roads_as_vectors_with_filtering(filename, min_pixel_size=5):
with rasterio.open(filename) as dataset:
roads = dataset.read(1)
road_mask = roads == 255 # mask non-road pixels
# we skip transform on vectorization so we can perform filtering in pixel space
road_shapes = rfeatures.shapes(roads, mask=road_mask, connectivity=8)
road_geometries = (r for r, _ in road_shapes)
geo_shapes = (sshape(g) for g in road_geometries)
# filter to shapes bigger than min_pixel_size
geo_shapes = (s for s in geo_shapes if s.area > min_pixel_size)
# simplify so we don't have a million pixel edge points
tolerance = 1 #1.5
geo_shapes = (g.simplify(tolerance, preserve_topology=False)
for g in geo_shapes)
# apply image transform
# rasterio transform: (a, b, c, d, e, f, 0, 0, 1), c and f are offsets
# shapely: a b d e c/xoff f/yoff
d = dataset.transform
shapely_transform = [d[0], d[1], d[3], d[4], d[2], d[5]]
proj_shapes = (shapely.affinity.affine_transform(g, shapely_transform)
for g in geo_shapes)
road_geometries = (shapely.geometry.mapping(s) for s in proj_shapes)
crs = dataset.crs
return (road_geometries, crs)
road_geometries_filt, crs = roads_as_vectors_with_filtering(filename)
output_filename = os.path.join('data', 'test_filt.shp')
save_as_shapefile(output_filename, road_geometries_filt, crs)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Obtain Analytics Raster
Step2: Download Quad Raster
Step3: We want to save all of the images in one directory. But all of the images for a single target quad have the same name, L15_{target_quad_id}. We use the function write_to_file to save the image, and that function pulls the name from the resource name attribute, which we can't set. So, we are going to make a new object that functions just like the resource but has the name attribute set to the acquisition date. It would be nice if the write_to_file function just allowed us to set the name, like it allows us to set the directory.
Step4: Visualize Roads Image
Step5: Convert Roads to Vector Features
Step6: Rasterio - no filtering
Step7: Rasterio - Filtering and Simplifying
|
520 | <ASSISTANT_TASK:>
Python Code:
import striplog
striplog.__version__
text = "wet silty fine sand with tr clay"
from striplog import Lexicon
lex_dict = {
'lithology': ['sand', 'clay'],
'grainsize': ['fine'],
'modifier': ['silty'],
'amount': ['trace'],
'moisture': ['wet', 'dry'],
'abbreviations': {'tr': 'trace'},
'splitters': ['with'],
'parts_of_speech': {'noun': ['lithology'],
'adjective': ['grainsize', 'modifier', 'moisture'],
'subordinate': ['amount'],
}
}
lexicon = Lexicon(lex_dict)
from striplog import Interval
Interval._parse_description(text, lexicon=lexicon, max_component=3, abbreviations=True)
# Make and expand the lexicon.
lexicon = Lexicon.default()
# Add moisture words (or could add as other 'modifiers').
lexicon.moisture = ['wet(?:tish)?', 'dry(?:ish)?']
lexicon.parts_of_speech['adjective'] += ['moisture']
# Add the comma as component splitter.
lexicon.splitters += [', ']
Interval._parse_description(text, lexicon=lexicon, max_component=3)
Interval._parse_description("Coarse sandstone with minor limestone", lexicon=lexicon, max_component=3)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We have some text
Step2: To read this with striplog, we need to define a Lexicon. This is a dictionary-like object full of regular expressions, which acts as a bridge between this unstructured description and a dictionary-like Component object which striplog wants. The Lexicon also contains abbreviations for converting abbreviated text like cuttings descriptions into expanded words.
Step3: Now we can parse the text with it
Step4: But this is obviously a bit of a pain to make and maintain. So instead of definining a Lexicon from scratch, we'll modify the default one
Step5: Parsing with this yields the same results as before...
Step6: ...but we can parse more things now
|
521 | <ASSISTANT_TASK:>
Python Code:
labVersion = 'cs190_week2_word_count_v_1_0'
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat']
wordsRDD = sc.parallelize(wordsList, 4)
# Print out the type of wordsRDD
print type(wordsRDD)
# TODO: Replace <FILL IN> with appropriate code
def makePlural(word):
    """Adds an 's' to `word`.

    Note:
        This is a simple function that only adds an 's'. No attempt is made to follow proper
        pluralization rules.

    Args:
        word (str): A string.

    Returns:
        str: A string with 's' added to it.
    """
return word + 's'
print makePlural('cat')
# One way of completing the function
def makePlural(word):
return word + 's'
print makePlural('cat')
# Load in the testing code and check to see if your answer is correct
# If incorrect it will report back '1 test failed' for each failed test
# Make sure to rerun any cell you change before trying the test again
from test_helper import Test
# TEST Pluralize and test (1b)
Test.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s')
# TODO: Replace <FILL IN> with appropriate code
pluralRDD = wordsRDD.map(makePlural)
print pluralRDD.collect()
# TEST Apply makePlural to the base RDD(1c)
Test.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralRDD')
# TODO: Replace <FILL IN> with appropriate code
pluralLambdaRDD = wordsRDD.map(lambda word: word + 's')
print pluralLambdaRDD.collect()
# TEST Pass a lambda function to map (1d)
Test.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralLambdaRDD (1d)')
# TODO: Replace <FILL IN> with appropriate code
pluralLengths = (pluralRDD
.map(lambda word: len(word))
.collect())
print pluralLengths
# TEST Length of each word (1e)
Test.assertEquals(pluralLengths, [4, 9, 4, 4, 4],
'incorrect values for pluralLengths')
# TODO: Replace <FILL IN> with appropriate code
wordPairs = wordsRDD.map(lambda word: (word, 1))
print wordPairs.collect()
# TEST Pair RDDs (1f)
Test.assertEquals(wordPairs.collect(),
[('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)],
'incorrect value for wordPairs')
# TODO: Replace <FILL IN> with appropriate code
# Note that groupByKey requires no parameters
wordsGrouped = wordPairs.groupByKey()
for key, value in wordsGrouped.collect():
print '{0}: {1}'.format(key, list(value))
# TEST groupByKey() approach (2a)
Test.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()),
[('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])],
'incorrect value for wordsGrouped')
# TODO: Replace <FILL IN> with appropriate code
wordCountsGrouped = wordsGrouped.map(lambda (k,v): (k, sum(v)))
print wordCountsGrouped.collect()
# TEST Use groupByKey() to obtain the counts (2b)
Test.assertEquals(sorted(wordCountsGrouped.collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsGrouped')
# TODO: Replace <FILL IN> with appropriate code
# Note that reduceByKey takes in a function that accepts two values and returns a single value
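# Unlike groupByKey(), reduceByKey() combines the values for each key on every
# partition before shuffling, so far less data has to move across the network.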
wordCounts = wordPairs.reduceByKey(lambda a,b: a+b)
print wordCounts.collect()
# TEST Counting using reduceByKey (2c)
Test.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCounts')
# TODO: Replace <FILL IN> with appropriate code
wordCountsCollected = (wordsRDD
.map(lambda word: (word, 1))
.reduceByKey(lambda a,b: a+b)
.collect())
print wordCountsCollected
# TEST All together (2d)
Test.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsCollected')
# TODO: Replace <FILL IN> with appropriate code
uniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct().count()
print uniqueWords
# TEST Unique words (3a)
Test.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords')
# TODO: Replace <FILL IN> with appropriate code
from operator import add
totalCount = (wordCounts
.map(lambda (a,b): b)
.reduce(add))
average = totalCount / float(wordCounts.distinct().count())
print totalCount
print round(average, 2)
# TEST Mean using reduce (3b)
Test.assertEquals(round(average, 2), 1.67, 'incorrect value of average')
# TODO: Replace <FILL IN> with appropriate code
def wordCount(wordListRDD):
    """Creates a pair RDD with word counts from an RDD of words.

    Args:
        wordListRDD (RDD of str): An RDD consisting of words.

    Returns:
        RDD of (str, int): An RDD consisting of (word, count) tuples.
    """
return (wordListRDD
.map(lambda a : (a,1))
.reduceByKey(lambda a,b: a+b))
print wordCount(wordsRDD).collect()
# TEST wordCount function (4a)
Test.assertEquals(sorted(wordCount(wordsRDD).collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect definition for wordCount function')
# TODO: Replace <FILL IN> with appropriate code
import re
def removePunctuation(text):
    """Removes punctuation, changes to lower case, and strips leading and trailing spaces.

    Note:
        Only spaces, letters, and numbers should be retained. Other characters should be
        eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed
        after punctuation is removed.

    Args:
        text (str): A string.

    Returns:
        str: The cleaned up string.
    """
return re.sub("[^a-zA-Z0-9 ]", "", text.strip(" ").lower()).strip()
print removePunctuation('Hi, you!')
print removePunctuation(' No under_score!')
print removePunctuation(' * Remove punctuation then spaces * ')
# TEST Capitalization and punctuation (4b)
Test.assertEquals(removePunctuation(" The Elephant's 4 cats. "),
'the elephants 4 cats',
'incorrect definition for removePunctuation function')
# Just run this code
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
shakespeareRDD = (sc
.textFile(fileName, 8)
.map(removePunctuation))
print '\n'.join(shakespeareRDD
.zipWithIndex() # to (line, lineNum)
.map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'
.take(15))
# TODO: Replace <FILL IN> with appropriate code
shakespeareWordsRDD = shakespeareRDD.flatMap(lambda a: a.split(" "))
shakespeareWordCount = shakespeareWordsRDD.count()
print shakespeareWordsRDD.top(5)
print shakespeareWordCount
# TEST Words from lines (4d)
# This test allows for leading spaces to be removed either before or after
# punctuation is removed.
Test.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908,
'incorrect value for shakespeareWordCount')
Test.assertEquals(shakespeareWordsRDD.top(5),
[u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'],
'incorrect value for shakespeareWordsRDD')
# TODO: Replace <FILL IN> with appropriate code
shakeWordsRDD = shakespeareWordsRDD.filter(lambda word: len(word) > 0)
shakeWordCount = shakeWordsRDD.count()
print shakeWordCount
# TEST Remove empty elements (4e)
Test.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount')
# TODO: Replace <FILL IN> with appropriate code
top15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15, lambda (a,b): -b)
print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts))
# TEST Count the words (4f)
Test.assertEquals(top15WordsAndCounts,
[(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463),
(u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890),
(u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)],
'incorrect value for top15WordsAndCounts')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step3: (1b) Pluralize and test
Step4: (1c) Apply makePlural to the base RDD
Step5: (1d) Pass a lambda function to map
Step6: (1e) Length of each word
Step7: (1f) Pair RDDs
Step8: Part 2
Step9: (2b) Use groupByKey() to obtain the counts
Step10: (2c) Counting using reduceByKey
Step11: (2d) All together
Step12: Part 3
Step13: (3b) Mean using reduce
Step15: Part 4
Step17: (4b) Capitalization and punctuation
Step18: (4c) Load a text file
Step19: (4d) Words from lines
Step20: (4e) Remove empty elements
Step21: (4f) Count the words
|
522 | <ASSISTANT_TASK:>
Python Code:
import csv
import re
with open('../data/bee_list.txt') as f:
csvr = csv.DictReader(f, delimiter = '\t')
species = []
authors = []
for r in csvr:
species.append(r['Scientific Name'])
authors.append(r['Taxon Author'])
len(species)
len(authors)
au = authors[37]
au
my_reg = re.compile(r'\(?([\w\s,\.\-\&]*),\s(\d{4})\)?')
# Translation
# \(? -> open parenthesis (or not)
# ([\w\s,\.\-\&]*) -> the first group is the list of authors
# which can contain \w (word character)
# \s (space) \. (dot) \- (dash) \& (ampersand)
# ,\s -> followed by comma and space
# (\d{4}) -> the second group is the year, 4 digits
# \)? -> potentially, close parenthesis
re.findall(my_reg,au)
def extract_list_au_year(au):
tmp = re.match(my_reg, au)
authorlist = tmp.group(1)
year = tmp.group(2)
# split authors into a list using re.split
authorlist = re.split(', | \& ', authorlist)
# Translation: either separate using ', ' or ' & '
return [authorlist, year]
extract_list_au_year(au)
dict_years = {}
dict_authors = {}
for au in authors:
tmp = extract_list_au_year(au)
for aunum in tmp[0]:
if aunum in dict_authors.keys():
dict_authors[aunum] = dict_authors[aunum] + 1
else:
dict_authors[aunum] = 1
if tmp[1] in dict_years.keys():
dict_years[tmp[1]] = dict_years[tmp[1]] + 1
else:
dict_years[tmp[1]] = 1
dict_authors
max_value_author = max(dict_authors.values())
max_value_author
which_index = list(dict_authors.values()).index(max_value_author)
which_index
list(dict_authors.keys())[which_index]
max_value_year = max(dict_years.values())
which_index = list(dict_years.values()).index(max_value_year)
list(dict_years.keys())[which_index]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, we read the file, and store the columns Scientific Name and Taxon Author in two lists
Step2: How many species?
Step3: Pick one of the authors element to use for testing. Choose one that is quite complicated, such as the 38th element
Step4: Now we need to build a regular expression. After some twiddling, you should end up with something like this, which captures the authors in one group, and the year in another group
Step5: Test the expression
Step6: Now we write a function that uses the regular expression to extract an author list (useful when there are multiple authors), and the year
Step7: Let's see the output of this function
Step8: Finally, let's build two dictionaries
Step9: For example, these are all the authors
Step10: What is the name of the author with most entries in the database?
Step11: An the winner is
Step12: Which year of publication is most represented in the database?
|
523 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
sigma = np.array([1/3, 1/2, 0, 0, 1/6])
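# The support of a mixed strategy is the set of actions played with positive probability;
# np.where below returns the indices of those actions.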
np.where(sigma > 0) # Recall Python indexing starts at 0
sigma = np.array([0, 0, 1, 0])
np.where(sigma > 0) # Recall Python indexing starts at 0
A = np.array([[1, 1, 0], [2, 3, 0]])
sigma_c = np.array([0, 0, 1])
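# Expected payoff to the row player for each of their pure strategies when the column player plays sigma_c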
(np.dot(A, sigma_c))
import nashpy as nash
A = np.array([[1,-1], [-1, 1]])
game = nash.Game(A)
list(game.support_enumeration())
A = np.array([[1, 1, -1], [2, -1, 0]])
B = np.array([[1/2, -1, -1/2], [-1, 3, 2]])
game = nash.Game(A, B)
list(game.support_enumeration())
A = np.array([[1, 1, 0], [2, -1, 0]])
B = np.array([[1/2, -1, -1/2], [-1, 3, 2]])
game = nash.Game(A, B)
list(game.support_enumeration())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definition of nondegenerate games
Step2: This leads to the following algorithm for identifying Nash equilibria
Step3: If you recall the degenerate game mentioned previously
|
524 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append(r'C:\Anaconda2\envs\dato-env\Lib\site-packages')
import graphlab
sales = graphlab.SFrame('kc_house_data_small.gl/')
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
def normalize_features(feature_matrix):
norms = np.linalg.norm(feature_matrix, axis=0)
normalized_features = feature_matrix/norms
return (normalized_features, norms)
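# Illustrative sanity check (not part of the original assignment): every column of the
# normalized matrix should have unit 2-norm. The names below are hypothetical examples.
# example_matrix = np.array([[3., 0.], [4., 1.]])
# example_normalized, example_norms = normalize_features(example_matrix)
# print np.linalg.norm(example_normalized, axis=0)  # expect [ 1.  1.]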
(train_and_validation, test) = sales.random_split(.8, seed=1) # initial train/test split
(train, validation) = train_and_validation.random_split(.8, seed=1) # split training set into training and validation sets
feature_list = ['bedrooms',
'bathrooms',
'sqft_living',
'sqft_lot',
'floors',
'waterfront',
'view',
'condition',
'grade',
'sqft_above',
'sqft_basement',
'yr_built',
'yr_renovated',
'lat',
'long',
'sqft_living15',
'sqft_lot15']
features_train, output_train = get_numpy_data(train, feature_list, 'price')
features_test, output_test = get_numpy_data(test, feature_list, 'price')
features_valid, output_valid = get_numpy_data(validation, feature_list, 'price')
features_train, norms = normalize_features(features_train) # normalize training set features (columns)
features_test = features_test / norms # normalize test set by training set norms
features_valid = features_valid / norms # normalize validation set by training set norms
print features_test[0]
print features_train[9]
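# Euclidean distance between the query house (features_test[0]) and the 10th training house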
euclidean_distance = np.sqrt(np.sum((features_train[9] - features_test[0])**2))
print euclidean_distance
dist_dict = {}
for i in range(0,10):
dist_dict[i] = np.sqrt(np.sum((features_train[i] - features_test[0])**2))
print (i, np.sqrt(np.sum((features_train[i] - features_test[0])**2)))
print min(dist_dict.items(), key=lambda x: x[1])
for i in xrange(3):
print features_train[i]-features_test[0]
# should print 3 vectors of length 18
print features_train[0:3] - features_test[0]
# verify that vectorization works
results = features_train[0:3] - features_test[0]
print results[0] - (features_train[0]-features_test[0])
# should print all 0's if results[0] == (features_train[0]-features_test[0])
print results[1] - (features_train[1]-features_test[0])
# should print all 0's if results[1] == (features_train[1]-features_test[0])
print results[2] - (features_train[2]-features_test[0])
# should print all 0's if results[2] == (features_train[2]-features_test[0])
diff = features_train - features_test[0]
print diff[-1].sum() # sum of the feature differences between the query and last training house
# should print -0.0934339605842
print np.sum(diff**2, axis=1)[15] # take sum of squares across each row, and print the 16th sum
print np.sum(diff[15]**2) # print the sum of squares for the 16th row -- should be same as above
distances = np.sqrt(np.sum(diff**2, axis=1))
print distances[100] # Euclidean distance between the query house and the 101th training house
# should print 0.0237082324496
def compute_distances(train_matrix, query_vector):
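# Returns the Euclidean distance from the query house to every house in the training matrix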
diff = train_matrix - query_vector
distances = np.sqrt(np.sum(diff**2, axis=1))
return distances
third_house_distance = compute_distances(features_train, features_test[2])
print third_house_distance.argsort()[:1], min(third_house_distance)
print third_house_distance[382]
print np.argsort(third_house_distance, axis = 0)[:4]
print output_train[382]
def compute_k_nearest_neighbors(k, features_matrix, feature_vector):
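# Returns the indices of the k training houses closest to the query house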
distances = compute_distances(features_matrix, feature_vector)
return np.argsort(distances, axis = 0)[:k]
print compute_k_nearest_neighbors(4, features_train, features_test[2])
def compute_distances_k_avg(k, features_matrix, output_values, feature_vector):
k_neigbors = compute_k_nearest_neighbors(k, features_matrix, feature_vector)
avg_value = np.mean(output_values[k_neigbors])
return avg_value
print compute_distances_k_avg(4, features_train, output_train, features_test[2])
print features_test[0:10].shape[0]
def compute_distances_k_all(k, features_matrix, output_values, feature_vector):
    num_of_rows = feature_vector.shape[0]
    predicted_values = []
    for i in xrange(num_of_rows):
        # use the passed-in training data and query rows (not the globals) so this
        # also works correctly for the validation and test sets
        avg_value = compute_distances_k_avg(k, features_matrix, output_values, feature_vector[i])
        predicted_values.append(avg_value)
    return predicted_values
predicted_values = compute_distances_k_all(10, features_train, output_train, features_test[0:10])
print predicted_values
print predicted_values.index(min(predicted_values))
print min(predicted_values)
rss_all = []
for k in range(1,16):
predict_value = compute_distances_k_all(k, features_train, output_train, features_valid)
residual = (output_valid - predict_value)
rss = sum(residual**2)
rss_all.append(rss)
print rss_all
print rss_all.index(min(rss_all))
import matplotlib.pyplot as plt
%matplotlib inline
kvals = range(1, 16)
plt.plot(kvals, rss_all,'bo-')
predict_value = compute_distances_k_all(14, features_train, output_train, features_test)
residual = (output_test - predict_value)
rss = sum(residual**2)
print rss
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Import useful functions from previous notebooks
Step3: We will also need the normalize_features() function from Week 5 that normalizes all feature columns to unit norm. Paste this function below.
Step4: Split data into training, test, and validation sets
Step5: Extract features and normalize
Step6: In computing distances, it is crucial to normalize features. Otherwise, for example, the sqft_living feature (typically on the order of thousands) would exert a much larger influence on distance than the bedrooms feature (typically on the order of ones). We divide each column of the training feature matrix by its 2-norm, so that the transformed column has unit norm.
Step7: Compute a single distance
Step8: Now print the 10th row (index 9) of the training feature matrix. Again, you get an 18-dimensional vector with components between 0 and 1.
Step9: QUIZ QUESTION
Step10: Compute multiple distances
Step11: QUIZ QUESTION
Step12: It is computationally inefficient to loop over computing distances to all houses in our training dataset. Fortunately, many of the Numpy functions can be vectorized, applying the same operation over multiple values or vectors. We now walk through this process.
Step13: The subtraction operator (-) in Numpy is vectorized as follows
Step14: Note that the output of this vectorized operation is identical to that of the loop above, which can be verified below
Step15: Aside
Step16: To test the code above, run the following cell, which should output a value -0.0934339605842
Step17: The next step in computing the Euclidean distances is to take these feature-by-feature differences in diff, square each, and take the sum over feature indices. That is, compute the sum of square feature differences for each training house (row in diff).
Step18: With this result in mind, write a single-line expression to compute the Euclidean distances between the query house and all houses in the training set. Assign the result to a variable distances.
Step19: To test the code above, run the following cell, which should output a value 0.0237082324496
Step20: Now you are ready to write a function that computes the distances from a query house to all training houses. The function should take two parameters
Step21: QUIZ QUESTIONS
Step22: Perform k-nearest neighbor regression
Step23: QUIZ QUESTION
Step24: Make a single prediction by averaging k nearest neighbor outputs
Step25: QUIZ QUESTION
Step26: Compare this predicted value using 4-nearest neighbors to the predicted value using 1-nearest neighbor computed earlier.
Step27: QUIZ QUESTION
Step28: Choosing the best value of k using a validation set
Step29: To visualize the performance as a function of k, plot the RSS on the VALIDATION set for each considered k value
Step30: QUIZ QUESTION
|
525 | <ASSISTANT_TASK:>
Python Code:
f = spot.formula('a U Gb')
a = f.translate('ba')
a
propset = spot.atomic_prop_collect_as_bdd(f, a)
ta = spot.tgba_to_ta(a, propset, True, True, False, False, True)
ta.show('.A')
ta = spot.tgba_to_ta(a, propset, True, True, False, False, False)
ta.show('.A')
spot.minimize_ta(ta).show('.A')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, gather all the atomic proposition in the formula, and create an automaton with changesets
Step2: Then, remove dead states, and remove stuttering transitions (i.e., transitions labeled by {}), marking as livelock accepting (rectangles) any states from which there exists a an accepting path labeled by {}.
Step3: Finally, use bisimulation to minimize the number of states.
|
526 | <ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
from vertebratesLib import *
split = "SPLIT1"
summaryTree,summarySpecies,splitPositions = get_split_data(split)
print summaryTree.shape
def get_sentence(position,splitPositions,summary,ignore=False):
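# Build the 'sentence' for one alignment position: each observed transition is repeated
# as many times as it was counted (optionally skipping same-amino-acid transitions)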
splitIndex = np.where(splitPositions==position)[0]
nonZero = np.where(summary[splitIndex,:] != 0)[1]
sentence = []
for nz in nonZero:
if ignore and TRANSITIONS[nz].count(TRANSITIONS[nz][0]) == 2:
continue
count = int(summary[splitIndex,nz][0])
sentence.extend([TRANSITIONS[nz]] * count)
return sentence
position = '8500'
sentence1 = get_sentence(position,splitPositions,summaryTree,ignore=False)
sentence2 = get_sentence(position,splitPositions,summaryTree,ignore=True)
print("with same AA transition")
print(sentence1)
print("without same AA transition")
print(sentence2)
import lda
## the data matrix is sentences (rows) by vocabulary (columns)
vocab = TRANSITIONS
#inPlaceTransitions = []
#for t in TRANSITIONS:
from IPython.display import Image
dataDir = None
for ddir in [os.path.join("..","data","herve-vertebrates"),\
os.path.join("/","media","ganda","mojo","phylogenetic-models","herve-vertebrates")]:
if os.path.isdir(ddir):
dataDir = ddir
split = "SPLIT1"
position = "0"
treeList = get_trees(split,position,dataDir)
countMatrix = np.zeros((len(treeList),len(TRANSITIONS)),)
t = 0
for t,pbTree in enumerate(treeList):
fixedTree,treeSummary = fix_tree(pbTree)
tlist = []
for item in treeSummary.itervalues():
tlist.extend(item['pairs'])
counts = transitions_to_counts(tlist)
countMatrix[t,:] = counts
figName1 = os.path.join("figures","lda-bplot-check.png")
profile_box_plot(countMatrix,figName1,figTitle='position - %s'%position)
Image(filename=figName1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: a sentence of words is represented as the transitions for a given position
Step2: Simple test run with lda package
Step3: Recall that the Dirichlet Process (DP) (Ferguson, 1973) is essentially a distribution over distributions, where each draw from a DP is itself a distribution and importantly for clustering applications it serves as a natural prior that lets the number of clusters grow as the data grows. The DP has a base distribution parameter $\beta$ and a strength or concentration parameter $\alpha$.
|
527 | <ASSISTANT_TASK:>
Python Code:
g.plot_reward(smoothing=100)
g.__class__ = KarpathyGame
np.set_printoptions(formatter={'float': (lambda x: '%.2f' % (x,))})
x = g.observe()
new_shape = (x[:-2].shape[0]//g.eye_observation_size, g.eye_observation_size)
print(x[:-4].reshape(new_shape))
print(x[-4:])
g.to_html()
%pwd
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizing what the agent is seeing
|
528 | <ASSISTANT_TASK:>
Python Code:
from numpy import *
v = array([1,2,3,4])
v
M = array([[1, 2], [3, 4]])
M
type(v), type(M)
v.shape
M.shape
v.size, M.size
shape(M)
size(M)
M.dtype
M[0,0] = "hello"
M[0,0]=5
M = array([[1, 2], [3, 4]], dtype=complex)
M
x = arange(0, 10, 1) # arguments: start, stop, step
x # 10 is not included in the array!
x = arange(-1, 1, 0.1)
x
# here both the start and the end are included!
linspace(0, 10, 25)
logspace(0, 10, 10, base=e)
x, y = mgrid[0:5, 0:5] # similar to meshgrid in MATLAB
x
y
from numpy import random
# uniform distribution on [0,1]
random.rand(5,5)
# standard normal distribution
random.randn(5,5)
# diagonal matrix
diag([1,2,3])
# matrix with values on an offset (secondary) diagonal
diag([1,2,3], k=1)
zeros((3,3))
ones((3,3))
!head tpt-europe.csv
data = genfromtxt('tpt-europe.csv')
data.shape, data.dtype
M = random.rand(3,3)
M
savetxt("random-matrix.csv", M)
!cat random-matrix.csv
savetxt("random-matrix.csv", M, fmt='%.5f') # s fmt specificiramo format
!cat random-matrix.csv
save("random-matrix.npy", M)
!file random-matrix.npy
load("random-matrix.npy")
M.itemsize # bytes per element
M.nbytes
M.ndim
v[0]
M[1,1]
M
M[1]
M[1,:] # row 1
M[:,1] # column 1
M[1,:] = 0
M[:,2] = -1
M
A = array([1,2,3,4,5])
A
A[1:3]
A[1:3] = [-2,-3]
A
A[::]
A[::2]
A[:3]
A[3:]
A = array([1,2,3,4,5])
A[-1] # the last element of the array
A[-3:] # the last three elements
A = array([[n+m*10 for n in range(5)] for m in range(5)])
A
A[1:4, 1:4]
A[::2, ::2]
indeksi_redaka = [1, 2, 3]
A[indeksi_redaka]
indeksi_stupaca = [1, 2, -1]
A[indeksi_redaka, indeksi_stupaca]
B = array([n for n in range(5)])
B
maska = array([True, False, True, False, False])
B[maska]
maska = array([1,0,1,0,0], dtype=bool)
B[maska]
x = arange(0, 10, 0.5)
x
maska = (5 < x) * (x < 7.5)
maska
x[maska]
indeksi = where(maska)
indeksi
x[indeksi]
print(A)
diag(A)
diag(A, -1)
v2 = arange(-3,3)
v2
indeksi_redaka = [1, 3, 5]
v2[indeksi_redaka]
v2.take(indeksi_redaka)
take([-3, -2, -1, 0, 1, 2], indeksi_redaka)
koji = [1, 0, 1, 0]
izbori = [[-1,-2,-3,-4], [5,4,3,2]]
choose(koji, izbori)
v1 = arange(0, 5)
v1 * 2
v1 + 2
print(A)
A * 2, A + 2
A * A
v1 * v1
A.shape, v1.shape
print(A,v1)
A * v1
dot(A, A)
A @ A # new operator introduced in Python 3.5+
matmul(A,A) # @ is really shorthand for matmul; dot and matmul are not the same operation (they coincide on 1D and 2D arrays)
dot(A, v1)
A @ v1
v1 @ v1 # analogous to dot(v1, v1)
a = random.rand(8,13,13)
b = random.rand(8,13,13)
matmul(a, b).shape
M = matrix(A)
v = matrix(v1).T # to get a column vector
v
M*M
M*v
# scalar (dot) product
v.T * v
v + M*v
v = matrix([1,2,3,4,5,6]).T
shape(M), shape(v)
M * v
C = matrix([[1j, 2j], [3j, 4j]])
C
conjugate(C)
C.H
real(C) # isto što i C.real
imag(C) # isto što i C.imag
angle(C+1) # in MATLAB this is the arg function, i.e. the argument (phase) of a complex number
abs(C)
from numpy.linalg import inv, det
inv(C) # same as C.I
C.I * C
det(C)
det(C.I)
# stockholm_td_adj.dat contains weather data for Stockholm
dataStockholm = genfromtxt('stockholm_td_adj.dat')
dataStockholm.shape
# the temperature is in the 4th column (i.e. column number 3)
mean(dataStockholm[:,3])
std(dataStockholm[:,3]), var(dataStockholm[:,3])
dataStockholm[:,3].min()
dataStockholm[:,3].max()
d = arange(0, 10)
d
sum(d)
prod(d+1)
# cumulative sum
cumsum(d)
# cumulative product
cumprod(d+1)
# same as: diag(A).sum()
trace(A)
!head -n 3 stockholm_td_adj.dat
# the months are 1., ..., 12.
unique(dataStockholm[:,1])
maska_velj = dataStockholm[:,1] == 2
mean(dataStockholm[maska_velj,3])
mjeseci = arange(1,13)
mjeseci_prosjek = [mean(dataStockholm[dataStockholm[:,1] == mjesec, 3]) for mjesec in mjeseci]
from pylab import *
%matplotlib inline
fig, ax = subplots()
ax.bar(mjeseci, mjeseci_prosjek)
ax.set_xlabel("Mjesec")
ax.set_ylabel("Prosj. mj. temp.");
m = rand(3,3)
m
m.max()
# max u svakom stupcu
m.max(axis=0)
# max u svakom retku
m.max(axis=1)
A
n, m = A.shape
B = A.reshape((1,n*m))
B
B[0,0:5] = 5 # we have modified B
B
A # and by doing so we have also modified A
B = A.flatten()
B
B[0:5] = 10
B
A # A has now stayed the same
v = array([1,2,3])
shape(v)
# turn v into a matrix (add a new axis)
v[:, newaxis]
v[:,newaxis].shape
v[newaxis,:].shape
a = array([[1, 2], [3, 4]])
# repeat each element three times
repeat(a, 3)
tile(a, 3)
b = array([[5, 6]])
concatenate((a, b), axis=0)
concatenate((a, b.T), axis=1)
vstack((a,b))
hstack((a,b.T))
A = array([[1, 2], [3, 4]])
A
# B is the same as A (no data is copied)
B = A
B = copy(A)
v = array([1,2,3,4])
for element in v:
print (element)
M = array([[1,2], [3,4]])
for row in M:
    print("row {}".format(row))
    for element in row:
        print(element)
for row_idx, row in enumerate(M):
    print("row index {} row {}".format(row_idx, row))
    for col_idx, element in enumerate(row):
        print("col_idx {} element {}".format(col_idx, element))
        M[row_idx, col_idx] = element ** 2
def Theta(x):
    """Scalar version of the step function."""
    if x >= 0:
        return 1
    else:
        return 0
Theta(array([-3,-2,-1,0,1,2,3]))
Theta_vec = vectorize(Theta)
Theta_vec(array([-3,-2,-1,0,1,2,3]))
def Theta(x):
    """Vectorized version of the step function."""
    return 1 * (x >= 0)
Theta(array([-3,-2,-1,0,1,2,3]))
# of course it also works for scalars
Theta(-1.2), Theta(2.6)
M
if (M > 5).any():
    print("at least one element of M is greater than 5")
else:
    print("all elements of M are less than or equal to 5")
if (M > 5).all():
    print("all elements of M are greater than 5")
else:
    print("at least one element is less than or equal to 5")
M.dtype
M2 = M.astype(float)
M2
M2.dtype
M3 = M.astype(bool)
M3
from verzije import *
from IPython.display import HTML
HTML(print_sysinfo()+info_packages('numpy,matplotlib'))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Kreiranje nizova pomoću numpy modula
Step2: Možemo koristiti i funkcije numpy.shape, numpy.size
Step3: Koja je razlika između numpy.ndarray tipa i standardnih lista u Pythonu?
Step4: Kako je M statički objekt, ne možemo napraviti ovo
Step5: Naravno, ovo je ok
Step6: dtype se može eksplicitno zadati
Step7: Tipično dtype su
Step8: Učitavanje podataka
Step9: Uz numpy.savetxt možemo napraviti i obrnuto.
Step10: Postoji i interni format za numpy nizove
Step11: Rad s nizovima
Step12: Naravno, možemo koristiti i
Step13: S negativnim indeksima računamo od kraja niza
Step14: Naravno, iste operacije imamo i za višedimenzionalne nizove.
Step15: Možemo koristiti i tzv. maske
Step16: Zanimljiviji primjer
Step17: Funkcije na nizovima
Step18: U sljedećem primjeru take djeluje na listu, a izlaz je array
Step19: Funkcija choose
Step20: Što radi ova funkcija?
Step21: Defaultne operacije na nizovima su uvijek definirane po elementima.
Step22: Kako doći do standardnog umnoška?
Step23: Matrice mogu biti i višedimenzionalne
Step24: Postoji i tip matrix. Kod njega operacije +, -, * se ponašaju onako kako smo navikli.
Step25: Naravno, dimenzije trebaju biti kompatibilne.
Step26: Još neke funkcije
Step27: Adjungiranje
Step28: Za izvlačenje realnog, odnosno imaginarnog dijela
Step29: Izvlačenje osnovih informacija iz nizova
Step30: Prosječna dnevna temperatura u Stockholmu u zadnjiih 200 godina je bila 6.2 C.
Step31: Naravno, sve ove operacije možemo raditi na dijelovima nizova.
Step32: Format je
Step33: Sada nije problem doći do histograma za prosječne mjesečne temperature u par redaka.
Step34: Rad s višedimenzionalnim podacima
Step35: Oblik niza se može promijeniti bez da se dira memorija, dakle mogu se primijenjivati i na veliku količinu podataka.
Step36: Funkcija flatten radi kopiju.
Step37: Kopiranje nizova
Step38: Ako želimo napraviti novu kopiju, koristimo funkciju copy
Step39: Funkcija enumerate nam daje i element i njegov indeks
Step41: Vektorizacija funkcija
Step43: To smo mogli napraviti i ručno.
Step44: Eksplicitno pretvaranje podataka. Uvijek stvara novi niz.
|
529 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.set_option('display.max_columns', 999)
import pandas.io.sql as psql
# plot a figure directly on Notebook
import matplotlib.pyplot as plt
%matplotlib inline
a = pd.read_csv("data/ADMISSIONS.csv")
a.columns = map(str.lower, a.columns)
a.groupby(['marital_status']).count()['row_id'].plot(kind='pie')
a.groupby(['religion']).count()['row_id'].plot(kind = 'barh')
p = pd.read_csv("data/PATIENTS.csv")
p.columns = map(str.lower, p.columns)
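# Join admissions to patients on subject_id so demographic fields (e.g. gender) can be cross-tabulated with admission fields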
ap = pd.merge(a, p, on = 'subject_id' , how = 'inner')
ap.groupby(['religion','gender']).size().unstack().plot(kind="barh", stacked=True)
c = pd.read_csv("data/CPTEVENTS.csv")
c.columns = map(str.lower, c.columns)
ac = pd.merge(a, c, on = 'hadm_id' , how = 'inner')
ac.groupby(['discharge_location','sectionheader']).size().unstack().plot(kind="barh", stacked=True)
# !conda install -c conda-forge pandas-profiling -y
import pandas_profiling
a = pd.read_csv("data/ADMISSIONS.csv")
a.columns = map(str.lower, a.columns)
# ignore the times when profiling since they are uninteresting
cols = [c for c in a.columns if not c.endswith('time')]
pandas_profiling.ProfileReport(a[cols])
# !conda install -c conda-forge missingno -y
import missingno as msno
msno.matrix(a)
# !conda install -c conda-forge wordcloud -y
from wordcloud import WordCloud
text = str(a['diagnosis'].values)
wordcloud = WordCloud().generate(text)
import matplotlib.pyplot as plt
plt.figure(figsize = (10,10))
plt.imshow(wordcloud, interpolation = 'bilinear')
plt.axis("off")
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Agenda
Step2: Load the admissions table (2/3)
Step3: Profile the table (3/3)
Step4: Agenda
Step5: Agenda
Step6: Prepare an input text in string (2/4)
Step7: Generate a word-cloud from the input text (3/4)
Step8: Plot the word-cloud (4/4)
|
530 | <ASSISTANT_TASK:>
Python Code:
import string
import pandas as pd
import numpy as np
import seaborn as sns
def get_random_numerical_data(size, *amplitudes):
n = len(amplitudes)
data = np.random.random((size, n)) * np.array(amplitudes).reshape(1, n)
return pd.DataFrame(data=data, columns=pd.Series(list(string.ascii_uppercase[:n]), name="feature"))
get_random_numerical_data(5, 1, 2)
get_random_numerical_data(500, 1, 2, 3, 4).describe().loc[['count', 'std', 'max']]
df_small_range = get_random_numerical_data(500, 1, 2, 3, 4)
sns.violinplot(df_small_range)
df_big_range = get_random_numerical_data(500, 1, 10, 100, 1000)
sns.violinplot(df_big_range)
df_big_range = get_random_numerical_data(500, 1, 10, 100, 1000)
h = sns.violinplot(df_big_range)
h.set_yscale('log')
import matplotlib.pyplot as plt
from itertools import chain  # chain is used below to flatten the nested axes array
def featureplot(df, nrows=1, ncols=1, figsize=(12,8), plotfunc=sns.violinplot):
    """Plot the dataframe features"""
width, height = figsize
fig, axes = plt.subplots(nrows, ncols, figsize=(width, height * nrows));
i = 0
plots_per_figure = max(df.shape[1] // (nrows * ncols), 1)
if nrows == 1 and ncols == 1:
axes = [axes]
if nrows > 1 and ncols > 1:
axes = chain.from_iterable(axes) # flatten the nested list
for j, ax in zip(range(plots_per_figure, df.shape[1] + 1, plots_per_figure), axes):
plotfunc(df.iloc[:, i:j], ax=ax)
i = j
plt.tight_layout()
featureplot(df_big_range, ncols=4)
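# Reshape to long format (one row per feature/value pair) so each feature can get its own FacetGrid column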
df_big_range_lf = df_big_range.stack().reset_index(name="value").drop('level_0', axis=1)#.reset_index() # don't keep the index
df_big_range_lf.head()
# size is the height of each figure and aspect is the with/height aspect ratio of each figure.
sns.FacetGrid(df_big_range_lf, col="feature", hue="feature",
sharey=False, size=7, aspect=8/12.0/2.0).map(sns.violinplot, "value", orient="v")
test = pd.DataFrame({'foo':["one"] * 3 + ["two"] * 3, 'bar': list("ABC")*2, 'baz': list(range(6))})
test
test.pivot('foo', 'bar', 'baz')
test.set_index(['foo','bar']).unstack()['baz']
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get some random data
Step2: Plotting all features directly with Seaborn
Step3: Changing the y-scale to log doesn't help much
Step5: Plotting distributions on separate figures using Matplotlib
Step6: Plotting on separate columns using Seaborn only
Step7: Appendix
|
531 | <ASSISTANT_TASK:>
Python Code:
# Ensure compatibility with Python 2 and 3
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import climlab
from climlab import constants as const
import cartopy.crs as ccrs # use cartopy to make some maps
## The NOAA ESRL server is shutdown! January 2019
ncep_url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_Ts = xr.open_dataset(ncep_url + "surface_gauss/skt.sfc.mon.1981-2010.ltm.nc", decode_times=False)
#url = "http://apdrc.soest.hawaii.edu:80/dods/public_data/Reanalysis_Data/NCEP/NCEP/clima/"
#ncep_Ts = xr.open_dataset(url + 'surface_gauss/skt')
lat_ncep = ncep_Ts.lat; lon_ncep = ncep_Ts.lon
Ts_ncep = ncep_Ts.skt
print( Ts_ncep.shape)
maxTs = Ts_ncep.max(dim='time')
minTs = Ts_ncep.min(dim='time')
meanTs = Ts_ncep.mean(dim='time')
fig = plt.figure( figsize=(16,6) )
ax1 = fig.add_subplot(1,2,1, projection=ccrs.Robinson())
cax1 = ax1.pcolormesh(lon_ncep, lat_ncep, meanTs, cmap=plt.cm.seismic , transform=ccrs.PlateCarree())
cbar1 = plt.colorbar(cax1)
ax1.set_title('Annual mean surface temperature ($^\circ$C)', fontsize=14 )
ax2 = fig.add_subplot(1,2,2, projection=ccrs.Robinson())
cax2 = ax2.pcolormesh(lon_ncep, lat_ncep, maxTs - minTs, transform=ccrs.PlateCarree() )
cbar2 = plt.colorbar(cax2)
ax2.set_title('Seasonal temperature range ($^\circ$C)', fontsize=14)
for ax in [ax1,ax2]:
#ax.contour( lon_cesm, lat_cesm, topo.variables['LANDFRAC'][:], [0.5], colors='k');
#ax.set_xlabel('Longitude', fontsize=14 ); ax.set_ylabel('Latitude', fontsize=14 )
ax.coastlines()
Tmax = 65; Tmin = -Tmax; delT = 10
clevels = np.arange(Tmin,Tmax+delT,delT)
fig_zonobs, ax = plt.subplots( figsize=(10,6) )
cax = ax.contourf(np.arange(12)+0.5, lat_ncep,
Ts_ncep.mean(dim='lon').transpose(), levels=clevels,
cmap=plt.cm.seismic, vmin=Tmin, vmax=Tmax)
ax.set_xlabel('Month', fontsize=16)
ax.set_ylabel('Latitude', fontsize=16 )
cbar = plt.colorbar(cax)
ax.set_title('Zonal mean surface temperature (degC)', fontsize=20)
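# omega: angular frequency of the annual cycle (radians per second)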
omega = 2*np.pi / const.seconds_per_year
omega
B = 2.
Hw = np.linspace(0., 100.)
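# Ctilde: dimensionless heat capacity parameter -- mixed-layer heat capacity per unit area
# times omega, divided by the feedback parameter B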
Ctilde = const.cw * const.rho_w * Hw * omega / B
amp = 1./((Ctilde**2+1)*np.cos(np.arctan(Ctilde)))
Phi = np.arctan(Ctilde)
color1 = 'b'
color2 = 'r'
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
ax1.plot(Hw, amp, color=color1)
ax1.set_xlabel('water depth (m)', fontsize=14)
ax1.set_ylabel('Seasonal amplitude ($Q^* / B$)', fontsize=14, color=color1)
for tl in ax1.get_yticklabels():
tl.set_color(color1)
ax2 = ax1.twinx()
ax2.plot(Hw, np.rad2deg(Phi), color=color2)
ax2.set_ylabel('Seasonal phase shift (degrees)', fontsize=14, color=color2)
for tl in ax2.get_yticklabels():
tl.set_color(color2)
ax1.set_title('Dependence of seasonal cycle phase and amplitude on water depth', fontsize=16)
ax1.grid()
ax1.plot([2.5, 2.5], [0, 1], 'k-');
fig, ax = plt.subplots()
years = np.linspace(0,2)
Harray = np.array([0., 2.5, 10., 50.])
for Hw in Harray:
Ctilde = const.cw * const.rho_w * Hw * omega / B
Phi = np.arctan(Ctilde)
ax.plot(years, np.sin(2*np.pi*years - Phi)/np.cos(Phi)/(1+Ctilde**2), label=Hw)
ax.set_xlabel('Years', fontsize=14)
ax.set_ylabel('Seasonal amplitude ($Q^* / B$)', fontsize=14)
ax.set_title('Solution of toy seasonal model for several different water depths', fontsize=14)
ax.legend(); ax.grid()
# for convenience, set up a dictionary with our reference parameters
param = {'A':210, 'B':2, 'a0':0.354, 'a2':0.25, 'D':0.6}
param
# We can pass the entire dictionary as keyword arguments using the ** notation
model1 = climlab.EBM_seasonal(**param, name='Seasonal EBM')
print( model1)
# We will try three different water depths
water_depths = np.array([2., 10., 50.])
num_depths = water_depths.size
Tann = np.empty( [model1.lat.size, num_depths] )
models = []
for n in range(num_depths):
ebm = climlab.EBM_seasonal(water_depth=water_depths[n], **param)
models.append(ebm)
models[n].integrate_years(20., verbose=False )
models[n].integrate_years(1., verbose=False)
Tann[:,n] = np.squeeze(models[n].timeave['Ts'])
lat = model1.lat
fig, ax = plt.subplots()
ax.plot(lat, Tann)
ax.set_xlim(-90,90)
ax.set_xlabel('Latitude')
ax.set_ylabel('Temperature (degC)')
ax.set_title('Annual mean temperature in the EBM')
ax.legend( water_depths )
num_steps_per_year = int(model1.time['num_steps_per_year'])
Tyear = np.empty((lat.size, num_steps_per_year, num_depths))
for n in range(num_depths):
for m in range(num_steps_per_year):
models[n].step_forward()
Tyear[:,m,n] = np.squeeze(models[n].Ts)
fig = plt.figure( figsize=(16,10) )
ax = fig.add_subplot(2,num_depths,2)
cax = ax.contourf(np.arange(12)+0.5, lat_ncep,
Ts_ncep.mean(dim='lon').transpose(),
levels=clevels, cmap=plt.cm.seismic,
vmin=Tmin, vmax=Tmax)
ax.set_xlabel('Month')
ax.set_ylabel('Latitude')
cbar = plt.colorbar(cax)
ax.set_title('Zonal mean surface temperature - observed (degC)', fontsize=20)
for n in range(num_depths):
ax = fig.add_subplot(2,num_depths,num_depths+n+1)
cax = ax.contourf(4*np.arange(num_steps_per_year),
lat, Tyear[:,:,n], levels=clevels,
cmap=plt.cm.seismic, vmin=Tmin, vmax=Tmax)
cbar1 = plt.colorbar(cax)
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Days of year', fontsize=14 )
ax.set_ylabel('Latitude', fontsize=14 )
def initial_figure(models):
fig, axes = plt.subplots(1,len(models), figsize=(15,4))
lines = []
for n in range(len(models)):
ax = axes[n]
c1 = 'b'
Tsline = ax.plot(lat, models[n].Ts, c1)[0]
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Latitude', fontsize=14 )
if n == 0:
ax.set_ylabel('Temperature', fontsize=14, color=c1 )
ax.set_xlim([-90,90])
ax.set_ylim([-60,60])
for tl in ax.get_yticklabels():
tl.set_color(c1)
ax.grid()
c2 = 'r'
ax2 = ax.twinx()
Qline = ax2.plot(lat, models[n].insolation, c2)[0]
if n is 2:
ax2.set_ylabel('Insolation (W m$^{-2}$)', color=c2, fontsize=14)
for tl in ax2.get_yticklabels():
tl.set_color(c2)
ax2.set_xlim([-90,90])
ax2.set_ylim([0,600])
lines.append([Tsline, Qline])
return fig, axes, lines
def animate(step, models, lines):
for n, ebm in enumerate(models):
ebm.step_forward()
# The rest of this is just updating the plot
lines[n][0].set_ydata(ebm.Ts)
lines[n][1].set_ydata(ebm.insolation)
return lines
# Plot initial data
fig, axes, lines = initial_figure(models)
# Some imports needed to make and display animations
from IPython.display import HTML
from matplotlib import animation
num_steps = int(models[0].time['num_steps_per_year'])
ani = animation.FuncAnimation(fig, animate,
frames=num_steps,
interval=80,
fargs=(models, lines),
)
HTML(ani.to_html5_video())
orb_highobl = {'ecc':0.,
'obliquity':90.,
'long_peri':0.}
print( orb_highobl)
model_highobl = climlab.EBM_seasonal(orb=orb_highobl, **param)
print( model_highobl.param['orb'])
Tann_highobl = np.empty( [lat.size, num_depths] )
models_highobl = []
for n in range(num_depths):
model = climlab.EBM_seasonal(water_depth=water_depths[n],
orb=orb_highobl,
**param)
models_highobl.append(model)
models_highobl[n].integrate_years(40., verbose=False )
models_highobl[n].integrate_years(1., verbose=False)
Tann_highobl[:,n] = np.squeeze(models_highobl[n].timeave['Ts'])
Tyear_highobl = np.empty([lat.size, num_steps_per_year, num_depths])
for n in range(num_depths):
for m in range(num_steps_per_year):
models_highobl[n].step_forward()
Tyear_highobl[:,m,n] = np.squeeze(models_highobl[n].Ts)
fig = plt.figure( figsize=(16,5) )
Tmax_highobl = 125; Tmin_highobl = -Tmax_highobl; delT_highobl = 10
clevels_highobl = np.arange(Tmin_highobl, Tmax_highobl+delT_highobl, delT_highobl)
for n in range(num_depths):
ax = fig.add_subplot(1,num_depths,n+1)
cax = ax.contourf( 4*np.arange(num_steps_per_year), lat, Tyear_highobl[:,:,n],
levels=clevels_highobl, cmap=plt.cm.seismic, vmin=Tmin_highobl, vmax=Tmax_highobl )
cbar1 = plt.colorbar(cax)
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Days of year', fontsize=14 )
ax.set_ylabel('Latitude', fontsize=14 )
lat2 = np.linspace(-90, 90, 181)
days = np.linspace(1.,50.)/50 * const.days_per_year
Q_present = climlab.solar.insolation.daily_insolation( lat2, days )
Q_highobl = climlab.solar.insolation.daily_insolation( lat2, days, orb_highobl )
Q_present_ann = np.mean( Q_present, axis=1 )
Q_highobl_ann = np.mean( Q_highobl, axis=1 )
fig, ax = plt.subplots()
ax.plot( lat2, Q_present_ann, label='Earth' )
ax.plot( lat2, Q_highobl_ann, label='90deg obliquity' )
ax.grid()
ax.legend(loc='lower center')
ax.set_xlabel('Latitude', fontsize=14 )
ax.set_ylabel('W m$^{-2}$', fontsize=14 )
ax.set_title('Annual mean insolation for two different obliquities', fontsize=16)
%load_ext version_information
%version_information numpy, xarray, climlab
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contents
Step2: Make two maps
Step3: Make a contour plot of the zonal mean temperature as a function of time
Step4: <a id='section2'></a>
Step5: The blue line shows the amplitude of the seasonal cycle of temperature, expressed as a fraction of its maximum value $\frac{Q^*}{B}$ (the value that would occur if the system had zero heat capacity so that temperatures were always in radiative equilibrium with the instantaneous insolation).
Step6: The blue curve in this figure is in phase with the insolation.
Step7: Notice that this model has an insolation subprocess called DailyInsolation, rather than AnnualMeanInsolation. These should be fairly self-explanatory.
Step8: All models should have the same annual mean temperature
Step9: There is no automatic function in the climlab code to keep track of minimum and maximum temperatures (though we might add that in the future!)
Step10: Make a figure to compare the observed zonal mean seasonal temperature cycle to what we get from the EBM with different heat capacities
Step11: Which one looks more realistic? Depends a bit on where you look. But overall, the observed seasonal cycle matches the 10 meter case best. The effective heat capacity governing the seasonal cycle of the zonal mean temperature is closer to 10 meters of water than to either 2 or 50 meters.
Step12: <a id='section4'></a>
Step13: Repeat the same procedure to calculate and store temperature throughout one year, after letting the models run out to equilibrium.
Step14: And plot the seasonal temperature cycle same as we did above
Step15: Note that the temperature range is much larger than for the Earth-like case above (but same contour interval, 10 degC).
Step16: Though this is a bit misleading, because our model prescribes an increase in albedo from the equator to the pole. So the absorbed shortwave gradients look even more different.
|
532 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
# Replace 0 with your sigmoid calculation.
self.activation_function = lambda x : 1 / (1 + np.exp(-x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# hidden_inputs: signals into hidden layer
# hidden_outputs: signals from hidden layer
# final_inputs: signals into final output layer
# final_outputs: signals from final output layer
hidden_inputs, hidden_outputs, final_inputs, final_outputs = self.forward_pass(X)
#### Implement the backward pass here ####
### Backward pass ###
error, output_error_term, hidden_error, hidden_error_term = self.backward_pass_errors(y, final_outputs, hidden_outputs)
# Weight step (input to hidden)
# the hidden_error_term by the transpose(inputs)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
# the output_error_term by the transpose(hidden_outputs)
delta_weights_h_o += error * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
# update hidden-to-output weights with gradient descent step
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
# update input-to-hidden weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
def backward_pass_errors(self, targets, final_outputs, hidden_outputs):
        """
        Calculate the errors and error terms for the network, output layer, and hidden layer
Notes about the shapes
Numbers
# i: number of input units, 3 in our tests
# j: number of hidden units, 2 in our tests
# k: number of output units, 1 in our tests
Matrices
# y: a 1 x k matrix of target outputs
# final_outputs: a 1 x k matrix of network outputs
# hidden_outputs: sigmoid of hidden_inputs, a 1 x j matrix
# error: target - final_outputs, a 1 x k matrix for each of the k output units
# output_error_term = error, a 1 x k matix
# hidden_error = error DOT transpose(w_h_o) or 1 x k DOT k x j, yields a 1 x j matrix
for each of the j hidden units
# hidden_error_term = hidden_error * activation derivative of hidden_outputs,
        1 x j * 1 x j, yields a 1 x j matrix for each of the j hidden units
        """
# Output layer error is the difference between desired target and actual output.
error = targets - final_outputs
# Get the output error, which is just the error
output_error_term = error
# Take the output error term and scale by the weights from the hidden layer to that output
hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)
# Use derivative of activation for the hidden outputs
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
return error, output_error_term, hidden_error, hidden_error_term
def forward_pass(self, features):
        """
        Calculate the values for the inputs and outputs of the hidden and final layers
Notes about the shapes
Numbers
# i: number of input units, 3 in our tests
# j: number of hidden units, 2 in our tests
# k: number of output units, 1 in our tests
Matrices
# features: a 1 x i row vector
# w_i_h: i x j matrix of weights from input units to hidden units
# w_h_o: j x k matrix of weights from hidden units to output units
# hidden_inputs: features DOT w_i_h, yeilds a 1 x j matrix, for each of the j hidden units
# hidden_outputs: sigmoid of hidden_inputs, also 1 x j matrix
# final_inputs: hidden_outputs DOT w_h_o, so 1 x j DOT j x k, yields 1 x k matrix
        # final_outputs: same as the final inputs, so 1 x k matrix
        """
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return hidden_inputs, hidden_outputs, final_inputs, final_outputs
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
hidden_inputs, hidden_outputs, final_inputs, final_outputs = self.forward_pass(features)
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner(verbosity=1).run(suite)
import sys
### Set the hyperparameters here ###
# Results from earlier hyperparameter settings
# (iterations, learning_rate, hidden_nodes) -> (Training Loss, Validation Loss)
# (5000, 0.8, 8) -> (0.059, 0.148)
iterations = 5000 #100
learning_rate = 0.9 #0.1
hidden_nodes = 8 #2
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
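# Undo the target scaling so the predictions are back in units of ride counts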
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step9: Time to build the network
Step10: Unit tests
Step11: Training the network
Step12: Check out your predictions
|
533 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import keras
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train.shape
import matplotlib.pyplot as plt
%matplotlib inline
randix = np.random.randint(0,60000)
plt.imshow(x_train[randix])
print("Label is {}.".format(y_train[randix]))
x_train_f = x_train.reshape(60000,-1)
x_train_f.shape
x_test_f = x_test.reshape(-1, 28**2)
x_test_f.shape
from sklearn.preprocessing import OneHotEncoder as OHE
ohe = OHE(sparse = False)
y_train_ohe = ohe.fit_transform(y_train.reshape(-1,1))
y_test_ohe = ohe.fit_transform(y_test.reshape(-1,1))
np.argmax(y_train_ohe[randix]) == y_train[randix]
from scipy.spatial.distance import cdist
from sklearn.neighbors import KDTree
class KNNClassifier(object):
def fit(self,x,y,k=1,fun=lambda x: np.mean(x,axis=0)):
        """Fits a KNN regressor.
        Args:
            x (numpy array) Array of samples indexed along first axis.
            y (numpy array) Array of corresponding labels.
            k (int) the number of neighbors
            fun (function numpy array --> desired output) Function to be applied to k-nearest
                neighbors for predictions
        """
self.x = x[:]
self.y = y[:]
self.k = k
self.f = fun
self.tree = KDTree(self.x)
def predict_one(self, sample):
        """Run prediction on sample
        Args:
            sample (numpy array) sample
        """
dists = cdist(sample.reshape(1,-1),self.x)
ix = np.argpartition(dists,self.k-1)[0,0:self.k]
return self.f(self.y[ix])
def predict(self, samples):
        """Run predictions on list.
        Args:
            samples (numpy array) samples
        """
return np.array([self.predict_one(x) for x in samples])
def faster_predict(self,samples):
        """Run faster predictions on list.
        Args:
            samples (numpy array) samples
        """
_, ixs = self.tree.query(samples, k=self.k)
#print(ixs)
return np.array([self.f(self.y[ix]) for ix in ixs])
classifier = KNNClassifier()
classifier.fit(x_train_f, y_train_ohe, k=1)
preds=classifier.predict(x_test_f[:500])
np.mean(np.argmax(preds,axis=1)==y_test[:500])
faster_preds = classifier.faster_predict(x_test_f[:500])
np.mean(np.argmax(faster_preds,axis=1)==y_test[:500])
from timeit import default_timer as timer
start = timer()
classifier.predict(x_test_f[:500])
end = timer()
print(end-start)
start = timer()
classifier.faster_predict(x_test_f[:500])
end = timer()
print(end-start)
def cluster_means(x,cluster_assignments,k):
    """Return the new cluster means and the within-cluster squared distance given the cluster assignments"""
cluster_counter = np.zeros((k,1))
cluster_means = np.zeros((k, x.shape[1]))
for cluster, pt in zip(cluster_assignments, x):
#print(x)
cluster_means[cluster] += pt
cluster_counter[cluster]+=1
cluster_means = cluster_means/cluster_counter
wcss = 0.
for cluster, pt in zip(cluster_assignments, x):
wcss+=np.sum((pt-cluster_means[cluster])**2)
return cluster_means, wcss
class KMeansCluster(object):
#Fit a clustering object on a dataset x consisting of samples on each row
#by the K-means algorithm into k clusters
def fit(self,x,k):
        """Fit k-means clusterer
        Args:
            x (numpy array) samples
            k (int) number of clusters
        """
num_samples, num_features = x.shape[0], x.shape[1]
#Randomly assign clusters
cluster_assignments = np.random.randint(0,k,num_samples)
#initialize
cluster_mus = np.zeros((k,num_features))
#update
new_cluster_mus, wcss = cluster_means(x,cluster_assignments,k)
count = 1
while (cluster_mus!=new_cluster_mus).any() and count < 10**3:
count += 1
print("Iteration {:3d}, WCSS = {:10f}".format(count,wcss),end="\r")
cluster_mus = new_cluster_mus
#calculate distances
distances = cdist(x,cluster_mus, metric = 'sqeuclidean')
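# Reassign each sample to its nearest cluster mean (written in place into cluster_assignments)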
np.argmin(distances, axis = 1, out = cluster_assignments)
new_cluster_mus, wcss = cluster_means(x,cluster_assignments,k)
self.cluster_means = cluster_means
self.cluster_assignments = cluster_assignments
self.x = x[:]
self.wcss = wcss
clusterer = KMeansCluster()
clusterer.fit(x_train_f,10)
clusterer2 = KMeansCluster()
clusterer2.fit(x_train_f,10)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train, clusterer2.cluster_assignments)
cluster_samples = clusterer2.x[clusterer2.cluster_assignments == 0]
plt.imshow(cluster_samples[0].reshape(28,28))
plt.imshow(cluster_samples[1].reshape(28,28))
plt.imshow(cluster_samples[23].reshape(28,28))
plt.imshow(cluster_samples[50].reshape(28,28))
np.mean(classifier.faster_predict(cluster_samples),axis=0)
big_df = pd.read_csv("UCI_Credit_Card.csv")
big_df.head()
len(big_df)
len(big_df.dropna())
df = big_df.drop(labels = ['ID'], axis = 1)
labels = df['default.payment.next.month']
df.drop('default.payment.next.month', axis = 1, inplace = True)
num_samples = 25000
train_x, train_y = df[0:num_samples], labels[0:num_samples]
test_x, test_y = df[num_samples:], labels[num_samples:]
test_x.head()
train_y.head()
class bin_transformer(object):
def __init__(self, df, num_quantiles = 2):
#identify list of quantiles
self.quantiles = df.quantile(np.linspace(1./num_quantiles, 1.-1./num_quantiles,num_quantiles-1))
def transform(self, df):
        """
        Args:
            df (pandas dataframe) : dataframe to transform
        Returns:
            new (pandas dataframe) : new dataframe where for every feature of the original there will be
                num_quantiles-1 features corresponding to whether or not the original values were greater
                than or equal to the corresponding quantile.
            fns (dictionary (string,float)) returns dictionary of quantiles
        """
new = pd.DataFrame()
fns = {}
for col_name in df.axes[1]:
for ix, q in self.quantiles.iterrows():
quart = q[col_name]
new[col_name+str(ix)] = (df[col_name] >= quart)
fn = quart
fns[col_name+str(ix)] = [col_name, fn]
return new, fns
transformer = bin_transformer(train_x,2)
train_x_t, tr_fns = transformer.transform(train_x)
test_x_t, test_fns = transformer.transform(test_x)
train_x_t.head()
def bdd_cross_entropy(pred, label):
return np.mean(-np.sum(label*np.log(pred+10**(-8)),axis=1))
def MSE(pred,label):
return np.mean(np.sum((pred-label)**2, axis=1))
def acc(pred,label):
return np.mean(np.argmax(pred,axis=1)==np.argmax(label, axis=1))
def SSE(x,y):
return np.sum((x-y)**2)
def gini(x,y):
return 1-np.sum(np.mean(y,axis=0)**2)
def find_split(x, y, loss, verbose = False):
    """
    Args:
        x (dataframe) : dataframe of boolean values
        y (dataframe (1 column)) : dataframe of labeled values
        loss (function: (yvalue, dataframe of labels)-->float) : calculates loss for prediction of yvalue
            for a dataframe of true values.
        verbose (bool) : whether or not to include debugging info
    """
min_ax = None
N = x.shape[0]
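# Loss when predicting the overall mean label for every sample (i.e. no split at all)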
base_loss = loss(np.mean(y,axis=0),y)
min_loss = base_loss
for col_name in x.axes[1]:
mask = x[col_name]
num_pos = np.sum(mask)
num_neg = N - num_pos
if num_neg*num_pos == 0:
continue
pos_y = np.mean(y[mask], axis = 0)
neg_y = np.mean(y[~mask], axis = 0)
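# Weighted average loss of the two children created by splitting on this column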
l = (num_pos*loss(pos_y, y[mask]) + num_neg*loss(neg_y, y[~mask]))/N
if verbose:
print("Column {0} split has improved loss {1}".format(col_name, base_loss-l))
if l < min_loss:
min_loss = l
min_ax = col_name
return min_ax, min_loss, base_loss-min_loss
ohe = OHE(sparse = False)
train_y_ohe = ohe.fit_transform(train_y.values.reshape(-1,1))
train_y_ohe[0:5],train_y.values[0:5]
test_y_ohe = ohe.transform(test_y.values.reshape(-1,1))
find_split(train_x_t, train_y_ohe, bdd_cross_entropy, verbose = False)
np.mean(train_y_ohe[train_x_t['LIMIT_BAL0.5']],axis=0)
np.mean(train_y_ohe[~train_x_t['LIMIT_BAL0.5']],axis = 0)
np.mean(train_y_ohe,axis=0)
#Slow but simple
class decision_tree(object):
def __init__(self):
self.f = None
def fit(self, x,y,depth=5,loss=MSE, minsize = 1, quintiles = 2, verbose = False):
#Construct default function
mu = np.mean(y, axis=0)
self.f = lambda a: mu
# Check our stopping criteria
if(x.shape[0]<=minsize or depth == 0):
return
# transform our data
tr = bin_transformer(x, quintiles)
tr_x, fns = tr.transform(x)
split, split_loss, improvement = find_split(tr_x,y,loss)
if verbose:
print("Improvement: {}".format(improvement))
#if no good split was found return
if split == None:
return
# Build test function
col_to_split = fns[split][0]
splitter = lambda a: (a[col_to_split] >= fns[split][1])
mask = tr_x[split]
left = decision_tree()
right = decision_tree()
left.fit(x[~mask],y[~mask],depth-1,loss, minsize, quintiles)
right.fit(x[mask],y[mask],depth-1,loss, minsize, quintiles)
def g(z):
if(splitter(z)):
return right.f(z)
else:
return left.f(z)
self.f = g
def predict(self, x):
        """Used for bulk prediction"""
num_samples = x.shape[0]
return np.array([self.f(x.iloc[ix,:]) for ix in range(num_samples)])
dt = decision_tree()
dt.fit(train_x, train_y_ohe, loss = MSE, minsize = 1, depth = 6, quintiles = 50)
dt.predict(test_x.iloc[0:3,:]), test_y_ohe[0:3]
preds = dt.predict(train_x)
np.mean(np.argmax(preds, axis=1)==train_y)
1-np.mean(test_y)
class gradient_boosting_trees(object):
def fit(self, x, y, depth = 2, quintiles = 10, num_trees = 10):
self.forest = [None]*num_trees
cur_y = y[:]
for ix in range(num_trees):
self.forest[ix] = decision_tree()
self.forest[ix].fit(x, cur_y, loss=MSE, depth = depth, quintiles = quintiles, minsize = 1)
preds = self.forest[ix].predict(x)
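            # Update the targets to the residuals so the next tree fits what the current ensemble still gets wrong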
cur_y = cur_y - preds
def predict(self,x):
s = 0.
preds = [tree.predict(x) for tree in self.forest]
for t in preds:
s+=t
return s
forest = gradient_boosting_trees()
train_y_ohe = ohe.fit_transform(train_y.values.reshape(-1,1))
forest.fit(train_x, train_y_ohe, depth = 20, num_trees = 5, quintiles = 20)
forest.predict(test_x.iloc[0:3,:]), test_y_ohe[0:3]
for_preds = forest.predict(train_x)
for_preds[0:5,:]
train_y_ohe[0:3]
np.mean(np.argmax(for_preds, axis=1)==train_y)
for_preds = forest.predict(test_x)
np.mean(np.argmax(for_preds, axis=1)==test_y)
from sklearn import tree
sktree = tree.DecisionTreeClassifier(max_depth=20)
sktree.fit(train_x, train_y_ohe)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us load up a sample dataset.
Step6: Now construct a KNN classifier
Step7: Calculate accuracy on this very small subset.
Step8: Let's time these different methods to see if the "faster_preds" is actually faster
Step11: Okay now, let us try the clustering algorithm.
Step12: Let us load the credit card dataset and extract a small dataframe of numerical features to test on.
Step14: Now let us write our transformation function.
Step15: Now let us build some simple loss functions for 1d labels.
Step17: Now let us define the find split function.
Step18: One hot encode our dataset
Step20: Test this to see if it is reasonable
Step21: Test this out.
Step22: The naive option
|
534 | <ASSISTANT_TASK:>
Python Code:
import pyspark
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithSGD
from pyspark.mllib.tree import DecisionTree
sc = pyspark.SparkContext()
raw_rdd = sc.textFile("datasets/COUNT/titanic.csv")
raw_rdd.count()
raw_rdd.take(5)
header = raw_rdd.first()
data_rdd = raw_rdd.filter(lambda line: line != header)
data_rdd.takeSample(False, 5, 0)
def row_to_labeled_point(line):
'''
Builds a LabelPoint consisting of:
survival (truth): 0=no, 1=yes
ticket class: 0=1st class, 1=2nd class, 2=3rd class
age group: 0=child, 1=adults
gender: 0=man, 1=woman
'''
passenger_id, klass, age, sex, survived = [segs.strip('"') for segs in line.split(',')]
klass = int(klass[0]) - 1
if (age not in ['adults', 'child'] or
sex not in ['man', 'women'] or
survived not in ['yes', 'no']):
raise RuntimeError('unknown value')
features = [
klass,
(1 if age == 'adults' else 0),
(1 if sex == 'women' else 0)
]
return LabeledPoint(1 if survived == 'yes' else 0, features)
labeled_points_rdd = data_rdd.map(row_to_labeled_point)
labeled_points_rdd.takeSample(False, 5, 0)
training_rdd, test_rdd = labeled_points_rdd.randomSplit([0.7, 0.3], seed = 0)
training_count = training_rdd.count()
test_count = test_rdd.count()
training_count, test_count
model = DecisionTree.trainClassifier(training_rdd,
numClasses=2,
categoricalFeaturesInfo={
0: 3,
1: 2,
2: 2
})
predictions_rdd = model.predict(test_rdd.map(lambda x: x.features))
truth_and_predictions_rdd = test_rdd.map(lambda lp: lp.label).zip(predictions_rdd)
accuracy = truth_and_predictions_rdd.filter(lambda v_p: v_p[0] == v_p[1]).count() / float(test_count)
print('Accuracy =', accuracy)
print(model.toDebugString())
model = LogisticRegressionWithSGD.train(training_rdd)
predictions_rdd = model.predict(test_rdd.map(lambda x: x.features))
labels_and_predictions_rdd = test_rdd.map(lambda lp: lp.label).zip(predictions_rdd)
accuracy = labels_and_predictions_rdd.filter(lambda v_p: v_p[0] == v_p[1]).count() / float(test_count)
print('Accuracy =', accuracy)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we create a SparkContext, the main object in the Spark API. This call may take a few seconds to return as it fires up a JVM under the covers.
Step2: Sample the data
Step3: We query RDD for the number of lines in the file. The call here causes the file to be read and the result computed. This is a Spark action.
Step4: We query for the first five rows of the RDD. Even though the data is small, we shouldn't get into the habit of pulling the entire dataset into the notebook. Many datasets that we might want to work with using Spark will be much too large to fit in memory of a single machine.
Step5: We see a header row followed by a set of data rows. We filter out the header to define a new RDD containing only the data rows.
Step6: We take a random sample of the data rows to better understand the possible values.
Step7: We see that the first value in every row is a passenger number. The next three values are the passenger attributes we might use to predict passenger survival
Step8: We apply the function to all rows.
Step9: We take a random sample of the resulting points to inspect them.
Step10: Split for training and test
Step11: Train and test a decision tree classifier
Step12: We now apply the trained model to the feature values in the test set to get the list of predicted outcomes.
Step13: We bundle our predictions with the ground truth outcome for each passenger in the test set.
Step14: Now we compute the test error (% predicted survival outcomes == actual outcomes) and display the decision tree for good measure.
Step15: Train and test a logistic regression classifier
|
535 | <ASSISTANT_TASK:>
Python Code:
stuff = {
'apple': 1.97,
'banana': 2.99,
'cherry': 3.99,
}
# Common pattern of .format use: use numerical indexes
for name, price in stuff.items():
print('The price of {0} is {1}.'.format(name, price))
# Common pattern of .format use: use parameter names
for name, price in stuff.items():
print(
'The price of {name} is {price}.'.
format(name=name, price=price))
for name, price in stuff.items():
print(f'The price of {name} is {price}.')
tax_rate = 0.50
for name, price in stuff.items():
print(f'The total price of {name} is {round(price * (1+tax_rate), 2)}.')
tax_rate = 0.50
for name, price in stuff.items():
total_price = round(price * (1+tax_rate), 2)
print(f'The total price of {name} is {total_price}.')
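# Worked example of the rounding step (illustrative note, not part of the original code):
# price * (1 + tax_rate) adds 50% tax, and round(..., 2) keeps two decimal places,
# e.g. round(2.9549, 2) == 2.95.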
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In 'The price of {0} is {1}' above,
Step2: Something that sucks about the above print,
Step3: It reminds me of shell syntax. For example,
Step4: But what does round(price * (1+tax_rate), 2) mean?
|
536 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
x = tf.Variable(0)
x.assign(114514)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
537 | <ASSISTANT_TASK:>
Python Code:
# == Basic import == #
# No annoying warnings
import warnings
warnings.filterwarnings('ignore')
# plot within the notebook
%matplotlib inline
import numpy as np
from scipy import stats
import matplotlib.pyplot as mpl
def plot_gaussians(loc=1, scale=2):
    """Plot the pdf and the cdf of a Gaussian and return their axes"""
gaussian_prop = dict(loc=loc, scale=scale)
x = np.linspace(-10,10,1000)
fig = mpl.figure(figsize=[12,5])
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_ = ax.plot(x, stats.norm.pdf(x, **gaussian_prop), color="k", lw=2, label="PDF N(1,2)")
_ = ax2.plot(x, stats.norm.cdf(x, **gaussian_prop), color="k", lw=2, label="CDF N(1,2)")
ax.legend(loc="upper left", frameon=False)
ax2.legend(loc="upper left", frameon=False)
return ax,ax2
ax, ax2 = plot_gaussians()
gaussian_prop=dict(loc=1, scale=2)
ax, ax2 = plot_gaussians(**gaussian_prop)
# - define
bounds = [-10,5] # i.e. -1 +1sigma
#bounds = [-3,5] # i.e. -2 +2sigma
#bounds = [-10,5] # i.e. 1 tail 2 sigma (for plotting reason I set -10 and not -np.inf, but the latter works with scipy)
# - show it
x = np.linspace(bounds[0], bounds[1], 100)
prop_fill = dict(alpha=0.4, color=mpl.cm.Blues(0.8))
ax2.fill_between(x, stats.norm.cdf(x,**gaussian_prop), **prop_fill)
ax.fill_between(x, stats.norm.pdf(x,**gaussian_prop), **prop_fill)
print "".center(80,"-")
print ("chance to randomly draw a point between %.2f and %.2f is %.2f%%"%(
bounds[0],bounds[1], (stats.norm.cdf(bounds[1], **gaussian_prop) - stats.norm.cdf(bounds[0], **gaussian_prop))*100)
).center(80)
print "".center(80,"-")
x = np.random.rand(10)
y = np.random.rand(10)
_ = mpl.plot(x,y, ms=10, ls="None", marker="o", mfc= mpl.cm.Blues(0.5), mec= "k", mew=2)
stats.pearsonr(x,y)
x = np.random.rand(10)
y = np.random.rand(10) + x
_ = mpl.plot(x,y, ms=10, ls="None", marker="o", mfc= mpl.cm.Blues(0.5), mec= "k", mew=2)
stats.pearsonr(x,y)
s1 = np.random.normal(loc=1, scale=2, size=30)
s2 = np.random.normal(loc=5, scale=3, size=34)
prop = dict(range=[-5,8], bins=6, histtype="step", normed=True)
_= mpl.hist(s1, fill=False, lw=2, ec="k", **prop)
_= mpl.hist(s2, fill=True, lw=0, facecolor=mpl.cm.Blues(0.6,0.4), **prop)
stats.ks_2samp(s1,s2)
rho, p = stats.pearsonr(x,y)
N = 10
print(rho / np.sqrt((1 - rho**2) / (N - 2)))
print("p-value", p)
1- stats.norm.cdf(87, loc=54, scale=np.sqrt(54))
(stats.norm.ppf(1-3.5e-4, loc=54, scale=np.sqrt(54)) - 54)/ np.sqrt(54)
x = np.linspace(10,100,1000)
fig = mpl.figure(figsize=[10,7])
ax = fig.add_subplot(111)
ax.plot(x,stats.chi2.pdf(x, 30),"k-",lw=2, label=r"30 dof")
ax.plot(x,stats.chi2.pdf(x, 50),"r-",lw=2, label=r"50 dof")
ax.plot(x,stats.chi2.pdf(x, 70),"g-",lw=2, label=r"70 dof")
ax.legend(loc="upper right", frameon=False)
print(stats.chi2.cdf(50, 30))
np.exp(-10/2.)
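# Hypothetical helper for illustration (an assumption, not from the original notebook):
# exp(-delta_AIC / 2) is the relative likelihood of a model whose AIC is delta_AIC larger
# than the best model's; the expression above computes this for delta_AIC = 10.
def relative_likelihood(aic_best, aic_other):
    # evidence ratio in favor of the model with the lowest AIC
    return np.exp((aic_best - aic_other) / 2.)

relative_likelihood(100, 110)  # same value as np.exp(-10/2.)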
import matplotlib.image as mpimg
atlas_excess = mpimg.imread("/Users/mrigault/Desktop/useful_plots/NonSNeIaData/Atlas_750Excess.png")
cms_excess = mpimg.imread("/Users/mrigault/Desktop/useful_plots/NonSNeIaData/CMS_750Excess.png")
fig = mpl.figure(figsize=[20,10])
axatlas = fig.add_subplot(121, frameon=False)
axcms = fig.add_subplot(122, frameon=False)
_ = axatlas.imshow(atlas_excess)
_ = axcms.imshow(cms_excess)
_ = [ax.set_xticks([]) for ax in [axatlas,axcms]]
_ = [ax.set_yticks([]) for ax in [axatlas,axcms]]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Reminder
Step3: => The probability of randomly drawing a point within given limits [a, b] = CDF[b] - CDF[a]
Step4: Note about error bars
Step5: let's measure the correlation coefficient
Step6: The documentation says
Step7: Maybe, maybe not... because the correlation is weak and you do not have many points... change the number of points, or lower the boundaries for detecting a correlation.
Step8: Sigma as confidence level
Step9: Usual confidence level interpretation in Physics
Step10: Which is really unlikely. We could also see it in terms of sigma
Step11: IV – Goodness of Fit
Step12: AIC
Step13: V – A good reflex of a Data Scientist: Blind analysis
|
538 | <ASSISTANT_TASK:>
Python Code:
import pandas
tss = pandas.read_csv("NSQD_Res_TSS.csv")
medians = (
tss.groupby(by=['parameter', 'units', 'season'])
.median()['res']
.reset_index()
)
medians
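# To drop duplicate records, group by the full set of identifying columns and keep
# the first row per sampling event before computing the seasonal medians.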
index_cols = [
'epa_rain_zone', 'location_code', 'station_name', 'primary_landuse',
'start_date', 'season', 'station', 'parameter', 'units',
]
medians = (
tss.groupby(by=index_cols)
.first()
.reset_index()
.groupby(by=['parameter', 'units', 'season'])
.median()['res']
.reset_index()
)
medians
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute the medians for each season without dropping duplicates
Step2: Compute the medians for each season after dropping duplicate records
|
539 | <ASSISTANT_TASK:>
Python Code:
import gym
import tensorflow as tf
import numpy as np
# Create the Cart-Pole game environment
env = gym.make('CartPole-v0')
env.reset()
rewards = []
for _ in range(100):
env.render()
state, reward, done, info = env.step(env.action_space.sample()) # take a random action
rewards.append(reward)
if done:
rewards = []
env.reset()
print(rewards[-20:])
class QNetwork:
def __init__(self, learning_rate=0.01, state_size=4,
action_size=2, hidden_size=10,
name='QNetwork'):
# state inputs to the Q-network
with tf.variable_scope(name):
self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')
# One hot encode the actions to later choose the Q-value for the action
self.actions_ = tf.placeholder(tf.int32, [None], name='actions')
one_hot_actions = tf.one_hot(self.actions_, action_size)
# Target Q values for training
self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')
# ReLU hidden layers
self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)
# Linear output layer
self.output = tf.contrib.layers.fully_connected(self.fc2, action_size,
activation_fn=None)
### Train with loss (targetQ - Q)^2
# output has length 2, for two actions. This next line chooses
# one value from output (per row) according to the one-hot encoded actions.
self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)
self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))
self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
from collections import deque
class Memory():
def __init__(self, max_size = 1000):
self.buffer = deque(maxlen=max_size)
def add(self, experience):
self.buffer.append(experience)
def sample(self, batch_size):
idx = np.random.choice(np.arange(len(self.buffer)),
size=batch_size,
replace=False)
return [self.buffer[ii] for ii in idx]
train_episodes = 1000 # max number of episodes to learn from
max_steps = 200 # max steps in an episode
gamma = 0.99 # future reward discount
# Exploration parameters
explore_start = 1.0 # exploration probability at start
explore_stop = 0.01 # minimum exploration probability
decay_rate = 0.0001 # exponential decay rate for exploration prob
# Network parameters
hidden_size = 64 # number of units in each Q-network hidden layer
learning_rate = 0.0001 # Q-network learning rate
# Memory parameters
memory_size = 10000 # memory capacity
batch_size = 20 # experience mini-batch size
pretrain_length = batch_size # number experiences to pretrain the memory
tf.reset_default_graph()
mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)
# Initialize the simulation
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
memory = Memory(max_size=memory_size)
# Make a bunch of random actions and store the experiences
for ii in range(pretrain_length):
# Uncomment the line below to watch the simulation
# env.render()
# Make a random action
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
if done:
# The simulation fails so no next state
next_state = np.zeros(state.shape)
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
# Now train with experiences
saver = tf.train.Saver()
rewards_list = []
with tf.Session() as sess:
# Initialize variables
sess.run(tf.global_variables_initializer())
step = 0
for ep in range(1, train_episodes):
total_reward = 0
t = 0
while t < max_steps:
step += 1
# Uncomment this next line to watch the training
# env.render()
# Explore or Exploit
explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step)
if explore_p > np.random.rand():
# Make a random action
action = env.action_space.sample()
else:
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
total_reward += reward
if done:
# the episode ends so no next state
next_state = np.zeros(state.shape)
t = max_steps
print('Episode: {}'.format(ep),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_p))
rewards_list.append((ep, total_reward))
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
t += 1
# Sample mini-batch from memory
batch = memory.sample(batch_size)
states = np.array([each[0] for each in batch])
actions = np.array([each[1] for each in batch])
rewards = np.array([each[2] for each in batch])
next_states = np.array([each[3] for each in batch])
# Train network
target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})
# Set target_Qs to 0 for states where episode ends
episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1)
target_Qs[episode_ends] = (0, 0)
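            # Bellman target: immediate reward plus the discounted value of the best next action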
targets = rewards + gamma * np.max(target_Qs, axis=1)
loss, _ = sess.run([mainQN.loss, mainQN.opt],
feed_dict={mainQN.inputs_: states,
mainQN.targetQs_: targets,
mainQN.actions_: actions})
saver.save(sess, "checkpoints/cartpole.ckpt")
%matplotlib inline
import matplotlib.pyplot as plt
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
eps, rews = np.array(rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
test_episodes = 10
test_max_steps = 400
env.reset()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
for ep in range(1, test_episodes):
t = 0
while t < test_max_steps:
env.render()
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
if done:
t = test_max_steps
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
state = next_state
t += 1
env.close()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.
Step3: To shut the window showing the simulation, use env.close().
Step4: The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.
Step5: Experience replay
Step6: Exploration - Exploitation
Step7: Populate the experience memory
Step8: Training
Step9: Visualizing training
Step10: Testing
|
540 | <ASSISTANT_TASK:>
Python Code:
y = [2, 3, 1]
x = np.arange(len(y))
xlabel = ['A', 'B', 'C']
plt.bar(x, y, align='center') # usually needed so the bars are centered on each x value; without this setting the default is left-aligned
plt.xticks(x, xlabel);
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
plt.barh(y_pos, performance, xerr=error, align='center', alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance');
n_groups = 5
means_men = (20, 35, 30, 35, 27)
std_men = (2, 3, 4, 1, 2)
means_women = (25, 32, 34, 20, 25)
std_women = (3, 5, 2, 3, 3)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = plt.bar(index, means_men, bar_width,
alpha=opacity,
color='b',
yerr=std_men,
error_kw=error_config,
label='Men')
rects2 = plt.bar(index + bar_width, means_women, bar_width,
alpha=opacity,
color='r',
yerr=std_women,
error_kw=error_config,
label='Women')
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.xticks(index + bar_width, ('A', 'B', 'C', 'D', 'E'))
plt.legend()
plt.tight_layout()
N = 5
menMeans = (20, 35, 30, 35, 27)
womenMeans = (25, 32, 34, 20, 25)
menStd = (2, 3, 4, 1, 2)
womenStd = (3, 5, 2, 3, 3)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, menMeans, width, color='r', yerr=menStd)
p2 = plt.bar(ind, womenMeans, width, color='y',
bottom=menMeans, yerr=womenStd)
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.xticks(ind + width/2., ('G1', 'G2', 'G3', 'G4', 'G5'))
plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0]), ('Men', 'Women'))
x = np.linspace(0.1, 2*np.pi, 10)
markerline, stemlines, baseline = plt.stem(x, np.cos(x), '-.')
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 2);
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # pull the second wedge out slightly
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
plt.axis('equal'); # use an equal aspect ratio so the pie is drawn in a neat square frame
x = np.random.randn(5000)
arrays, bins, patches = plt.hist(x, bins=50, normed=True) # using more than ~100 bins is unnecessary, it only creates extra patch objects (memory)
arrays
bins
X = np.random.normal(0, 1, 1024)
Y = np.random.normal(0, 1, 1024)
plt.scatter(X, Y);
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2
plt.scatter(x, y, s=area, c=colors, alpha=0.5);
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.images[0]
X
plt.imshow(X, interpolation='nearest'); # interpolation controls how strongly the image is smoothed for easier human viewing
plt.grid(False)
methods = [None, 'none', 'nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
fig, axes = plt.subplots(3, 6, figsize=(12, 6), subplot_kw={'xticks': [], 'yticks': []})
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, interp_method in zip(axes.flat, methods):
ax.imshow(X, interpolation=interp_method)
ax.set_title(interp_method)
def f(x, y):
return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 -y ** 2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
XX, YY = np.meshgrid(x, y)
ZZ = f(XX, YY)
plt.contourf(XX, YY, ZZ, alpha=.75, cmap='jet');
plt.contour(XX, YY, ZZ, colors='black', linewidth=.5);
from mpl_toolkits.mplot3d import Axes3D
X = np.arange(-4, 4, 0.25)
Y = np.arange(-4, 4, 0.25)
XX, YY = np.meshgrid(X, Y)
RR = np.sqrt(XX**2 + YY**2)
ZZ = np.sin(RR)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(XX, YY, ZZ, rstride=1, cstride=1, cmap='hot');
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specifying the xerr or yerr argument adds error bars.
Step2: Two or more bar charts can also be drawn side by side.
Step3: Alternatively, a stacked bar chart can be drawn by shifting the bars with the bottom argument.
Step4: Stem plot
Step5: Pie chart
Step6: Histogram
Step7: Scatter plot
Step8: Imshow
Step9: Contour plot
Step10: 3D surface plot
|
541 | <ASSISTANT_TASK:>
Python Code:
def area(p0, p1, p2):
"Calculate the area of a triangle given three points coordinates in the format (x, y)"
# Check if all the points have two coordinates
if len(p0) == 2 and len(p1) == 2 and len(p2) == 2:
return abs((p0[0]*(p1[1]-p2[1]) + p1[0]*(p2[1]-p0[1]) + p2[0]*(p0[1]-p1[1])) / 2)
else:
return 'Insert the coordinates of three points in the format (x, y)'
# Test cases
print(area((0, 0), (1, 0), (0, 1))) # 0.5
print(area((0, 0), (5, 0), (0, 8))) # 20
print(area((0, 0), (5, 1, 0), (0, 8))) # Error message
def polygon_area(points):
    """
    Calculate the area of a simple polygon given a tuple of points in the format (x, y).
    The points have to be ordered as you move around the polygon.
    """
# Initialize area to 0
area = 0
# For each point check the number of coordinates and add to the area
for i in range(len(points)):
if len(points[i]) == 2:
# Here I use modulo to restart from 0 to account for the term where i=n-1
area += points[i][0]*points[(i+1)%len(points)][1] - points[(i+1)%len(points)][0]*points[i][1]
else:
return 'The coordinates of the points should be in the format (x, y)'
return abs(area/2)
# Test cases
print(polygon_area(((0, 0), (1, 0), (0, 1)))) # triangle, area = 0.5
print(polygon_area(((0, 0), (1, 0), (1, 1), (0, 1)))) # square, area = 1
def add_vec(x, y):
"Sum two vectors of arbitrary length"
# Check if the vectors have the same length
if len(x) == len(y):
# Initialize sum to zero
z = [0]*len(x)
# Add vectors element by element
for i in range(len(x)):
z[i] = x[i] + y[i]
return z
else:
return 'The two vectors must have the same length'
# Test cases
print(add_vec((1, 4), (2, -3)))
print(add_vec((1, 4), (2, -3, 1))) # Error message
print(add_vec((1, 4, -5, 9), (2, -3, 1, 7)))
def add_vec2(x, y):
"Sum two vectors of arbitrary length using list comprehension"
# Check if the vectors have the same length
if len(x) == len(y):
# Create a new list summing the two vectors element-wise
return [a + b for a, b in zip(x, y)]
else:
return 'The two vectors must have the same size'
# Test cases
print(add_vec2((1, 4), (2, -3)))
print(add_vec2((1, 4), (2, -3, 1))) # Error message
print(add_vec2((1, 4, -5, 9), (2, -3, 1, 7)))
# Colleges dictionary
colleges = {'Christ\'s':'CHR'
, 'Churchill':'CHU'
, 'Clare':'CL'
, 'Clare Hall':'CLH'
, 'Corpus Christi':'CC'
, 'Darwin':'DAR'
, 'Downing':'DOW'
, 'Emmanuel':'EM'
, 'Fitzwilliam':'F'
, 'Girton':'G'
}
# Check the dictionary
print('Colleges dictionary:\n', colleges)
# Initialize empty abbreviation dictionary
colleges_abbr = {}
# Populate dictionary
for name, abbr in colleges.items():
colleges_abbr[abbr] = name
# Check the abbreviations dictionary
print('\nColleges\' abbreviations dictionary:\n', colleges_abbr)
# Sort the abbreviations
colleges_abbr_sorted = sorted(colleges_abbr)
# Check the sorted abbreviations
print('\nSorted colleges abbreviation:\n', colleges_abbr_sorted)
# Colleges dictionary with additional info
colleges_ext = {'Christ\'s':{'abbreviation':'CHR', 'year':1505, 'students':541}
, 'Churchill':{'abbreviation':'CHU', 'year':1960, 'students':704}
, 'Clare':{'abbreviation':'CL', 'year':1326, 'students':655}
, 'Clare Hall':{'abbreviation':'CLH', 'year':1966, 'students':155}
, 'Corpus Christi':{'abbreviation':'CC', 'year':1352, 'students':467}
, 'Darwin':{'abbreviation':'DAR', 'year':1964, 'students':674}
, 'Downing':{'abbreviation':'DOW', 'year':1800, 'students':623}
, 'Emmanuel':{'abbreviation':'EM', 'year':1584, 'students':750}
, 'Fitzwilliam':{'abbreviation':'F', 'year':1869, 'students':688}
, 'Girton':{'abbreviation':'G', 'year':1869, 'students':677}
}
# Initialize the result
more_students = ''
# Initialize the variable that keeps track of the max number of students
n_students = 0
# Iterate over the colleges in the dict
for coll, data in colleges_ext.items():
# If the number of students of the current college is greater than n_students
# update n_students with the current number and more_students with the current college name
if data['students'] > n_students:
more_students, n_students = coll, data['students']
# Print the college with more_students
print('The college with more students is', more_students)
# Initialize the result
out = [0, '']
# Initialize the variable that keeps track of the oldest year found to an arbitrary large year
year = 2500
for coll, data in colleges_ext.items():
# If the year of foundation of the current college is lesser than year
# update year with the current number and out with the current college number of students and abbreviation
if data['year'] < year:
out, year = [data['students'], data['abbreviation']], data['year']
# Print the oldest college number of students and name
print('{} is the oldest college and it has {} students'.format(out[1], out[0]))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Exercise 06.2 (selecting data structures)
Step3: Exercise 06.3 (indexing)
Step4: Optional (advanced)
Step5: Exercise 06.4 (dictionaries)
Step6: Optional extension
Step7: Find the college with the greatest number of students and print the abbreviation
Step8: Find the oldest college, and print the number of students and the abbreviation for this college
|
542 | <ASSISTANT_TASK:>
Python Code:
import importlib
autograd_available = True
# if automatic differentiation is available, use it
try:
import autograd
except ImportError:
autograd_available = False
pass
if autograd_available:
import autograd.numpy as np
from autograd import elementwise_grad as egrad
else:
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interactive
import ipywidgets as widgets
%matplotlib inline
if autograd_available:
print('Using autograd to compute gradients')
else:
print('Using hand-calculated gradient')
function_select = 3
def myfun(x):
functions = {
1: 0.5*x**2,
2: 0.5*x**3,
3: x**2+x**3
}
return functions.get(function_select)
if autograd_available:
gradient = egrad(myfun)
else:
def gradient(x):
functions = {
1: x,
2: 1.5*x**2,
3: 2*x+3*x**2
}
return functions.get(function_select)
x = np.linspace(-3,3,100)
fy = myfun(x)
gy = gradient(x)
plt.figure(1,figsize=(10,6))
plt.rcParams.update({'font.size': 14})
plt.plot(x,fy,x,gy)
plt.grid(True)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["$f(x)$","$f^\prime(x)$"])
plt.show()
epsilon = 0.5
start = 3.75
points = []
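# Fixed-step "sign" descent: move by a constant epsilon opposite to the sign of the derivative, ignoring its magnitude.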
while abs(gradient(start)) > 1e-8 and len(points) < 50:
points.append( (start,myfun(start)) )
start = start - epsilon*np.sign(gradient(start))
plt.figure(1,figsize=(15,6))
plt.rcParams.update({'font.size': 14})
plt.subplot(1,2,1)
plt.scatter(list(zip(*points))[0],list(zip(*points))[1],c=range(len(points),0,-1),cmap='gray',s=40,edgecolors='k')
plt.plot(x,fy)
plt.grid(True)
plt.xlabel("x")
plt.ylabel("y=f(x)")
plt.subplot(1,2,2)
plt.plot(range(0,len(points)),list(zip(*points))[0])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("x_i")
plt.show()
epsilon = 0.01
start = 3.75
points = []
while abs(gradient(start)) > 1e-8 and len(points) < 500:
points.append( (start,myfun(start)) )
start = start - epsilon*gradient(start)
plt.figure(1,figsize=(15,6))
plt.rcParams.update({'font.size': 14})
plt.subplot(1,2,1)
plt.scatter(list(zip(*points))[0],list(zip(*points))[1],c=range(len(points),0,-1),cmap='gray',s=40,edgecolors='k')
plt.plot(x,fy)
plt.grid(True)
plt.xlabel("x")
plt.ylabel("y=f(x)")
plt.subplot(1,2,2)
plt.plot(range(0,len(points)),list(zip(*points))[0])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("x_i")
plt.show()
def interactive_gradient_descent(start,epsilon, maximum_steps, xmin, xmax):
points = []
# assume 1e-10 is about zero
while abs(gradient(start)) > 1e-10 and len(points) < maximum_steps:
points.append( (start,myfun(start)) )
start = start - epsilon*gradient(start)
plt.figure(1,figsize=(15,6))
plt.rcParams.update({'font.size': 14})
plt.subplot(1,2,1)
plt.scatter(list(zip(*points))[0],list(zip(*points))[1],c=range(len(points),0,-1),cmap='gray',s=40,edgecolors='k')
px = np.linspace(xmin,xmax,1000)
pfy = myfun(px)
plt.plot(px,pfy)
plt.autoscale(enable=True,tight=True)
plt.xlim(xmin,xmax)
plt.grid(True)
plt.xlabel("x")
plt.ylabel("y=f(x)")
plt.subplot(1,2,2)
plt.plot(range(0,len(points)),list(zip(*points))[0])
plt.grid(True)
plt.xlabel("Step i")
plt.ylabel("x_i")
plt.show()
epsilon_values = np.arange(0.0,0.1,0.0001)
style = {'description_width': 'initial'}
interactive_update = interactive(interactive_gradient_descent, \
epsilon = widgets.SelectionSlider(options=[("%g"%i,i) for i in epsilon_values], value=0.01, continuous_update=False,description='epsilon',layout=widgets.Layout(width='50%'),style=style), \
start = widgets.FloatSlider(min=-5.0,max=5.0,step=0.0001,value=3.7, continuous_update=False, description='Start x', layout=widgets.Layout(width='75%'), style=style), \
maximum_steps = widgets.IntSlider(min=20, max=500, value= 200, continuous_update=False, description='Number steps',layout=widgets.Layout(width='50%'),style=style), \
xmin = widgets.FloatSlider(min=-10, max=0, step=0.1, value=-5, continuous_update=False, description='Plot negative x limit',layout=widgets.Layout(width='50%'), style=style), \
xmax = widgets.FloatSlider(min=0, max=10, step=0.1, value=5, continuous_update=False, description='Plot positive x limit',layout=widgets.Layout(width='50%'),style=style))
output = interactive_update.children[-1]
output.layout.height = '400px'
interactive_update
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify the function to minimize as a simple Python function.
Step2: Plot the function and its derivative
Step3: Simple gradient descent strategy using only sign of the derivative
Step4: Gradient descent
Step5: Here, we provide an interactive tool to play around yourself with parameters of the gradient descent.
|
543 | <ASSISTANT_TASK:>
Python Code:
from sklearn import datasets
import pandas as pd
from sklearn.datasets import load_digits
digits = load_digits() #dataset de clasificacion
brio= pd.read_csv('C:/Users/Alex/Documents/eafit/semestres/X semestre/programacion/briofitos.csv') #dataset regresion
#digits.DESCR
digits.target
brio.head()
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
%pylab inline
import matplotlib.pyplot as plt
brio2=pd.DataFrame({'x':brio['altura_media'],'y':brio['Promedio_briofitos']})
npMatrix = np.matrix(brio2)
X, Y = npMatrix[:,0], npMatrix[:,1]
#mdl = LinearRegression().fit(X,Y)
mdl = LinearRegression().fit(brio2[['x']],brio2.y)
m = mdl.coef_[0]
b = mdl.intercept_
print ("formula: y = {0}x + {1}".format(m, b) )# following slope intercept form
plt.scatter(X,Y, color='blue')
plt.plot([0,4000],[b,m*100+b],'r')
plt.title('Linear Regression', fontsize = 20)
plt.xlabel('Altura', fontsize = 15)
plt.ylabel('Especies', fontsize = 15)
plt.show()
from sklearn.metrics import r2_score
print('Coefficients: \n', mdl.coef_)
print('Variance score: %.2f' % mdl.score(X, Y))
y1 = mdl.predict(brio2[['x']])  # predict from the feature column, not from the target values
print ( 'R2 score:', r2_score(Y, y1))
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
X = digits.data[:, 2:4]
Y = digits.target
h = .02
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X, Y)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
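# Predict the class for every point of the mesh to obtain the decision regions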
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Target')
plt.ylabel('Sample')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
X, y = digits.data[:1700,:], digits.target[:1700]
tX, ty = digits.data[1700:,:], digits.target[1700:]
OVR = OneVsRestClassifier(linear_model.LogisticRegression(C=1e5)).fit(X,y)
OVO = OneVsOneClassifier(linear_model.LogisticRegression(C=1e5)).fit(X,y)
print ('One vs rest accuracy: %.3f' % OVR.score(tX,ty))
print ('One vs one accuracy: %.3f' % OVO.score(tX,ty))
from sklearn import metrics
print(metrics.classification_report(ty, OVO.predict(tX)))
from sklearn import svm
X = brio[['Suma_briofitos','altura_media']]
y = brio['Promedio_briofitos']
clf = svm.SVC()
clf.fit(X, y)
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
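# Recover the separating line from w.x + b = 0 as y = a*x + c, then draw the two parallel margin lines through the support vectors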
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(0, 350)
yy = a * xx - (clf.intercept_[0]) / w[1]
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
clf = svm.SVC(kernel='linear').fit(X, y)
y1=clf.predict(X)
print ( 'R2 score:', r2_score(y, y1))
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
X = np.c_[digits.data[:, 2:4]]
Y = np.c_[digits.target]
fignum = 1
for kernel in ('linear', 'rbf','sigmoid'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
print (clf.score(X,Y))
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -10
x_max = 20
y_min = -5
y_max = 30
XX, YY = np.mgrid[x_min:x_max:20j, y_min:y_max:400j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Brio is a dataframe with the average number and total number of bryophyte species found along an altitudinal gradient on a mountain
Step2: Part 2.1. Regression
Step3: Part 2.2. Evaluate the quality of the regression
Step4: Part 3.1. Perform a classification
Step5: Part 3.2. Evaluate the classification
Step6: Part 4. Other algorithms
Step7: The bryophyte data were used to fit a classical linear regression and a linear SVM; the best result was obtained with the SVM, with an R2 of 0.59, whereas the R2 of the classical linear regression was negative.
|
544 | <ASSISTANT_TASK:>
Python Code:
from dx import *
import seaborn as sns; sns.set()
# constant short rate
r = constant_short_rate('r', 0.02)
# market environments
me_gbm = market_environment('gbm', dt.datetime(2015, 1, 1))
me_jd = market_environment('jd', dt.datetime(2015, 1, 1))
me_sv = market_environment('sv', dt.datetime(2015, 1, 1))
# geometric Brownian motion
me_gbm.add_constant('initial_value', 36.)
me_gbm.add_constant('volatility', 0.2)
me_gbm.add_constant('currency', 'EUR')
me_gbm.add_constant('model', 'gbm')
# jump diffusion
me_jd.add_constant('initial_value', 36.)
me_jd.add_constant('volatility', 0.2)
me_jd.add_constant('lambda', 0.5)
# probability for jump p.a.
me_jd.add_constant('mu', -0.75)
# expected jump size [%]
me_jd.add_constant('delta', 0.1)
# volatility of jump
me_jd.add_constant('currency', 'EUR')
me_jd.add_constant('model', 'jd')
# stochastic volatility model
me_sv.add_constant('initial_value', 36.)
me_sv.add_constant('volatility', 0.2)
me_sv.add_constant('vol_vol', 0.1)
me_sv.add_constant('kappa', 2.5)
me_sv.add_constant('theta', 0.4)
me_sv.add_constant('rho', -0.5)
me_sv.add_constant('currency', 'EUR')
me_sv.add_constant('model', 'sv')
# valuation environment
val_env = market_environment('val_env', dt.datetime(2015, 1, 1))
val_env.add_constant('paths', 10000)
val_env.add_constant('frequency', 'W')
val_env.add_curve('discount_curve', r)
val_env.add_constant('starting_date', dt.datetime(2015, 1, 1))
val_env.add_constant('final_date', dt.datetime(2015, 12, 31))
# add valuation environment to market environments
me_gbm.add_environment(val_env)
me_jd.add_environment(val_env)
me_sv.add_environment(val_env)
risk_factors = {'gbm' : me_gbm, 'jd' : me_jd, 'sv' : me_sv}
correlations = [['gbm', 'jd', 0.66], ['jd', 'sv', -0.75]]
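# pairwise correlations between risk factors: [factor A, factor B, correlation]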
gbm = geometric_brownian_motion('gbm_obj', me_gbm)
me_put = market_environment('put', dt.datetime(2015, 1, 1))
me_put.add_constant('maturity', dt.datetime(2015, 12, 31))
me_put.add_constant('strike', 40.)
me_put.add_constant('currency', 'EUR')
me_put.add_environment(val_env)
am_put = valuation_mcs_american_single('am_put', mar_env=me_put, underlying=gbm,
payoff_func='np.maximum(strike - instrument_values, 0)')
am_put.present_value(fixed_seed=True, bf=5)
jd = jump_diffusion('jd_obj', me_jd)
me_max_call = market_environment('put', dt.datetime(2015, 1, 1))
me_max_call.add_constant('maturity', dt.datetime(2015, 9, 15))
me_max_call.add_constant('currency', 'EUR')
me_max_call.add_environment(val_env)
payoff_call = "np.maximum(np.maximum(maturity_value['gbm'], maturity_value['jd']) - 34., 0)"
assets = {'gbm' : me_gbm, 'jd' : me_jd}
asset_corr = [correlations[0]]
asset_corr
max_call = valuation_mcs_european_multi('max_call', me_max_call, assets, asset_corr,
payoff_func=payoff_call)
max_call.present_value(fixed_seed=False)
max_call.delta('jd')
max_call.delta('gbm')
sv = stochastic_volatility('sv_obj', me_sv)
me_min_put = market_environment('min_put', dt.datetime(2015, 1, 1))
me_min_put.add_constant('maturity', dt.datetime(2015, 6, 17))
me_min_put.add_constant('currency', 'EUR')
me_min_put.add_environment(val_env)
payoff_put = "np.maximum(32. - np.minimum(instrument_values['jd'], instrument_values['sv']), 0)"
assets = {'jd' : me_jd, 'sv' : me_sv}
asset_corr = [correlations[1]]
asset_corr
min_put = valuation_mcs_american_multi(
'min_put', val_env=me_min_put, risk_factors=assets,
correlations=asset_corr, payoff_func=payoff_put)
min_put.present_value(fixed_seed=True)
min_put.delta('jd')
min_put.delta('sv')
am_put_pos = derivatives_position(
name='am_put_pos',
quantity=2,
underlyings=['gbm'],
mar_env=me_put,
otype='American single',
payoff_func='np.maximum(instrument_values - 36., 0)')
max_call_pos = derivatives_position(
'max_call_pos', 3, ['gbm', 'jd'],
me_max_call, 'European multi',
payoff_call)
min_put_pos = derivatives_position(
'min_put_pos', 5, ['sv', 'jd'],
me_min_put, 'American multi',
payoff_put)
positions = {'am_put_pos' : am_put_pos, 'max_call_pos' : max_call_pos,
'min_put_pos' : min_put_pos}
port = derivatives_portfolio(name='portfolio',
positions=positions,
val_env=val_env,
risk_factors=risk_factors,
correlations=correlations)
%time stats = port.get_statistics()
stats
stats['pos_value'].sum()
path_no = 1
paths1 = port.underlying_objects['sv'].get_instrument_values()[:, path_no]
paths2 = port.underlying_objects['jd'].get_instrument_values()[:, path_no]
paths1
paths2
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(10, 6))
plt.plot(port.time_grid, paths1, 'r', label='sv')
plt.plot(port.time_grid, paths2, 'b', label='jd')
plt.gcf().autofmt_xdate()
plt.legend(loc=0); plt.grid(True)
# negatively correlated underlyings
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Risk Factors
Step2: Three risk factors ares modeled
Step3: Assumptions for the geometric_brownian_motion object.
Step4: Assumptions for the jump_diffusion object.
Step5: Assumptions for the stochastic_volatility object.
Step6: Finally, the unifying valuation assumption for the valuation environment.
Step7: These are added to the single market_environment objects of the risk factors.
Step8: Finally, the market model with the risk factors and the correlations between them.
Step9: Derivatives
Step10: European Maximum Call on 2 Assets
Step11: American Minimum Put on 2 Assets
Step12: Portfolio
Step13: These objects are to be collected in dictionary objects.
Step14: All is together to instantiate the derivatives_portfolio class.
Step15: Let us have a look at the major portfolio statistics.
Step16: Finally, a graphical look at two selected, simulated paths of the stochastic volatility risk factor and the jump diffusion risk factor, respectively.
Step17: The resulting plot illustrates the strong negative correlation.
|
545 | <ASSISTANT_TASK:>
Python Code:
from nltk.corpus import stopwords as nltk_stop_words
from nltk.corpus import words
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
#from sklearn.model_selection import cross_val_score
from sklearn.cross_validation import cross_val_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
import pandas as pd
import datetime
import numpy as np
from time import time
from sklearn.grid_search import GridSearchCV
def read_data():
X_train = []
X_test = []
id_test = []
y_train = []
with open('products_sentiment_train.tsv') as f:
for line in f:
parts = line.rsplit('\t', 1)
X_train.append(parts[0].strip())
y_train.append(parts[1].strip())
with open('products_sentiment_test.tsv') as f:
f.readline()
for line in f:
parts = line.split('\t', 1)
id_test.append(parts[0].strip())
X_test.append(parts[1].strip())
return X_train, y_train, id_test, X_test
def predict(predictor, data_train, y, id_test, data_test, cv_score=None):
predictor.fit(data_train, y)
prediction = predictor.predict(data_test)
#print predictor
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
filepath_prediction = 'data/prediction-%s-data.csv' % timestamp
filepath_description = 'data/prediction-%s-estimator.txt' % timestamp
# Create a dataframe with predictions and write it to CSV file
predictions_df = pd.DataFrame(data=prediction, columns=['y'])
predictions_df.to_csv(filepath_prediction, sep=',', index_label='Id')
# Write a short description of the classifier that was used
f = open(filepath_description, 'w')
f.write(str(predictor))
score = '\nCross-validation score %.8f' % cv_score
f.write(score)
f.close()
def get_pipeline_and_params_1():
pipeline = Pipeline([
('vect', CountVectorizer()),
('logreg', LogisticRegression()),
])
parameters = {
'vect__max_df': (0.6, 0.8, 1.0),
'vect__min_df': (0, 1, 2, 5),
'vect__stop_words': ('english', None),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'logreg__C': (0.0001, 0.01, 1),
'logreg__penalty': ('l2', 'l1'),
}
return pipeline, parameters
def get_pipeline_and_params_2():
pipeline = Pipeline([
('tfidf', TfidfVectorizer()),
('logreg', LogisticRegression()),
])
parameters = {
'tfidf__max_df': (0.6, 0.8, 1.0),
'tfidf__min_df': (0, 5, 10, 15),
'tfidf__ngram_range': ((1, 1), (1, 2), (1,3), (2,3)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
'tfidf__norm': ('l1', 'l2'),
'logreg__C': (0.0001, 0.01, 1),
'logreg__penalty': ('l2', 'l1'),
}
return pipeline, parameters
def get_pipeline_and_params_3():
# gives 0.7895 on cross-validation and 0.835 on Kaggle. done in 108.629s
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 3), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
'clf__n_iter': (10, 50, 80),
}
return pipeline, parameters
def get_pipeline_and_params_4():
nltk_sw = nltk_stop_words.words('english')
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LinearSVC()),
])
parameters = {
#'vect__max_df': (0.8, 0.9, 1),
'vect__min_df': (0, 5, 10),
#'vect__vocabulary': (None),
'vect__stop_words': ('english', nltk_sw, None),
'vect__max_features': (None, 5000, 10000, 50000),
#'vect__analyzer' : ('word', 'char', 'char_wb',),
'vect__ngram_range': ((1,1), (1, 4), (1, 3)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__smooth_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
#'tfidf__sublinear_tf': (True, False),
#'clf__C': (0.00001, 0.001, 0.1, 1),
}
return pipeline, parameters
def get_pipeline_and_params_5():
wordlist = set(words.words())
nltk_sw = nltk_stop_words.words('english')
pipeline = Pipeline([
('vect', CountVectorizer(vocabulary=wordlist)),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
'vect__strip_accents': ('ascii', None),
'vect__analyzer': ('word', 'char_wb'),
'vect__max_df': (0.8, 1.0),
#'vect__min_df': (0, 1, 2),
#'vect__max_features': (100, 200, 500, 1000, 2000, 5000),
'vect__vocabulary': (None, wordlist),
'vect__stop_words': ('english', nltk_sw, None),
'vect__ngram_range': ((1, 3), (1, 1), (1, 2)),
'tfidf__use_idf': (True,),
'tfidf__norm': ('l1',),
'clf__alpha': (0.000001,),
'clf__penalty': ('l1',),
#'clf__n_iter': (10, 50, 80),
}
return pipeline, parameters
def do_grid_search(pipeline, parameters, X_train, y_train):
grid_search = GridSearchCV(pipeline, parameters, scoring='accuracy')
t0 = time()
grid_search.fit(X_train, y_train)
print "done in %0.3fs" % (time() - t0)
print("Best score: %.4f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
return grid_search
def do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params):
pipeline, parameters = get_pipeline_and_params()
gs = do_grid_search(pipeline, parameters, X_train, y_train)
predict(gs.best_estimator_, X_train, y_train, id_test, X_test, gs.best_score_)
X_train, y_train, id_test, X_test = read_data()
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_1)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_2)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_3)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_4)
do_experiment(X_train, y_train, id_test, X_test, get_pipeline_and_params_5)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the necessary functions.
Step2: Train and classify with the chosen classifier, saving the results to a file with a timestamp in its name for convenience.
Step3: Below are the various feature-extraction and classification configurations used in the experiments.
Step4: Variant 2 with word-frequency (TF-IDF) features and logistic regression classification.
Step5: Variant 3 with word-frequency (TF-IDF) features and a stochastic gradient descent classifier.
Step6: Variant 4 with word-frequency (TF-IDF) features and support vector machine classification.
Step7: Variant 5 with word-frequency (TF-IDF) features and an SGD classifier, using an English word corpus, stop words, and a sub-word analyzer (the 'char_wb' parameter).
Step8: A function that searches over parameter combinations and finds the best configuration with GridSearchCV
Step9: The main function describing a single experiment
Step10: Read the input data
Step11: Run the series of experiments; their cross-validation accuracy results and the selected parameter values.
|
546 | <ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
from google.cloud import bigquery
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # Replace with your REGION
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
exists=$(gsutil ls -d | grep -w gs://$BUCKET/)
if [ -n "$exists" ]; then
echo -e "Bucket gs://$BUCKET already exists."
else
echo "Creating a new GCS bucket."
gsutil mb -l $REGION gs://$BUCKET
echo -e "\nHere are your current buckets:"
gsutil ls
fi
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
regex = ".*://(.[^/]+)/"
sub_query = """
SELECT
    title,
    ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
    `bigquery-public-data.hacker_news.stories`
WHERE
    REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
    AND LENGTH(title) > 10
""".format(
    regex
)
query = """
SELECT
    LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
    source
FROM
    ({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
""".format(
    sub_query=sub_query
)
print(query)
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
print(f"The full dataset contains {len(title_dataset)} titles")
title_dataset.source.value_counts()
DATADIR = "./data/"
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = "titles_full.csv"
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding="utf-8"
)
sample_title_dataset = title_dataset.sample(n=1000)
sample_title_dataset.source.value_counts()
SAMPLE_DATASET_NAME = "titles_sample.csv"
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding="utf-8"
)
sample_title_dataset.head()
%%bash
gsutil cp data/titles_sample.csv gs://$BUCKET
from google.cloud import storage
storage_client = storage.Client()
bucket = storage_client.get_bucket(BUCKET)
SAMPLE_BATCH_INPUTS = "./batch_predict_inputs.jsonl"
for idx, text in sample_title_dataset.title.items():
# write the text sample to GCS
blob = bucket.blob(f"hacker_news_sample/sample_{idx}.txt")
blob.upload_from_string(data=text, content_type="text/plain")
# add the GCS file to local jsonl
with open(SAMPLE_BATCH_INPUTS, "a") as f:
f.write(
f'{{"content": "gs://{BUCKET}/hacker_news_sample/sample_{idx}.txt", "mimeType": "text/plain"}}\n'
)
!head -5 ./batch_predict_inputs.jsonl
!gsutil ls gs://$BUCKET/hacker_news_sample | head -5
!gsutil cp ./batch_predict_inputs.jsonl gs://$BUCKET
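# The step description below mentions kicking off the batch prediction job after copying
# the jsonl file to GCS. A minimal sketch with the Vertex AI SDK follows; it assumes an
# AutoML text model has already been trained, and MODEL_RESOURCE_NAME is a placeholder to
# replace with your own model's resource name.
from google.cloud import aiplatform
aiplatform.init(project=PROJECT, location=REGION)
MODEL_RESOURCE_NAME = "projects/PROJECT_NUMBER/locations/us-central1/models/MODEL_ID"  # placeholder
model = aiplatform.Model(MODEL_RESOURCE_NAME)
batch_job = model.batch_predict(
    job_display_name="hacker_news_batch_predict",
    gcs_source=f"gs://{BUCKET}/batch_predict_inputs.jsonl",
    gcs_destination_prefix=f"gs://{BUCKET}/batch_predict_outputs",
    sync=False,
)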
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replace the variable values in the cell below. Note, AutoML can only be run in the regions where it is available.
Step2: Create a Dataset from BigQuery
Step3: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://www.nytimes.com/some-article, we want to extract nytimes as the source.
Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
Step7: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
Step8: AutoML for text classification requires that
Step9: Let's make sure we have roughly the same number of labels for each of our three labels
Step10: Finally we will save our data, which is currently in-memory, to disk.
Step11: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
Step12: Let's write the sample datatset to disk.
Step13: Train a Model with AutoML for Text Classification
Step 1
Step14: Let's make sure the jsonl file was written correctly and that the bucket contains the sample .txt files
Step15: We'll copy the json file to our GCS bucket and kick off the batch prediction job...
|
547 | <ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
! pip3 install $USER_FLAG kfp google-cloud-pipeline-components --upgrade
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
! python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))"
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_URI = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_URI
! gsutil ls -al $BUCKET_URI
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your GCP project id from gcloud
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].strip()
SERVICE_ACCOUNT = SERVICE_ACCOUNT.replace("*", "")
SERVICE_ACCOUNT = SERVICE_ACCOUNT.replace(" ", "")
print(SERVICE_ACCOUNT)
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_URI
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_URI
import google.cloud.aiplatform as aip
PIPELINE_ROOT = "{}/pipeline_root/tpu_cifar10_pipeline".format(BUCKET_URI)
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2.dsl import component
from kfp.v2.google import experimental
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)
from google.cloud.aiplatform import gapic
# Use TPU Accelerators. Temporarily using numeric codes, until types are added to the SDK
# 6 = TPU_V2
# 7 = TPU_V3
TRAIN_TPU, TRAIN_NTPU = (7, 8) # Using TPU_V3 with 8 accelerators
DEPLOY_GPU, DEPLOY_NGPU = (gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
DEPLOY_VERSION = "tf2-gpu.2-6"
DEPLOY_IMAGE = "us-docker.pkg.dev/cloud-aiplatform/prediction/{}:latest".format(
DEPLOY_VERSION
)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
MACHINE_TYPE = "cloud-tpu"
# TPU VMs do not require VCPU definition
TRAIN_COMPUTE = MACHINE_TYPE
print("Train machine type", TRAIN_COMPUTE)
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
if not TRAIN_NTPU or TRAIN_NTPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "tpu"
EPOCHS = 20
STEPS = 10000
TRAINER_ARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
# create working dir to pass to job spec
WORKING_DIR = f"{PIPELINE_ROOT}/model"
MODEL_DISPLAY_NAME = f"tpu_train_deploy_{TIMESTAMP}"
print(TRAINER_ARGS, WORKING_DIR, MODEL_DISPLAY_NAME)
CONTAINER_ARTIFACTS_DIR = "tpu-container-artifacts"
!mkdir {CONTAINER_ARTIFACTS_DIR}
import os
dockerfile = FROM python:3.8
WORKDIR /root
# Copies the trainer code to the docker image.
COPY train.py /root/train.py
RUN pip3 install tensorflow-datasets
# Install TPU Tensorflow and dependencies.
# libtpu.so must be under the '/lib' directory.
RUN wget https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/libtpu/20210525/libtpu.so -O /lib/libtpu.so
RUN chmod 777 /lib/libtpu.so
RUN wget https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/tensorflow/20210525/tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl
RUN pip3 install tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl
RUN rm tf_nightly-2.6.0-cp38-cp38-linux_x86_64.whl
ENTRYPOINT ["python3", "train.py"]
with open(os.path.join(CONTAINER_ARTIFACTS_DIR, "Dockerfile"), "w") as f:
f.write(dockerfile)
%%writefile {CONTAINER_ARTIFACTS_DIR}/train.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple TPU devices
elif args.distribute == 'tpu':
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
strategy = tf.distribute.TPUStrategy(cluster_resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
MODEL_DIR = os.getenv("AIP_MODEL_DIR")
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
if args.distribute=="tpu":
save_locally = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
model.save(MODEL_DIR, options=save_locally)
else:
model.save(MODEL_DIR)
!gcloud services enable artifactregistry.googleapis.com
!sudo usermod -a -G docker ${USER}
REPOSITORY = "tpu-training-repository"
IMAGE = "tpu-train"
!gcloud auth configure-docker us-central1-docker.pkg.dev --quiet
!gcloud artifacts repositories create $REPOSITORY --repository-format=docker \
--location=us-central1 --description="Vertex TPU training repository"
TRAIN_IMAGE = f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE}:latest"
print(TRAIN_IMAGE)
%cd $CONTAINER_ARTIFACTS_DIR
# Use quiet flag as the output is fairly large
!docker build --quiet \
--tag={TRAIN_IMAGE} \
.
!docker push {TRAIN_IMAGE}
%cd ..
WORKER_POOL_SPECS = [
{
"containerSpec": {
"args": TRAINER_ARGS,
"env": [{"name": "AIP_MODEL_DIR", "value": WORKING_DIR}],
"imageUri": TRAIN_IMAGE,
},
"replicaCount": "1",
"machineSpec": {
"machineType": TRAIN_COMPUTE,
"accelerator_type": TRAIN_TPU,
"accelerator_count": TRAIN_NTPU,
},
}
]
@component
def tpu_training_task_op(input1: str):
print("training task: {}".format(input1))
@kfp.dsl.pipeline(name="train-endpoint-deploy" + TIMESTAMP)
def pipeline(
project: str = PROJECT_ID,
model_display_name: str = MODEL_DISPLAY_NAME,
serving_container_image_uri: str = DEPLOY_IMAGE,
):
train_task = tpu_training_task_op("tpu model training")
experimental.run_as_aiplatform_custom_job(
train_task,
worker_pool_specs=WORKER_POOL_SPECS,
)
model_upload_op = gcc_aip.ModelUploadOp(
project=project,
display_name=model_display_name,
artifact_uri=WORKING_DIR,
serving_container_image_uri=serving_container_image_uri,
)
model_upload_op.after(train_task)
endpoint_create_op = gcc_aip.EndpointCreateOp(
project=project,
display_name="tpu-pipeline-created-endpoint",
)
gcc_aip.ModelDeployOp(
endpoint=endpoint_create_op.outputs["endpoint"],
model=model_upload_op.outputs["model"],
deployed_model_display_name=model_display_name,
dedicated_resources_machine_type=DEPLOY_COMPUTE,
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=1,
dedicated_resources_accelerator_type=DEPLOY_GPU.name,
dedicated_resources_accelerator_count=DEPLOY_NGPU,
)
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline,
package_path="tpu train cifar10_pipeline.json".replace(" ", "_"),
)
DISPLAY_NAME = "tpu_cifar10_training_" + TIMESTAMP
job = aip.PipelineJob(
display_name=DISPLAY_NAME,
template_path="tpu train cifar10_pipeline.json".replace(" ", "_"),
pipeline_root=PIPELINE_ROOT,
)
job.run()
! rm tpu_train_cifar10_pipeline.json
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
try:
if delete_model and "DISPLAY_NAME" in globals():
models = aip.Model.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
model = models[0]
aip.Model.delete(model)
print("Deleted model:", model)
except Exception as e:
print(e)
try:
if delete_endpoint and "DISPLAY_NAME" in globals():
endpoints = aip.Endpoint.list(
filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time"
)
endpoint = endpoints[0]
endpoint.undeploy_all()
aip.Endpoint.delete(endpoint.resource_name)
print("Deleted endpoint:", endpoint)
except Exception as e:
print(e)
if delete_dataset and "DISPLAY_NAME" in globals():
if "tabular" == "tabular":
try:
datasets = aip.TabularDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.TabularDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "image":
try:
datasets = aip.ImageDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.ImageDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "text":
try:
datasets = aip.TextDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.TextDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "video":
try:
datasets = aip.VideoDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.VideoDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
try:
if delete_pipeline and "DISPLAY_NAME" in globals():
pipelines = aip.PipelineJob.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
pipeline = pipelines[0]
aip.PipelineJob.delete(pipeline.resource_name)
print("Deleted pipeline:", pipeline)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_URI" in globals():
! gsutil rm -r $BUCKET_URI
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Install the latest GA version of google-cloud-pipeline-components library as well.
Step3: Restart the kernel
Step4: Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
Step5: Before you begin
Step6: Region
Step7: Timestamp
Step8: Authenticate your Google Cloud account
Step9: Create a Cloud Storage bucket
Step10: Only if your bucket doesn't already exist
Step11: Finally, validate access to your Cloud Storage bucket by examining its contents
Step12: Service Account
Step13: Set service account access for Vertex AI Pipelines
Step14: Set up variables
Step15: Vertex AI Pipelines constants
Step16: Additional imports.
Step17: Initialize Vertex AI SDK for Python
Step18: Set up variables
Step19: Set pre-built containers
Step20: Set machine types
Step21: Create a custom container
Step23: Write the Dockerfile
Step24: Training script
Step25: Build Container
Step26: Build the training image
Step27: Define custom model pipeline that uses components from google_cloud_pipeline_components
Step28: Define pipeline components
Step29: The pipeline has four main steps
Step30: Compile the pipeline
Step31: Run the pipeline
Step32: Click on the generated link to see your run in the Cloud Console.
|
548 | <ASSISTANT_TASK:>
Python Code:
import os
import sys
import urllib2
import collections
import matplotlib.pyplot as plt
import math
from time import time, sleep
%pylab inline
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
raise ValueError("Please set SPARK_HOME environment variable!")
# Add the py4j to the path.
sys.path.insert(0, os.path.join(spark_home, 'python'))
sys.path.insert(0, os.path.join(spark_home, 'C:/spark/python/lib/py4j-0.9-src.zip'))
from pyspark.mllib.recommendation import ALS, Rating
from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local[*]").setAppName("MovieRecommendationsALS").set("spark.executor.memory", "2g")
sc = SparkContext(conf = conf)
def loadMovieNames():
movieNames = {}
for line in urllib2.urlopen("https://raw.githubusercontent.com/psumank/DATA643/master/WK5/ml-100k/u.item"):
fields = line.split('|')
movieNames[int(fields[0])] = fields[1].decode('ascii', 'ignore')
return movieNames
print "\nLoading movie names..."
nameDict = loadMovieNames()
print "\nLoading ratings data..."
data = sc.textFile("file:///C:/Users/p_sum/.ipynb_checkpoints/ml-100k/u.data")
ratings = data.map(lambda x: x.split()[2])
#action -- just to trigger the driver [ lazy evaluation ]
rating_results = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(rating_results.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
ratPlot = plt.bar(range(len(sortedResults)), sortedResults.values(), align='center')
plt.xticks(range(len(sortedResults)), list(sortedResults.keys()))
ratPlot[3].set_color('g')
print "Ratings Histogram"
movies = data.map(lambda x: (int(x.split()[1]), 1))
movieCounts = movies.reduceByKey(lambda x, y: x + y)
flipped = movieCounts.map( lambda (x, y) : (y, x))
sortedMovies = flipped.sortByKey(False)
sortedMoviesWithNames = sortedMovies.map(lambda (count, movie) : (nameDict[movie], count))
results = sortedMoviesWithNames.collect()
subset = results[0:10]
popular_movieNm = [str(i[0]) for i in subset]
popularity_strength = [int(i[1]) for i in subset]
popMovplot = plt.barh(range(len(subset)), popularity_strength, align='center')
plt.yticks(range(len(subset)), popular_movieNm)
popMovplot[0].set_color('g')
print "Most Popular Movies from the Dataset"
ratingsRDD = data.map(lambda l: l.split()).map(lambda l: (int(l[0]), (int(l[1]), float(l[2]))))
ratingsRDD.takeOrdered(10, key = lambda x: x[0])
ratingsRDD.take(4)
# Movies rated by same user. ==> [ user ID ==> ( (movieID, rating), (movieID, rating)) ]
userJoinedRatings = ratingsRDD.join(ratingsRDD)
userJoinedRatings.takeOrdered(10, key = lambda x: x[0])
# Remove dups
def filterDups( (userID, ratings) ):
(movie1, rating1) = ratings[0]
(movie2, rating2) = ratings[1]
return movie1 < movie2
uniqueUserJoinedRatings = userJoinedRatings.filter(filterDups)
uniqueUserJoinedRatings.takeOrdered(10, key = lambda x: x[0])
# Now key by (movie1, movie2) pairs ==> (movie1, movie2) => (rating1, rating2)
def makeMovieRatingPairs((user, ratings)):
(movie1, rating1) = ratings[0]
(movie2, rating2) = ratings[1]
return ((movie1, movie2), (rating1, rating2))
moviePairs = uniqueUserJoinedRatings.map(makeMovieRatingPairs)
moviePairs.takeOrdered(10, key = lambda x: x[0])
#collect all ratings for each movie pair and compute similarity. (movie1, movie2) = > (rating1, rating2), (rating1, rating2) ...
moviePairRatings = moviePairs.groupByKey()
moviePairRatings.takeOrdered(10, key = lambda x: x[0])
#Compute Similarity
def cosineSimilarity(ratingPairs):
numPairs = 0
sum_xx = sum_yy = sum_xy = 0
for ratingX, ratingY in ratingPairs:
sum_xx += ratingX * ratingX
sum_yy += ratingY * ratingY
sum_xy += ratingX * ratingY
numPairs += 1
numerator = sum_xy
denominator = sqrt(sum_xx) * sqrt(sum_yy)
score = 0
if (denominator):
score = (numerator / (float(denominator)))
return (score, numPairs)
moviePairSimilarities = moviePairRatings.mapValues(cosineSimilarity).cache()
moviePairSimilarities.takeOrdered(10, key = lambda x: x[0])
scoreThreshold = 0.97
coOccurenceThreshold = 50
inputMovieID = 1 #Toy Story.
# Filter for movies with this sim that are "good" as defined by our quality thresholds.
filteredResults = moviePairSimilarities.filter(lambda((pair,sim)): \
(pair[0] == inputMovieID or pair[1] == inputMovieID) and sim[0] > scoreThreshold and sim[1] > coOccurenceThreshold)
#Top 10 by quality score.
results = filteredResults.map(lambda((pair,sim)): (sim, pair)).sortByKey(ascending = False).take(10)
print "Top 10 similar movies for " + nameDict[inputMovieID]
for result in results:
(sim, pair) = result
# Display the similarity result that isn't the movie we're looking at
similarMovieID = pair[0]
if (similarMovieID == inputMovieID):
similarMovieID = pair[1]
print nameDict[similarMovieID] + "\tscore: " + str(sim[0]) + "\tstrength: " + str(sim[1])
ratings = data.map(lambda l: l.split()).map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2]))).cache()
ratings.take(3)
nratings = ratings.count()
nUsers = ratings.keys().distinct().count()
nMovies = ratings.values().distinct().count()
print "We have Got %d ratings from %d users on %d movies." % (nratings, nUsers, nMovies)
# Build the recommendation model using Alternating Least Squares
#Train a matrix factorization model given an RDD of ratings given by users to items, in the form of
#(userID, itemID, rating) pairs. We approximate the ratings matrix as the product of two lower-rank matrices
#of a given rank (number of features). To solve for these features, we run a given number of iterations of ALS.
#The level of parallelism is determined automatically based on the number of partitions in ratings.
start = time()
seed = 5L
iterations = 10
rank = 8
model = ALS.train(ratings, rank, iterations)
duration = time() - start
print "Model trained in %s seconds" % round(duration,3)
#Lets recommend movies for the user id - 2
userID = 2
print "\nTop 10 recommendations:"
recommendations = model.recommendProducts(userID, 10)
for recommendation in recommendations:
print nameDict[int(recommendation[1])] + \
" score " + str(recommendation[2])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the pySpark Environment
Step2: Initialize Spark Context
Step3: Load and Analyse Data
Step4: Ratings Histogram
Step5: Most popular movies
Step6: Similar Movies
Step7: Lets find similar movies for Toy Story (Movie ID
Step8: Recommender using MLLib
Step9: Recommendations
|
549 | <ASSISTANT_TASK:>
Python Code:
## Import libraries necessary for monitor data processing. ##
from matplotlib import pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from spins.invdes.problem_graph import log_tools
## Define filenames. ##
# `save_folder` is the full path to the directory containing the Pickle (.pkl) log files from the optimization.
save_folder = os.getcwd()
## Load the logged monitor data and monitor spec information. ##
# `df` is a pandas dataframe containing all the data loaded from the log Pickle (.pkl) files.
df = log_tools.create_log_data_frame(log_tools.load_all_logs(save_folder))
# `monitor_spec_filename` is the full path to the monitor spec yml file.
monitor_spec_filename = os.path.join(save_folder, "monitor_spec.yml")
# `monitor_descriptions` now contains the information from the monitor_spec.yml file. It follows the format of
# the schema found in `log_tools.monitor_spec`.
monitor_descriptions = log_tools.load_from_yml(monitor_spec_filename)
## Plot all monitor data and save into a pdf file in the project folder. ##
# `summary_out_name` is the full path to the pdf that will be generated containing plots of all the log data.
summary_out_name = os.path.join(save_folder, "summary.pdf")
# This command plots all the monitor data contained in the log files, saves it to the specified pdf file, and
# displays to the screen.
log_tools.plot_monitor_data(df, monitor_descriptions, summary_out_name)
## Print summary of scalar monitor values to screen during optimization without plotting. ##
# This command is useful to quickly view the current optimization state or
# if one is running an optimization somewhere where plotting to a screen is difficult.
log_tools.display_summary(df, monitor_descriptions)
## Get the iterations and data for a specific 1-dimensional scalar monitor (here, power vs iteration is demonstrated)
## for a specific overlap monitor.
# We call `get_joined_scalar_monitors` because we want the monitor data across all iterations rather than
# just the data for particular transformation or iteration number (contrast with `get_single_monitor_data` usage
# below).
joined_monitor_data = log_tools.get_joined_scalar_monitors(
df, "power1300", event_name="optimizing", scalar_operation="magnitude_squared")
# Now, the iteration numbers are stored in the list iterations and the overlap monitor power values are
# stored in the list data. - If desired, these lists can now be exported for plotting in a different program
# or can be plotted manually by the user in python, as demonstrated next.
iterations = joined_monitor_data.iterations
data = joined_monitor_data.data
## Manually plot the power versus iteration data we've retrieved for the monitor of interest. ##
plt.figure()
plt.plot(iterations, data)
plt.xlabel("Iterations")
plt.ylabel("Transmission")
plt.title("Transmission at 1300 nm")
## Get the data for a specific 2-dimensional field slice monitor. ##
# These functions get the monitor information for the monitor name specified above and return the data associated
# with the monitor name. Here we retrieve the last stored field. We can specify `transformation_name` and
# `iteration` to grab data from a particular transformation or iteration.
field_data = log_tools.get_single_monitor_data(df, "field1550")
# `field_data` is now an array with 3 entries, corresponding to the x-, y-, and z- components of the field,
# so we apply a utility function to get the magnitude of the vector.
field_mag = log_tools.process_field(field_data, vector_operation="magnitude")
## Manually plot this 2-dimensional field data. ##
plt.figure()
plt.imshow(np.squeeze(np.array(field_mag.T)), origin="lower")
plt.title("E-Field Magnitude at 1550 nm")
with open(os.path.join(save_folder, "step1.pkl"), "rb") as fp:
data = pickle.load(fp)
print("Log time: ", data["time"])
print("Transmission at 1300 nm: ", data["monitor_data"]["power1300"])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Option 1
Step2: Option 2
Step3: Option 3
|
550 | <ASSISTANT_TASK:>
Python Code:
import graphlab
products = graphlab.SFrame('amazon_baby_subset.gl/')
products.head()
products['sentiment']
products.head(10)['name']
print '# of positive reviews =', len(products[products['sentiment']==1])
print '# of negative reviews =', len(products[products['sentiment']==-1])
import json
with open('important_words.json', 'r') as f: # Reads the list of most frequent words
important_words = json.load(f)
important_words = [str(s) for s in important_words]
print important_words
def remove_punctuation(text):
import string
return text.translate(None, string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products['perfect']
def contains_important_word_count(word):
new_feature = "contains_" + word
products[new_feature] = products.apply(lambda x : x[word] >= 1)
word = 'perfect'
new_feature = "contains_" + word
contains_important_word_count(word)
print "Number of reviews containing word `" + word + "` = " + str(products[new_feature].sum())
import numpy as np
def get_numpy_data(data_sframe, features, label):
data_sframe['intercept'] = 1
features = ['intercept'] + features
features_sframe = data_sframe[features]
feature_matrix = features_sframe.to_numpy()
label_sarray = data_sframe[label]
label_array = label_sarray.to_numpy()
return(feature_matrix, label_array)
# Warning: This may take a few minutes...
feature_matrix, sentiment = get_numpy_data(products, important_words, 'sentiment')
feature_matrix.shape
len(important_words)
print "#features in feature_matrix = " + str(len(important_words) + 1)
sentiment
'''
produces probablistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
import math
sigmoid = lambda x: 1 / (1 + math.exp(-x))
def predict_probability(feature_matrix, coefficients):
# Take dot product of feature_matrix and coefficients
dot_product = np.dot(feature_matrix, coefficients)
# Compute P(y_i = +1 | x_i, w) using the link function
predictions = []
for dpi in dot_product:
predictions.append(sigmoid(dpi))
# return predictions
return predictions
dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])
dummy_coefficients = np.array([1., 3., -1.])
correct_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )
correct_predictions = np.array( [ 1./(1+np.exp(-correct_scores[0])), 1./(1+np.exp(-correct_scores[1])) ] )
print 'The following outputs must match '
print '------------------------------------------------'
print 'correct_predictions =', correct_predictions
print 'output of predict_probability =', predict_probability(dummy_feature_matrix, dummy_coefficients)
def feature_derivative(errors, feature):
# Compute the dot product of errors and feature
derivative = np.dot(errors, feature)
# Return the derivative
return derivative
def compute_log_likelihood(feature_matrix, sentiment, coefficients):
indicator = (sentiment==+1)
scores = np.dot(feature_matrix, coefficients)
logexp = np.log(1. + np.exp(-scores))
# Simple check to prevent overflow
mask = np.isinf(logexp)
logexp[mask] = -scores[mask]
lp = np.sum((indicator-1)*scores - logexp)
return lp
dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])
dummy_coefficients = np.array([1., 3., -1.])
dummy_sentiment = np.array([-1, 1])
correct_indicators = np.array( [ -1==+1, 1==+1 ] )
correct_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )
correct_first_term = np.array( [ (correct_indicators[0]-1)*correct_scores[0], (correct_indicators[1]-1)*correct_scores[1] ] )
correct_second_term = np.array( [ np.log(1. + np.exp(-correct_scores[0])), np.log(1. + np.exp(-correct_scores[1])) ] )
correct_ll = sum( [ correct_first_term[0]-correct_second_term[0], correct_first_term[1]-correct_second_term[1] ] )
print 'The following outputs must match '
print '------------------------------------------------'
print 'correct_log_likelihood =', correct_ll
print 'output of compute_log_likelihood =', compute_log_likelihood(dummy_feature_matrix, dummy_sentiment, dummy_coefficients)
from math import sqrt
def logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):
coefficients = np.array(initial_coefficients) # make sure it's a numpy array
for itr in xrange(max_iter):
# Predict P(y_i = +1|x_i,w) using your predict_probability() function
predictions = predict_probability(feature_matrix, coefficients)
# Compute indicator value for (y_i = +1)
indicator = (sentiment==+1)
# Compute the errors as indicator - predictions
errors = indicator - predictions
for j in xrange(len(coefficients)): # loop over each coefficient
# Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].
# Compute the derivative for coefficients[j]. Save it in a variable called derivative
derivative = feature_derivative(errors, feature_matrix[:,j])
# add the step size times the derivative to the current coefficient
coefficients[j] += step_size * derivative
# Checking whether log likelihood is increasing
if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)
print 'iteration %*d: log likelihood of observed labels = %.8f' % \
(int(np.ceil(np.log10(max_iter))), itr, lp)
return coefficients
coefficients = logistic_regression(feature_matrix, sentiment, initial_coefficients=np.zeros(194),
step_size=1e-7, max_iter=301)
print "increases"
# Compute the scores as a dot product between feature_matrix and coefficients.
scores = np.dot(feature_matrix, coefficients)
class_predictions = []
class_predictor = lambda x : 1 if x > 0 else -1
for score in scores:
class_predictions.append(class_predictor(score))
class_predictions
print "#reviews with predicted positive sentiment = " + str(len([x for x in class_predictions if x == 1]))
t = 0
for p in class_predictions:
if p == 1:
t += 1
print t
num_mistakes = 0
for i in xrange(len(sentiment)):
if sentiment[i] != class_predictions[i]:
num_mistakes += 1
accuracy = 1 - float(num_mistakes) / len(sentiment)
print "-----------------------------------------------------"
print '# Reviews correctly classified =', len(products) - num_mistakes
print '# Reviews incorrectly classified =', num_mistakes
print '# Reviews total =', len(products)
print "-----------------------------------------------------"
print 'Accuracy = %.2f' % accuracy
coefficients = list(coefficients[1:]) # exclude intercept
word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients)]
word_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)
word_coefficient_tuples[:10]
word_coefficient_tuples[-10:]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load review dataset
Step2: One column of this dataset is 'sentiment', corresponding to the class label with +1 indicating a review with positive sentiment and -1 indicating one with negative sentiment.
Step3: Let us quickly explore more of this dataset. The 'name' column indicates the name of the product. Here we list the first 10 products in the dataset. We then count the number of positive and negative reviews.
Step4: Note
Step5: Now, we will perform 2 simple data transformations
Step6: Now we proceed with Step 2. For each word in important_words, we compute a count for the number of times the word occurs in the review. We will store this count in a separate column (one for each word). The result of this feature processing is a single column for each word in important_words which keeps a count of the number of times the respective word occurs in the review text.
Step7: The SFrame products now contains one column for each of the 193 important_words. As an example, the column perfect contains a count of the number of times the word perfect occurs in each of the reviews.
Step8: Now, write some code to compute the number of product reviews that contain the word perfect.
Step9: Quiz Question. How many reviews contain the word perfect?
Step10: We now provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned
Step11: Let us convert the data into NumPy arrays.
Step12: Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section)
Step13: Quiz Question
Step14: Now, let us see what the sentiment column looks like
Step15: Estimating conditional probability with link function
Step16: Aside. How the link function works with matrix algebra
Step17: Compute derivative of log likelihood with respect to a single coefficient
Step18: In the main lecture, our focus was on the likelihood. In the advanced optional video, however, we introduced a transformation of this likelihood---called the log likelihood---that simplifies the derivation of the gradient and is more numerically stable. Due to its numerical stability, we will use the log likelihood instead of the likelihood to assess the algorithm.
Step19: Checkpoint
Step20: Taking gradient steps
Step21: Now, let us run the logistic regression solver.
Step22: Quiz question
Step23: Predicting sentiments
Step24: Now, complete the following code block for Step 2 to compute the class predictions using the scores obtained above
Step25: Quiz question
Step26: Measuring accuracy
Step27: Quiz question
Step28: Now, word_coefficient_tuples contains a sorted list of (word, coefficient_value) tuples. The first 10 elements in this list correspond to the words that are most positive.
Step29: Quiz question
|
551 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
Gu = nx.Graph()
for i, j in [(1, 2), (1, 4), (4, 2), (4, 3)]:
Gu.add_edge(i,j)
nx.draw(Gu, with_labels = True)
import networkx as nx
Gd = nx.DiGraph()
for i, j in [(1, 2), (1, 4), (4, 2), (4, 3)]:
Gd.add_edge(i,j)
nx.draw(Gd, with_labels = True)
nx.draw(Gu, with_labels = True)
nx.draw(Gd, with_labels = True)
import numpy as np
x = [1, 1, 1, 2, 2, 3]
np.mean(x), np.sum(x), np.std(x)
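# Illustrative addition (not from the original notebook): the "Average Degree" step can
# also be computed directly from the graph Gu defined above, using the identity
# average degree = 2 * |E| / |N| for an undirected graph.
print(2.0 * Gu.number_of_edges() / Gu.number_of_nodes())
print(np.mean(list(dict(Gu.degree()).values())))  # same value from the degree sequence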
plt.hist(x)
plt.show()
from collections import defaultdict, Counter
freq = defaultdict(int)
for i in x:
freq[i] +=1
freq
freq_sum = np.sum(freq.values())
freq_sum
px = [float(i)/freq_sum for i in freq.values()]
px
plt.plot(freq.keys(), px, 'r-o')
plt.show()
plt.figure(1)
plt.subplot(121)
pos = nx.spring_layout(Gu)  # define a layout; the spring layout is used here
nx.draw(Gu, pos, with_labels = True)
plt.subplot(122)
nx.draw(Gd, pos, with_labels = True)
G1 = nx.complete_graph(4)
pos = nx.spring_layout(G1)  # define a layout; the spring layout is used here
nx.draw(G1, pos = pos, with_labels = True)
print(nx.transitivity(G1))
G2 = nx.Graph()
for i, j in [(1, 2), (1, 3), (1, 0), (3, 0)]:
G2.add_edge(i,j)
nx.draw(G2,pos = pos, with_labels = True)
print(nx.transitivity(G2))
G3 = nx.Graph()
for i, j in [(1, 2), (1, 3), (1, 0)]:
G3.add_edge(i,j)
nx.draw(G3, pos =pos, with_labels = True)
print(nx.transitivity(G3))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Directed
Step2: <img src = './img/networks.png' width = 1000>
Step3: Undirected network
Step4: Directed network
Step5: For a sample of N values
Step6: Average Degree
Step7: Undirected
|
552 | <ASSISTANT_TASK:>
Python Code:
choice = raw_input("Choose option 1, 2, or 3: ") #prompts user to input something on the command line, saves it in a variable. see below!
if (choice == "1"):
print "You have chosen option 1: cake"
elif (choice == "2"):
print "You have chosen option 2: ice cream"
elif (choice == "3"):
print "You have chosen option 3: broccoli"
else:
print "Invalid input."
test = raw_input("What is 1+1? ") #prompts user to input something on the command line, saves it in a variable. see below!
if (test == "2"):
print "Correct!"
test2 = raw_input("What is 2314*32626? ")
if (test2 == "75496564"):
print "Correct! You passed all my tests!"
else:
print "Sorry, that's wrong."
else:
print "Sorry, that's wrong.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Nested if/else
|
553 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
import pandas as pd
def make_data(N, f=0.3, rseed=1087):
rand = np.random.RandomState(rseed)
x = rand.randn(N)
x[int(f*N):] += 5
return x
x = make_data(1000)
hist = plt.hist(x, bins=30, normed=True)
density, bins, patches = hist
widths = bins[1:] - bins[:-1]
(density * widths).sum()
x = make_data(20)
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(1, 2, figsize=(12, 4), sharex=True, sharey=True,
subplot_kw = {'xlim': (-4, 9), 'ylim': (-0.02, 0.3)})
fig.subplots_adjust(wspace=0.05)
for i, offset in enumerate([0.0, 0.6]):
ax[i].hist(x, bins=bins+offset, normed=True)
ax[i].plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
fig, ax = plt.subplots()
bins = np.arange(-3, 8)
ax.plot(x, np.full_like(x, -0.1), '|k',
markeredgewidth=1)
for count, edge in zip(*np.histogram(x, bins)):
for i in range(count):
ax.add_patch(plt.Rectangle((edge, i), 1, 1,
alpha=0.5))
ax.set_xlim(-4, 8)
ax.set_ylim(-0.2, 8)
x_d = np.linspace(-4, 8, 2000)
density = sum((abs(xi - x_d) < 0.5) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 8]);
x_d[:50]
from scipy.stats import norm
x_d = np.linspace(-4, 8, 1000)
density = sum(norm(xi).pdf(x_d) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 5]);
from sklearn.neighbors import KernelDensity
# instantiate and fit the KDE model
kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
kde.fit(x[:, None])
# score samples returns the log of the probability density
logprob = kde.score_samples(x_d[:, None])
plt.fill_between(x_d, np.exp(logprob), alpha=0.5)
plt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
plt.ylim(-0.02, 0.30);
KernelDensity().get_params().keys()
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import LeaveOneOut
bandwidths = 10 ** np.linspace(-1, 1, 100)
grid = GridSearchCV(KernelDensity(kernel='gaussian'), {'bandwidth': bandwidths}, cv=LeaveOneOut(len(x)))
grid.fit(x[:, None])
grid.best_params_
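# Sketch (not part of the original code): refit the KDE with the bandwidth selected by
# the grid search above and plot the resulting density estimate on the same grid x_d.
best_bw = grid.best_params_['bandwidth']
kde_best = KernelDensity(bandwidth=best_bw, kernel='gaussian').fit(x[:, None])
plt.fill_between(x_d, np.exp(kde_best.score_samples(x_d[:, None])), alpha=0.5)
plt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)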
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivation for KDE - Histograms
Step2: Standard count-based histogram can be viewed from the plt.hist() function. normed parameter of this function makes the heights of the bars to reflect probability density
Step3: This histogram is equal binned, hence this normalization simply changes the scale on the y-axis, keeping the shape of the histogram constant. normed keeps the total area under the histogram to be 1, as we can confirm below
Step4: One problem with histogram as a density estimator is that the choice of bin size and location can lead to representations that have qualitatively different features.
Step5: We can think of histogram as a stack of blocks, where we stack one block within each bins on top of each point in the dataset. Let's view this in the following chart
Step6: The effects fo two binnings comes from the fact that the height of the block stack often reflects not on the actual density of points neaby, but on coincidences of how the bins align with the data points. This mis-alignment between points and their blocks is a potential cause of the poor histogram results.
Step7: Rough edges are not aesthetically pleasing, nor are they reflecting of any true properties of the data. In order to smooth them out, we might decide to replace the blocks at each location with a smooth function, like a Gaussian. Let's use a standard normal curve at each point instead of a block
Step8: This smoothed-out plot, with a Gaussian distribution contributed at the location of each input point, gives a much more accurate idea of the shape of the data distribution, and one which has much less variance (i.e., changes much less in response to differences in sampling).
Step9: Selecting the bandwidth via cross-validation
|
554 | <ASSISTANT_TASK:>
Python Code:
def findSum(n, a, b):
    # Sum of all numbers below n that are divisible by a or b
    total = 0
    for i in range(n):
        if i % a == 0 or i % b == 0:
            total += i
    return total

if __name__ == '__main__':
    n = 10
    a = 3
    b = 5
    print(findSum(n, a, b))
<END_TASK>
| <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
555 | <ASSISTANT_TASK:>
Python Code:
from systemml import MLContext, dml, jvm_stdout
ml = MLContext(sc)
print (ml.buildTime())
prog =
holdOut = 1/3
kFolds = 1/holdOut
nRows = 6; nCols = 3;
X = matrix(seq(1, nRows * nCols), rows = nRows, cols = nCols) # X data
y = matrix(seq(1, nRows), rows = nRows, cols = 1) # y label data
Xy = cbind (X,y) # Xy Data for CV
sv = rand (rows = nRows, cols = 1, min = 0.0, max = 1.0, pdf = "uniform") # sv selection vector for fold creation
sv = (order(target=sv, by=1, index.return=TRUE)) %% kFolds + 1 # with numbers between 1 .. kFolds
stats = matrix(0, rows=kFolds, cols=1) # stats per kFolds model on test data
parfor (i in 1:kFolds)
{
# Skip empty training data or test data.
if ( sum (sv == i) > 0 & sum (sv == i) < nrow(X) )
{
Xyi = removeEmpty(target = Xy, margin = "rows", select = (sv == i)) # Xyi fold, i.e. 1/k of rows (test data)
Xyni = removeEmpty(target = Xy, margin = "rows", select = (sv != i)) # Xyni data, i.e. (k-1)/k of rows (train data)
# Skip extreme label inbalance
distinctLabels = aggregate( target = Xyni[,1], groups = Xyni[,1], fn = "count")
if ( nrow(distinctLabels) > 1)
{
wi = trainAlg (Xyni[ ,1:ncol(Xy)-1], Xyni[ ,ncol(Xy)]) # wi Model for i-th training data
pi = testAlg (Xyi [ ,1:ncol(Xy)-1], wi) # pi Prediction for i-th test data
ei = evalPrediction (pi, Xyi[ ,ncol(Xy)]) # stats[i,] evaluation of prediction of i-th fold
stats[i,] = ei
print ( "Test data Xyi" + i + "\n" + toString(Xyi)
+ "\nTrain data Xyni" + i + "\n" + toString(Xyni)
+ "\nw" + i + "\n" + toString(wi)
+ "\nstats" + i + "\n" + toString(stats[i,])
+ "\n")
}
else
{
print ("Training data for fold " + i + " has only " + nrow(distinctLabels) + " distinct labels. Needs to be > 1.")
}
}
else
{
print ("Training data or test data for fold " + i + " is empty. Fold not validated.")
}
}
print ("SV selection vector:\n" + toString(sv))
trainAlg = function (matrix[double] X, matrix[double] y)
return (matrix[double] w)
{
w = t(X) %*% y
}
testAlg = function (matrix[double] X, matrix[double] w)
return (matrix[double] p)
{
p = X %*% w
}
evalPrediction = function (matrix[double] p, matrix[double] y)
return (matrix[double] e)
{
e = as.matrix(sum (p - y))
}
with jvm_stdout(True):
ml.execute(dml(prog))
prog =
M1 = matrix ('1 1 2 3 3 3 4 4 5 3 6 4 7 1 8 2 9 1', rows = 9, cols = 2)
M2 = matrix ('1 1 2 8 3 3 4 3 5 1', rows = 5, cols = 2)
I = rowSums (outer (M1[,2], t(M2[,2]), "==")) # I : indicator matrix for M1
M12 = removeEmpty (target = M1, margin = "rows", select = I) # apply filter to retrieve join result
print ("M1 \n" + toString(M1))
print ("M2 \n" + toString(M2))
print ("M1[,2] joined with M2[,2], and return matching M1 rows\n" + toString(M12))
with jvm_stdout():
ml.execute(dml(prog))
prog =
MinFreq = 3 # minimum frequency of tokens
M = matrix ('1 1 2 3 3 3 4 4 5 3 6 4 7 1 8 2 9 1', rows = 9, cols = 2)
gM = aggregate (target = M[,2], groups = M[,2], fn = "count") # gM: group by and count (grouped matrix)
gv = cbind (seq(1,nrow(gM)), gM) # gv: add group values to counts (group values)
fg = removeEmpty (target = gv * (gv[,2] >= MinFreq), margin = "rows") # fg: filtered groups
I = rowSums (outer (M[,2] ,t(fg[,1]), "==")) # I : indicator of size M with filtered groups
fM = removeEmpty (target = M, margin = "rows", select = I) # FM: filter matrix
print (toString(M))
print (toString(fM))
with jvm_stdout():
ml.execute(dml(prog))
prog =
I = matrix ("1 3 3 4 5", rows = 5, cols = 1)
J = matrix ("2 3 4 1 6", rows = 5, cols = 1)
V = matrix ("10 20 30 40 50", rows = 5, cols = 1)
M = table (I, J, V)
print (toString (M))
ml.execute(dml(prog).output('M')).get('M').toNumPy()
prog =
X = matrix ("1 2 3 3 3 4 5 10", rows = 8, cols = 1)
I = rbind (matrix (1,1,1), (X[1:nrow (X)-1,] != X[2:nrow (X),])); # compare current with next value
res = removeEmpty (target = X, margin = "rows", select = I); # select where different
ml.execute(dml(prog).output('res')).get('res').toNumPy()
prog =
X = matrix ("3 2 1 3 3 4 5 10", rows = 8, cols = 1)
I = aggregate (target = X, groups = X[,1], fn = "count") # group and count duplicates
res = removeEmpty (target = seq (1, max (X[,1])), margin = "rows", select = (I != 0)); # select groups
ml.execute(dml(prog).output('res')).get('res').toNumPy()
prog =
X = matrix ("3 2 1 3 3 4 5 10", rows = 8, cols = 1)
X = order (target = X, by = 1) # order values
I = rbind (matrix (1,1,1), (X[1:nrow (X)-1,] != X[2:nrow (X),]));
res = removeEmpty (target = X, margin = "rows", select = I);
ml.execute(dml(prog).output('res')).get('res').toNumPy()
prog =
X = matrix (1, rows = 1, cols = 100)
J = matrix ("10 20 25 26 28 31 50 67 79", rows = 1, cols = 9)
res = X + table (matrix (1, rows = 1, cols = ncol (J)), J, 10)
print (toString (res))
ml.execute(dml(prog).output('res')).get('res').toNumPy()
prog =
C = matrix ('50 40 20 10 30 20 40 20 30', rows = 9, cols = 1) # category data
V = matrix ('20 11 49 33 94 29 48 74 57', rows = 9, cols = 1) # value data
PCV = cbind (cbind (seq (1, nrow (C), 1), C), V); # PCV representation
PCV = order (target = PCV, by = 3, decreasing = TRUE, index.return = FALSE);
PCV = order (target = PCV, by = 2, decreasing = FALSE, index.return = FALSE);
# Find all rows of PCV where the category has a new value, in comparison to the previous row
is_new_C = matrix (1, rows = 1, cols = 1);
if (nrow (C) > 1) {
is_new_C = rbind (is_new_C, (PCV [1:nrow(C) - 1, 2] < PCV [2:nrow(C), 2]));
}
# Associate each category with its index
index_C = cumsum (is_new_C); # cumsum
# For each category, compute:
# - the list of distinct categories
# - the maximum value for each category
# - 0-1 aggregation matrix that adds records of the same category
distinct_C = removeEmpty (target = PCV [, 2], margin = "rows", select = is_new_C);
max_V_per_C = removeEmpty (target = PCV [, 3], margin = "rows", select = is_new_C);
C_indicator = table (index_C, PCV [, 1], max (index_C), nrow (C)); # table
sum_V_per_C = C_indicator %*% V
res = ml.execute(dml(prog).output('PCV','distinct_C', 'max_V_per_C', 'C_indicator', 'sum_V_per_C'))
print (res.get('PCV').toNumPy())
print (res.get('distinct_C').toNumPy())
print (res.get('max_V_per_C').toNumPy())
print (res.get('C_indicator').toNumPy())
print (res.get('sum_V_per_C').toNumPy())
cumsum_prod_def =
cumsum_prod = function (Matrix[double] X, Matrix[double] C, double start)
return (Matrix[double] Y)
# Computes the following recurrence in log-number of steps:
# Y [1, ] = X [1, ] + C [1, ] * start;
# Y [i+1, ] = X [i+1, ] + C [i+1, ] * Y [i, ]
{
Y = X; P = C; m = nrow(X); k = 1;
Y [1,] = Y [1,] + C [1,] * start;
while (k < m) {
Y [k + 1:m,] = Y [k + 1:m,] + Y [1:m - k,] * P [k + 1:m,];
P [k + 1:m,] = P [1:m - k,] * P [k + 1:m,];
k = 2 * k;
}
}
prog = cumsum_prod_def +
X = matrix ("1 2 3 4 5 6 7 8 9", rows = 9, cols = 1);
#Zeros in C cause "breaks" that restart the cumulative summation from 0
C = matrix ("0 1 1 0 1 1 1 0 1", rows = 9, cols = 1);
Y = cumsum_prod (X, C, 0);
print (toString(Y))
with jvm_stdout():
ml.execute(dml(prog))
prog = cumsum_prod_def +
X = matrix ("1 2 3 4 5 6 7 8 9", rows = 9, cols = 1);
# Ones in S represent selected rows to be copied, zeros represent non-selected rows
S = matrix ("1 0 0 1 0 0 0 1 0", rows = 9, cols = 1);
Y = cumsum_prod (X * S, 1 - S, 0);
print (toString(Y))
with jvm_stdout():
ml.execute(dml(prog))
cumsum_prod_naive_def =
cumsum_prod_naive = function (Matrix[double] X, Matrix[double] C, double start)
return (Matrix[double] Y)
{
Y = matrix (0, rows = nrow(X), cols = ncol(X));
Y [1,] = X [1,] + C [1,] * start;
for (i in 2:nrow(X))
{
Y [i,] = X [i,] + C [i,] * Y [i - 1,]
}
}
prog = cumsum_prod_def + cumsum_prod_naive_def +
X = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = "uniform", sparsity = 1.0);
C = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = "uniform", sparsity = 1.0);
Y1 = cumsum_prod_naive (X, C, 0.123);
with jvm_stdout():
ml.execute(dml(prog))
prog = cumsum_prod_def + cumsum_prod_naive_def +
X = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = "uniform", sparsity = 1.0);
C = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = "uniform", sparsity = 1.0);
Y2 = cumsum_prod (X, C, 0.123);
with jvm_stdout():
ml.execute(dml(prog))
invert_lower_triangular_def =
invert_lower_triangular = function (Matrix[double] LI)
return (Matrix[double] LO)
{
n = nrow (LI);
LO = matrix (0, rows = n, cols = n);
LO = LO + diag (1 / diag (LI));
k = 1;
while (k < n)
{
LPF = matrix (0, rows = n, cols = n);
parfor (p in 0:((n - 1) / (2 * k)), check = 0)
{
i = 2 * k * p;
j = i + k;
q = min (n, j + k);
if (j + 1 <= q) {
L1 = LO [i + 1:j, i + 1:j];
L2 = LI [j + 1:q, i + 1:j];
L3 = LO [j + 1:q, j + 1:q];
LPF [j + 1:q, i + 1:j] = -L3 %*% L2 %*% L1;
}
}
LO = LO + LPF;
k = 2 * k;
}
}
prog = invert_lower_triangular_def +
n = 1000;
A = rand (rows = n, cols = n, min = -1, max = 1, pdf = "uniform", sparsity = 1.0);
Mask = cumsum (diag (matrix (1, rows = n, cols = 1)));
L = (A %*% t(A)) * Mask; # Generate L for stability of the inverse
X = invert_lower_triangular (L);
print ("Maximum difference between X %*% L and Identity = " + max (abs (X %*% L - diag (matrix (1, rows = n, cols = 1)))));
with jvm_stdout():
ml.execute(dml(prog))
invert_lower_triangular_naive_def =
invert_lower_triangular_naive = function (Matrix[double] LI)
return (Matrix[double] LO)
{
n = nrow (LI);
LO = diag (matrix (1, rows = n, cols = 1));
for (i in 1:n - 1)
{
LO [i,] = LO [i,] / LI [i, i];
LO [i + 1:n,] = LO [i + 1:n,] - LI [i + 1:n, i] %*% LO [i,];
}
LO [n,] = LO [n,] / LI [n, n];
}
prog = invert_lower_triangular_naive_def +
n = 1000;
A = rand (rows = n, cols = n, min = -1, max = 1, pdf = "uniform", sparsity = 1.0);
Mask = cumsum (diag (matrix (1, rows = n, cols = 1)));
L = (A %*% t(A)) * Mask; # Generate L for stability of the inverse
X = invert_lower_triangular_naive (L);
print ("Maximum difference between X %*% L and Identity = " + max (abs (X %*% L - diag (matrix (1, rows = n, cols = 1)))));
with jvm_stdout():
ml.execute(dml(prog))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Cross Validation<a id="CrossValidation" />
Step4: Value-based join of two Matrices<a id="JoinMatrices"/>
Step6: Filter Matrix to include only Frequent Column Values <a id="FilterMatrix"/>
Step8: Construct (sparse) Matrix from (rowIndex, colIndex, values) triplets<a id="Construct_sparse_Matrix"></a>
Step10: Find and remove duplicates in columns or rows<a id="Find_and_remove_duplicates"></a>
Step12: No assumptions on values.
Step14: Order the values and then remove duplicates.
Step16: Set based Indexing<a id="Set_based_Indexing"></a>
Step18: Group by Aggregate using Linear Algebra<a id="Multi_column_Sorting"></a>
Step20: Cumulative Summation with Decay Multiplier<a id="CumSum_Product"></a>
Step22: In this example we use cumsum_prod for cumulative summation with "breaks", that is, multiple cumulative summations in one.
Step24: In this example, we copy selected rows downward to all consecutive non-selected rows.
Step26: This is a naive implementation of cumulative summation with decay multiplier.
Step29: There is a significant performance difference between the <b>naive</b> implementation and the <b>tricky</b> implementation.
Step32: Invert Lower Triangular Matrix<a id="Invert_Lower_Triangular_Matrix"></a>
Step34: This is a naive implementation of inverting a lower triangular matrix.
Step36: The naive implementation is significantly slower than the divide-and-conquer implementation.
|
556 | <ASSISTANT_TASK:>
Python Code:
import datetime
print( "packages imported at " + str( datetime.datetime.now() ) )
%pwd
%ls
%run ../django_init.py
from context_text.models import Article
# how many articles in "grp_month"?
article_qs = Article.objects.filter( tags__name__in = [ "grp_month" ] )
grp_month_count = article_qs.count()
print( "grp_month count = {}".format( grp_month_count ) )
%run ../config-coder_index-prelim_month.py
# output debug JSON to file
my_reliability_instance.debug_output_json_file_path = "/home/jonathanmorgan/" + label + ".json"
#===============================================================================
# process
#===============================================================================
# process articles
#my_reliability_instance.process_articles( tag_list )
# output to database.
#my_reliability_instance.output_reliability_data( label )
print( "reliability data created at " + str( datetime.datetime.now() ) )
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup - virtualenv jupyter kernel
Step2: Data characterization
Step3: Reliability data creation - prelim_month
Step4: Example snapshot of configuration in this file
|
557 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import pymc3 as pm
from pymc3.gp.util import plot_gp_dist
import theano.tensor as tt
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('dark')
seasonal_pitch_raw = pd.read_csv('../private_data/seasonal_pitch_data.csv', encoding='utf-8')
seasonal_pitch_raw.head()
colnames = seasonal_pitch_raw.columns.copy()
seasonal_pitch_raw.iloc[:, 5] = seasonal_pitch_raw.iloc[:, 5] + seasonal_pitch_raw.iloc[:, 6]
seasonal_pitch_raw.iloc[:, 1] = seasonal_pitch_raw.iloc[:, 1] + seasonal_pitch_raw.iloc[:, 2]
seasonal_pitch = (seasonal_pitch_raw.drop(colnames[[2, 6]], axis=1)
.reset_index())
seasonal_pitch.columns = colnames
seasonal_pitch['pi_pitch_type'] = seasonal_pitch.pi_pitch_type.str.upper()
seasonal_pitch['date'] = pd.to_datetime(seasonal_pitch.date)
seasonal_pitch.head()
# Keep only the common pitch types (drop knuckleballs, intentional balls and unknown pitches)
keep_pitches = ~seasonal_pitch.pi_pitch_type.isin(['KN', 'IB', 'XX'])
data_subset = seasonal_pitch[keep_pitches].copy()
data_subset['month'] = data_subset.date.dt.month
data_subset['week'] = data_subset.date.dt.week
data_subset['dayofyear'] = data_subset.date.dt.dayofyear
data_subset.groupby(['pitcher', 'pi_pitch_type']).lw.sum().sort_values()
kluber_pitches = (data_subset.loc[data_subset.pitcherid==446372,
['pi_pitch_type', 'month', 'dayofyear', 'lw']]
.sort_values(by='lw'))
kluber_pitches.head()
kluber_pitches[kluber_pitches.pi_pitch_type=='FC'].lw.sum()
kluber_month_sum = kluber_pitches.groupby(['pi_pitch_type', 'month']).lw.sum().reset_index()
g = sns.factorplot(data=kluber_month_sum, col="pi_pitch_type", x="month", y="lw",
col_wrap=3);
kluber_game_sum = (kluber_pitches.groupby(['pi_pitch_type', 'dayofyear']).lw
.sum().reset_index())
g = sns.factorplot(data=kluber_game_sum, col="pi_pitch_type", x="dayofyear", y="lw",
col_wrap=3)
g.set_xticklabels(rotation=90);
g = sns.factorplot(data=kluber_pitches, col="pi_pitch_type", x="dayofyear", y="lw",
col_wrap=3)
g.set_xticklabels(rotation=90);
PITCH = 'SL'
day_min = kluber_pitches.dayofyear - kluber_pitches.dayofyear.min()
day_kluber_fc, lw_kluber_fc = (kluber_pitches.assign(day=day_min)
.loc[kluber_pitches.pi_pitch_type==PITCH, ['day', 'lw']].T.values)
X = day_kluber_fc.reshape(-1,1)
y = lw_kluber_fc
ls = 0.1
tau = 0.5
cov = tau * pm.gp.cov.Matern32(1, ls)
X_vals = np.linspace(0, 2, 200)[:,None]
K = cov(X_vals).eval()
plt.figure(figsize=(14,4))
plt.plot(X_vals, pm.MvNormal.dist(mu=np.zeros(K.shape[0]), cov=K).random(size=3).T);
plt.xlabel("X");
with pm.Model() as kluber_model:
# Specify covariance function
ℓ = pm.Exponential("ℓ", 0.1)
η = pm.HalfCauchy("η", 1)
cov = η**2 * pm.gp.cov.Matern32(1, ℓ)
# Define marginal GP
gp = pm.gp.Marginal(cov_func=cov)
# Noise parameter
σ = pm.Uniform("σ", 0, 0.3)
# Pass data to marginal likelihood
ml = gp.marginal_likelihood("ml", X=X, y=y, noise=σ)
mp = pm.find_MAP()
mp['σ']
# new values from April through September
X_new = np.linspace(0, 180, 500)[:,None]
# add the GP conditional to the model, given the new X values
with kluber_model:
f_pred = gp.conditional("f_pred", X_new)
with kluber_model:
pred_samples = pm.sample_ppc([mp], vars=[f_pred], samples=1000)
# plot the results
fig, axes = plt.subplots(figsize=(12,5), sharex=True)
scale = 100
# plot the samples from the gp posterior with samples and shading
plot_gp_dist(axes, pred_samples["f_pred"]*scale, X_new, palette="bone_r");
# plot the data alongside the estimates
axes.plot(X, y*scale, 'ok', ms=3, alpha=0.1, label="Observed pitch");
axes.set_ylim(-0.1*scale, 0.1*scale)
axes.set_title("Corey Kluber {}".format(PITCH))
axes.set_ylabel("Linear weight")
mean_lw = (kluber_pitches[kluber_pitches.pi_pitch_type==PITCH].groupby('dayofyear')
.lw.mean()*scale)
mean_lw.index = mean_lw.index - mean_lw.index.min()
mean_lw.plot(ax=axes, style=':', label='Empirical mean')
# axis labels and title
plt.xlabel("Day")
plt.legend()
pred_samples['f_pred'][:, 150:].mean()
np.percentile(pred_samples['f_pred'][:, 150:], [2.5, 97.5])
player_lookup = dict(data_subset[['pitcherid', 'pitcher']].drop_duplicates().values)
def predict_weights(player_id, pitch):
player_pitches = (data_subset.loc[(data_subset.pitcherid==player_id) & (data_subset.pi_pitch_type==pitch),
['dayofyear', 'lw']]
.sort_values(by='lw'))
day_min = player_pitches.dayofyear - player_pitches.dayofyear.min()
day, lw = (player_pitches.assign(day=day_min)[['day', 'lw']].T.values)
X = day.reshape(-1,1)
y = lw
with pm.Model():
# Short-term variation
η_short = pm.HalfCauchy("η_short", beta=0.5, testval=0.1)
ℓ_short = pm.Gamma("ℓ_short", alpha=1, beta=0.75)
cov_short = η_short**2 * pm.gp.cov.Matern32(1, ℓ_short)
gp_short = pm.gp.Marginal(cov_func=cov_short)
# long term trend (1-2 month scale)
η_trend = pm.HalfCauchy("η_trend", beta=2, testval=2)
ℓ_trend = pm.Gamma("ℓ_trend", alpha=20, beta=0.5)
cov_trend = η_trend**2 * pm.gp.cov.ExpQuad(1, ℓ_trend)
gp_trend = pm.gp.Marginal(cov_func=cov_trend)
# Define marginal GP
gp = gp_trend + gp_short
# Noise parameter
σ = pm.Exponential("σ", 10)
cov_noise = pm.gp.cov.WhiteNoise(σ)
# Pass data to marginal likelihood
ml = gp.marginal_likelihood("ml", X=X, y=y, noise=cov_noise)
mp = pm.find_MAP()
X_new = np.linspace(0, 180, 500)[:,None]
f_pred = gp.conditional("f_pred", X_new)
pred_samples = pm.sample_ppc([mp], vars=[f_pred], samples=1000)
# plot the results
fig, axes = plt.subplots(figsize=(12,5), sharex=True)
scale = 100
# plot the samples from the gp posterior with samples and shading
plot_gp_dist(axes, pred_samples["f_pred"]*scale, X_new, palette="bone_r");
    # plot the data alongside the estimates
axes.plot(X, y*scale, 'ok', ms=3, alpha=0.1, label="Observed pitch");
axes.set_ylim(-0.1*scale, 0.1*scale)
axes.set_title("{} {}".format(player_lookup[player_id], pitch))
axes.set_ylabel("Linear weight")
mean_lw = player_pitches.groupby('dayofyear').lw.mean()*scale
mean_lw.index = mean_lw.index - mean_lw.index.min()
mean_lw.plot(ax=axes, style=':', label='Empirical mean')
# axis labels and title
plt.xlabel("Day")
plt.legend()
return pred_samples
pred_samples = predict_weights(545333, 'FA')
pred_samples['f_pred'][:, 150:].mean() * 100
np.percentile(pred_samples['f_pred'][:, 150:], [2.5, 97.5]) * 100
data_summary = (data_subset[data_subset.pi_pitch_type=='CU'].groupby(['pitcher', 'month']).lw
.agg([sum, np.size])
.reset_index()
.rename(columns={'sum': 'weight', 'size': 'n'}))
all_pitchers = data_summary.pitcher.unique()
pitcher_lookup = dict(zip(all_pitchers, np.arange(len(all_pitchers))))
data_summary['pitcher_idx'] = data_summary.pitcher.replace(pitcher_lookup)
# all_pitches = data_summary.pi_pitch_type.unique()
# pitch_lookup = dict(zip(all_pitches, np.arange(len(all_pitches))))
# data_summary['pitch_idx'] = data_summary.pi_pitch_type.replace(pitch_lookup)
data_summary['var_weight'] = data_summary['n'] / data_summary['n'].mean()
y = data_summary.weight.values
w = data_summary.var_weight.values
i = data_summary.pitcher_idx.values
with pm.Model() as hier_weights_curves:
p = pm.Beta('p', 1, 1)
v = pm.Bernoulli('v', p, shape=len(all_pitchers))
σ_a = pm.HalfCauchy('σ_a', 1)
η = pm.Normal('η', 0, 1, shape=len(all_pitchers))
α = pm.Deterministic('α', η*σ_a*v)
μ = pm.Normal('μ', 0, sd=100)
σ = pm.HalfCauchy('σ', 1)
r = pm.Deterministic('r', σ_a / (σ_a + σ))
weight_pred = pm.Normal('weight_pred', μ + α[i], w*σ, observed=y)
with hier_weights_curves:
trace = pm.sample(1000, tune=2000)
pm.energyplot(trace)
pm.traceplot(trace, varnames=['p', 'r']);
pm.summary(trace, varnames=['p', 'r']).round(3)
plt.figure(figsize=(5, 16))
pm.forestplot(trace, varnames=['α'], quartiles=False, ylabels=['']);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data import and cleaning
Step2: The data are messed up; name fields contain commas in a comma-separated file so two extra columns are created.
Step3: Clean pitch type column (convert all to upper case)
Step4: Parse dates to datetime types
Step5: I'm going to discard a few pitch types
Step6: So that I can look at patterns at different scales, I will create columns for month, week and day (game).
Step7: Data exploration
Step8: Let's look at Corey Kluber, just to isolate one player
Step9: About 10 runs saved from his cutter over 5 months
Step10: If you sum the allowed weights by month for each pitch, it gives the impression of a trend, in some instances.
Step11: However, if you look at the per-game observed run values, by summing the weights for each game, the trends mostly disappear.
Step12: If you take this further and look at the distribution of linear weights allowed per game, you can see the underlying variability in the data. I will proceed with the analysis using the pitch-level data, as the monthly/weekly sums would gloss over the variability associated with those summaries.
Step13: Predictive modeling
Step14: I'm going to use PyMC3, an open-source Bayesian library for Python that I created many years ago, and continue to develop and maintain today. There are a variety of other Python packages I could have used instead
Step15: So, this is a flexible covariance function that is parameterized by scale and lengthscale parameters, which we will estimate from the data. I will also specify a noise parameter $\sigma$ to characterize the variation of weights allowed within a game.
Step16: Here's an estimate of the standard deviation within days, which looks reasonable compared to the empirical, which is around 0.1.
Step17: The great thing about Gaussian processes is that it is trivial to predict to other points outside the dataset, so we can define a set of points that extends into September, and draw from the conditional distribution
Step18: Here we draw 1000 posterior samples from the predictive GP, to use for inference.
Step19: The plot below shows the estimated function, along with its uncertainty, which is characterized by many posterior draws from the estimated function. I've also plotted the observed mean of the daily weights allowed each day as a dashed blue line, as well as the per-pitch weights allowed themselves, for which I've specified a shading alpha so that multiple occurrences of the same weight value appear darker.
Step20: If we look at the mean of the estimates for days in September, we get
Step21: That is, an estimate wSL/C of around -1.5 runs per 100 pitches, with a credible interval of (-4.3, 1.4).
Step22: Here is Trevor Bauer's fastball, as another example. The prediction is smoothed relative to the simpler covariance model.
Step23: Here are the resulting predictions (mean and 95% interval) for September, shown as wSI/C
Step24: Conclusions
Step25: The predictiveness can be characterized by both $p$, which quantifies the proportion of players that differ from the league mean, and the proportion of "skill variance" relative to the total variance
|
558 | <ASSISTANT_TASK:>
Python Code:
# Ejemplo de lista, los valores van entre corchetes
una_lista = [4, "Hola", 6.0, 99 ]
# Ejemplo de tupla, los valores van entre paréntesis
una_tupla = (4, "Hola", 6.0, 99)
print ("Lista: " , una_lista)
print ("Tupla: " , una_tupla)
# Las tuplas y las listas aceptan operadores de comparación y devuelven un booleano
print (una_lista == una_tupla)
# una_lista = [4, "Hola", 6.0, 99 ]
# una_tupla = (4, "Hola", 6.0, 99)
# Uso del operador IN
4 in una_lista, 5 not in una_tupla
# Uso de la función LEN, que nos dice cuantos elementos componen una lista o una tupla
len(una_lista), len(una_tupla)
# Uso de la funcion SORTED para ordenar los elementos de una lista/tupla
# Al ordenar la lista no la modifica, se crea una nueva; de ahí que tengamos que definir una variable para ejecutar la función
lista = [ 5, 6, 7, 1, 4, 2, 9 ]
otra = sorted(lista)
otra
lista = [5, 3, 1, 6, 99]
print(lista)
# Mostrar el primer elemento
print(lista[0])
# Mostrar el tercer elemento
print(lista[2])
# Uso de la sintaxis [<inicio>:<final>:<salto>]
lista = [2, 9, 6, 4, 3, 71, 1, 32, 534, 325, 2, 6, 9, 0]
print(lista)
# Muestra 3 elementos, comenzando por el elemento situado en la posición 3 y
# terminando en el de la posición 6, con saltos de 1 elementos
# [3,6) es como se expresa en notación científica, donde el corchete incluye y el paréntesis excluye
print(lista[3:6:1])
# Muestra todos los elementos, desde el primero hasta el último, con saltos de 2 elementos
print(lista[::2])
# Se puede acceder a los elementos de una secuencia de manera inversa
print ( lista[-1] ) # El último elemento
print ( lista[-2] ) # El penúltimo elemento
# Lista con valores heterogéneos
lista = ["A", 26, "lista", 9, -62]
lista2 = [1, 2, 3, 4]
lista3 = ["A", "B", "C", "D"]
# Lista que incluye otra lista, así como otros valores
lista4 = [lista2, [6, 98, "Abc"]]
print(lista4)
# Acceder a los elementos de una lista que a su vez está incluida en otra lista (una matriz o array)
# [acceso a la lista][acceso a la posición de dicha lista]
d = lista4[1][2]
print(d)
# Lista simple o de 1 dimensión
matriz1 = [
1, 2, 3,
4, 5, 6,
7, 8, 9,
]
# Acceso al elemento 3 de la lista
print("Valor A: ", matriz1[3])
# Lista de 3 dimensiones
matriz2 = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
# Acceso al elemento 1 de la segunda lista
print("Valor B: ", matriz2[2][0])
lista = [1, 2, 3, 4]
print ( "Antes: ", lista)
lista[0] = 0.0
print ("Después: ", lista)
print(matriz2)
matriz2[1] = [0,0,0]
matriz2
# Con esta instrucción "matriz2.*?" nos muestra todas las posibilidades existentes:
# matriz2.append
# matriz2.clear
# matriz2.copy
# matriz2.count
# matriz2.extend
# matriz2.index
# matriz2.insert
# matriz2.pop
# matriz2.remove
# matriz2.reverse
# matriz2.sort
# Por ejemplo para añadir una lista a otra lista mediante la instrucción append.
# Se puede añadir sólo un valor, no es necesario añadir una lista entera
matriz = [[1,2,3],[4,5,6],[7,8,9]]
matriz.append(['A','B','C'])
print(matriz)
matriz.append(5)
matriz
# Para quitar un elemento de la lista, se usa la instrucción remove
matriz.remove(5)
matriz
# Ejemplo de concatenación de dos tuplas
# En el primer ejemplo se crea una tupla con dos tuplas concatenadas, definiendolas en la misma línea
tupla = (1,2,3) + ('a','b','c')
print(tupla)
# Creación de una tupla a partir de dos tuplas definidas previamente
tupla1 = (6,9,7,'abc')
tupla2 = (1,6,0,'def')
tupla_f = tupla1 + tupla2
tupla_f
# Se pueden utilizar operadores matemáticos con las listas o tuplas
# A destacar que el uso de los operadores + y * no modifican las secuencias originales, sino que crean nuevas secuencias
tupla_m = tupla1 * 4
tupla_m
# Se define una tupla llamada "laborales"
laborales = (1, 2, 3, 4, 5 )
# Se define una variable llamada "laborales" con una serie de valores para dicha tupla
# La definición de la variable puede hacerse antes de los valores o despues de los valores
lunes, martes, miercoles, jueves, viernes = laborales
# Si preguntamos por un valor de dicha variable nos devolverá su correspondencia dentro de la tupla
martes
# Creamos una tupla llamada "dias" anidando dos tuplas, una ya existente ("laborales") y otra que creamos al anidar (6,7)
dias = laborales, (6, 7)
# El resultado es una tupla de tuplas
dias
# Crea la variable "dias" a partir de la variable "laborales", añadiendo dos valores nuevos (sabado, domingo)
laborales, (sabado, domingo) = dias
sabado
# Definimos una tupla
tupla = (3, 4, 5)
# Mediante la función "list" creamos una lista a partir de la tupla anterior
lista = list(tupla)
lista
# Se puede modificar una lista especificando la posición a modificar y un valor
lista[0] = None # valor nulo en Python
lista
lista[0] = 3
lista
# Definimos una lista
lista = ['Lunes', 'Jueves']
print(lista)
# Mediante la instrucción "append(valor)" añadimos a dicha lista un valor
# Con append(valor) añadimos siempre el elemento al final de la lista
lista.append('Viernes')
print(lista)
# La instrucción "insert(posición,valor)" es similar a append(valor), con la salvedad de que nos permite añadir el elemento
# en la posición que nostros queramos
lista.insert(1, 'Martes')
print(lista)
lista.insert(2, 'Miércoles')
print(lista)
# Para usar la instrucción "pop(posición)" hay que definir una variable que realice dicha operación
# En este ejemplo borramos la posición 3 "Jueves"
borrar = lista.pop(3)
borrar
# Verificamos que la lista ya no contiene el elemento "Jueves"
lista
lista.remove('Viernes')
lista
# Definimos una lista
lista = [5,7,2,0,4,7,1,5,4,3,4,1,9,0]
# Mediante el método "sort()" ordenamos la lista
lista.sort()
# Con "remove()" eliminamos un elemento especificando entre paréntesis el primer valor a eliminar, en caso de que haya dos
# o más elementos igules en la lista. La diferencia con "pop(posición)" es que si la lista no está ordenada eliminamos el primer
# elemento que coincida, con pop() podemos eliminar un elemento concreto.
print(lista)
lista.remove(0)
lista
# Se define la variable 'l' como un rango que empieza en 0, termina en 11 y va en saltos de 2
l = range(0,11,2)
# Después mediante la función list() creamos una lista de 'l'
list(l)
# Ejemplo desglosado, en este caso si no se especifica un paso, lo realiza de 1 en 1
# Variable y muestra del valor de dicha variable
l = range(-5, 5)
l
# Uso de list() para crear la lista de la variable 'l'
list(l)
# Definimos una variable con un valor string
a = "Ana"
# Se considera que la cadena o string "Ana" es una secuencia de 3 posiciones, siendo:
# A --> posición 0
# n --> posición 1
# a --> posición 2
# Por tanto se puede acceder a cada uno de los caracteres que componen un string como si fueran elementos de una lista o tupla
a, a[0], a[2]
# Los espacios no se tienen en cuenta
mensaje = "Vaya calor que hace"
mensaje[0], mensaje[12]
# Las cadenas en Python son inmutables. Eso quiere decir que no es posible modificar una cadena sin crear otra nueva.
b = mensaje.replace('V', 'v')
b, mensaje
# Uso de len() para saber la longitud de la string como si fuera una lista o tupla
len(mensaje)
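# Extra illustration (added, not part of the original tutorial): since strings are
# sequences, the same slice syntax [<start>:<stop>:<step>] used for lists and tuples
# also works on them.
print(mensaje[0:4])    # 'Vaya' -> the first four characters
print(mensaje[-4:])    # 'hace' -> the last four characters
print(mensaje[::-1])   # the whole string reversed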
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br />
Step2: The elements of sequences, both lists and tuples, are heterogeneous, so it is possible to define lists that contain numeric values and character strings, as well as other lists
Step3: In the case of lists, we can modify the stored data
Step4: Lists or tuples can be concatenated using the + operator
Step5: <BR />
Step6: We show another example with nested tuples
Step7: <BR />
Step8: <br />
Step9: The pop operation removes the element of the list that occupies a given position.
Step10: But it may be the case that we need to remove an element from the list without knowing the position it occupies. In those cases we will use the remove method.
Step11: <BR />
Step12: <BR />
Step13: <BR />
|
559 | <ASSISTANT_TASK:>
Python Code:
import os
import datetime
import numpy
import scipy.signal
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.dates as md
%matplotlib inline
paths = ['/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/']
times = numpy.ndarray((0,2))
start_time = datetime.datetime.now()
fits_browsed = 0
for path in paths:
for root, dirs, files in os.walk(path):
print("")
print(root, " ")
for name in files:
if name.endswith(("snap.fits")):
hdulist = fits.open(os.path.join(root, name))
DATE_ts = datetime.datetime.strptime(hdulist[1].header['DATE'], '%Y-%m-%dT%H:%M:%S').timestamp()*1000+2*60*60*1000
crval = hdulist[1].header['CRVAL2']
#print(DATE_ts, crval)
time = [DATE_ts - hdulist[1].header['CDELT2']* hdulist[1].header['NAXIS2'], crval]
times = numpy.vstack( [times, time] )
hdulist.close()
print("+", end='')
fits_browsed += 1
times.sort(axis=0)
print("")
print("===================================")
print(fits_browsed, "FITS files were successfully processed")
print("It takes", datetime.datetime.now()-start_time)
paths = ['/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/03/',
'/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/04/',
'/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/05/']
times_ts = numpy.ndarray((0,2))
start_time = datetime.datetime.now()
fits_browsed = 0
for path in paths:
for root, dirs, files in os.walk(path):
print("")
print(root, " ")
for name in files:
if name.endswith(("snap.fits")):
try:
hdulist = fits.open(os.path.join(root, name))
sysdate = hdulist[1].header['SYSDATE1']
sysdate_beg = sysdate - hdulist[1].header['CDELT2']* hdulist[1].header['NAXIS2']
crval = hdulist[1].header['CRVAL2']
time = [sysdate_beg, crval]
times_ts = numpy.vstack( [times_ts, time] )
hdulist.close()
print("+", end='')
fits_browsed += 1
except Exception:
print("-", end='')
times_ts.sort(axis=0)
print("")
print("===================================")
print(fits_browsed, "FITS files were successfully processed")
print("It takes", datetime.datetime.now()-start_time)
plt.figure(figsize=(30, 20))
data=md.date2num([datetime.datetime.fromtimestamp(ts, datetime.timezone.utc) for ts in times[:,0]/1000])
data_ts=md.date2num([datetime.datetime.fromtimestamp(ts, datetime.timezone.utc) for ts in times_ts[:,0]/1000])
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
ax.set_title('Difference between DATE and CRVAL2 (radio-observer time of 1st .FITS row)')
ax.set_xlabel('datetime [UTC]')
ax.set_ylabel('time difference (DATE - CRVAL2) [s]')
plt.plot(data_ts, (times_ts[:,0]-times_ts[:,1])/1000.0, 'or')
plt.plot(data, (times[:,0]-times[:,1])/1000.0, 'xb')
plt.plot(data, scipy.signal.savgol_filter(times[:,0]-times[:,1],501, 3)/1000.0, 'w')
plt.show()
fits_path = '/home/roman/mnt/server-space/storage/bolidozor/ZVPP/ZVPP-R6/snapshots/2017/09/04/19/20170904192530311_ZVPP-R6_snap.fits'
print("")
hdulist = fits.open(fits_path)
sysdate = hdulist[1].header['SYSDATE1']
#sysdate_beg = sysdate - hdulist[1].header['CDELT2']* hdulist[1].header['NAXIS2']
DATE_ts = datetime.datetime.strptime(hdulist[1].header['DATE-OBS'], '%Y-%m-%dT%H:%M:%S').timestamp()*1000.0
crval = hdulist[1].header['CRVAL2']
hdulist.close()
time = (DATE_ts - crval)/1000.0
if time>0:
print("difference between times is", time, "s. (SYSDATE is ahead, radio-observer time is late)")
else:
print("difference between times is", time, "s. (CRVAL2 is ahead, radio-observer time is in the future :-) )")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SYSDATE1
Step2: Plotter
Step3: <br>
|
560 | <ASSISTANT_TASK:>
Python Code:
import boto3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import io
import os
import sys
import time
import json
from IPython.display import display
from time import strftime, gmtime
import sagemaker
from sagemaker.predictor import csv_serializer
from sagemaker import get_execution_role
sess = sagemaker.Session()
role = get_execution_role()
region = boto3.Session().region_name
print("IAM role ARN: {}".format(role))
#bucket = 'snowflake-sagemaker-workshop'
bucket = '<REPLACE WITH YOUR BUCKET NAME>'
prefix = 'churn-analytics-lab'
import snowflake.connector
# Connecting to Snowflake using the default authenticator
ctx = snowflake.connector.connect(
user='sagemaker',
password='AWSSF123',
account='<ACCOUNT>',
warehouse='SAGEMAKER_WH',
database='ML_WORKSHOP',
schema='PUBLIC'
)
# Query Snowflake Data
cs=ctx.cursor()
allrows=cs.execute("""select Cust_ID,STATE,ACCOUNT_LENGTH,AREA_CODE,PHONE,INTL_PLAN,VMAIL_PLAN,VMAIL_MESSAGE,
                   DAY_MINS,DAY_CALLS,DAY_CHARGE,EVE_MINS,EVE_CALLS,EVE_CHARGE,NIGHT_MINS,NIGHT_CALLS,
                   NIGHT_CHARGE,INTL_MINS,INTL_CALLS,INTL_CHARGE,CUSTSERV_CALLS,
                   CHURN from CUSTOMER_CHURN """).fetchall()
churn = pd.DataFrame(allrows)
churn.columns=['Cust_id','State','Account Length','Area Code','Phone','Intl Plan', 'VMail Plan', 'VMail Message','Day Mins',
'Day Calls', 'Day Charge', 'Eve Mins', 'Eve Calls', 'Eve Charge', 'Night Mins', 'Night Calls','Night Charge',
'Intl Mins','Intl Calls','Intl Charge','CustServ Calls', 'Churn?']
pd.set_option('display.max_columns', 500) # Make sure we can see all of the columns
pd.set_option('display.max_rows', 10) # Keep the output on one page
churn
# Frequency tables for each categorical feature
for column in churn.select_dtypes(include=['object']).columns:
display(pd.crosstab(index=churn[column], columns='% observations', normalize='columns'))
# Histograms for each numeric features
display(churn.describe())
%matplotlib inline
hist = churn.hist(bins=30, sharey=True, figsize=(10, 10))
churn = churn.drop('Phone', axis=1)
churn['Area Code'] = churn['Area Code'].astype(object)
for column in churn.select_dtypes(include=['object']).columns:
if column != 'Churn?':
display(pd.crosstab(index=churn[column], columns=churn['Churn?'], normalize='columns'))
for column in churn.select_dtypes(exclude=['object']).columns:
print(column)
hist = churn[[column, 'Churn?']].hist(by='Churn?', bins=30)
plt.show()
display(churn.corr())
pd.plotting.scatter_matrix(churn, figsize=(18, 18))
plt.show()
churn = churn.drop(['Day Charge', 'Eve Charge', 'Night Charge', 'Intl Charge'], axis=1)
model_data = pd.get_dummies(churn)
model_data = pd.concat([model_data['Churn?_True.'], model_data.drop(['Churn?_False.', 'Churn?_True.'], axis=1)], axis=1)
to_split_data = model_data.drop(['Cust_id'], axis=1)
train_data, validation_data, test_data = np.split(to_split_data.sample(frac=1, random_state=1729), [int(0.7 * len(to_split_data)), int(0.9 * len(to_split_data))])
train_data.to_csv('train.csv', header=False, index=False)
validation_data.to_csv('validation.csv', header=False, index=False)
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 1000)
display(train_data)
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('validation.csv')
from sagemaker.amazon.amazon_estimator import get_image_uri
xgb_training_container = get_image_uri(boto3.Session().region_name, 'xgboost', '0.90-1')
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv')
xgb = sagemaker.estimator.Estimator(xgb_training_container,
role,
train_instance_count=1,
train_instance_type='ml.m5.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sess)
xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
objective='binary:logistic',
num_round=100)
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
compiled_model = xgb
#try:
# xgb.create_model()._neo_image_account(boto3.Session().region_name)
#except:
# print('Neo is not currently supported in', boto3.Session().region_name)
#else:
# output_path = '/'.join(xgb.output_path.split('/')[:-1])
# compiled_model = xgb.compile_model(target_instance_family='ml_c5',
# input_shape={'data':[1, 69]},
# role=role,
# framework='xgboost',
# framework_version='0.7',
# output_path=output_path)
# compiled_model.name = 'deployed-xgboost-customer-churn-c5'
# compiled_model.image = get_image_uri(sess.boto_region_name, 'xgboost-neo', repo_version='latest')
batch_input = model_data.iloc[:,1:]
batch_input.to_csv('model.csv', header=False, index=False)
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'model/model.csv')).upload_file('model.csv')
s3uri_batch_input ='s3://{}/{}/model'.format(bucket, prefix)
print('Batch Transform input S3 uri: {}'.format(s3uri_batch_input))
s3uri_batch_output= 's3://{}/{}/out'.format(bucket, prefix)
print('Batch Transform output S3 uri: {}'.format(s3uri_batch_output))
from sagemaker.transformer import Transformer
BATCH_INSTANCE_TYPE = 'ml.c5.xlarge'
transformer = compiled_model.transformer(instance_count=1,
strategy='SingleRecord',
assemble_with='Line',
instance_type= BATCH_INSTANCE_TYPE,
accept = 'text/csv',
output_path=s3uri_batch_output)
transformer.transform(s3uri_batch_input,
split_type= 'Line',
content_type= 'text/csv',
input_filter = "$[1:]",
join_source = "Input",
output_filter = "$[0,-1,-2]")
transformer.wait()
batched_churn_scores = pd.read_csv(s3uri_batch_output+'/model.csv.out', usecols=[0,1], names=['id','scores'])
gt_df = pd.DataFrame(model_data['Churn?_True.']).reset_index(drop=True)
results_df= pd.concat([gt_df,batched_churn_scores],axis=1,join_axes=[gt_df.index])
pd.crosstab(index=results_df['Churn?_True.'], columns=np.round(results_df['scores']), rownames=['actual'], colnames=['predictions'])
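# Optional summary metrics (an added sketch, not in the original notebook): condense the
# confusion matrix above into accuracy, precision and recall for the churn class.
pred_labels = np.round(results_df['scores'])
actual_labels = results_df['Churn?_True.']
tp = ((pred_labels == 1) & (actual_labels == 1)).sum()
fp = ((pred_labels == 1) & (actual_labels == 0)).sum()
fn = ((pred_labels == 0) & (actual_labels == 1)).sum()
print('accuracy = {:.3f}, precision = {:.3f}, recall = {:.3f}'.format(
    (pred_labels == actual_labels).mean(), tp / (tp + fp), tp / (tp + fn)))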
results_df.to_csv('results.csv', header=False, index=False)
cs.execute("PUT file://results.csv @ml_results")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's set the S3 bucket and prefix that you want to use for training and model data. This bucket should be created within the same region as the Notebook Instance, training, and hosting.
Step2: Data
Step4: Explore
Step5: By modern standards, it’s a relatively small dataset, with only 3,333 records, where each record uses 21 attributes to describe the profile of a customer of an unknown US mobile operator. The attributes are
Step6: We can see immediately that
Step7: Next let's look at the relationship between each of the features and our target variable.
Step8: Interestingly we see that churners appear
Step9: We see several features that essentially have 100% correlation with one another. Including these feature pairs in some machine learning algorithms can create catastrophic problems, while in others it will only introduce minor redundancy and bias. Let's remove one feature from each of the highly correlated pairs
Step10: Now that we've cleaned up our dataset, let's determine which algorithm to use. As mentioned above, there appear to be some variables where both high and low (but not intermediate) values are predictive of churn. In order to accommodate this in an algorithm like linear regression, we'd need to generate polynomial (or bucketed) terms. Instead, let's attempt to model this problem using gradient boosted trees. Amazon SageMaker provides an XGBoost container that we can use to train in a managed, distributed setting, and then host as a real-time prediction endpoint. XGBoost uses gradient boosted trees which naturally account for non-linear relationships between features and the target variable, as well as accommodating complex interactions between features.
Step11: And now let's split the data into training, validation, and test sets. This will help prevent us from overfitting the model, and allow us to test the model's accuracy on data it hasn't already seen.
Step12: Now we'll upload these files to S3.
Step13: Train
Step14: Then, because we're training with the CSV file format, we'll create s3_inputs that our training function can use as a pointer to the files in S3.
Step15: Now, we can specify a few parameters like what type of training instances we'd like to use and how many, as well as our XGBoost hyperparameters. A few key hyperparameters are
Step16: Compile
Step17: Batch Inference
Step18: Batch transform jobs run asynchronously, and are non-blocking by default. Run the command below to block until the batch job completes.
Step19: There are many ways to compare the performance of a machine learning model, but let's start simply by comparing actual to predicted values. In this case, we're simply predicting whether the customer churned (1) or not (0), which produces a simple confusion matrix.
Step20: Upload Churn Score to Snowflake
|
561 | <ASSISTANT_TASK:>
Python Code:
import music21
from music21.chord import Chord
from music21.duration import Duration
from music21.instrument import Instrument
from music21.note import Note, Rest
from music21.stream import Stream
from music21.tempo import MetronomeMark
from music21.volume import Volume
import os
data_dir = 'data/working/example-parametric-note'
os.makedirs(data_dir, exist_ok=True)
Note('C')
s = Stream([Note('C')])
s.show()
s.show('midi')
s.write('midi', data_dir + '/c.midi')
n = Note('C')
n
def describe_note(note):
p = note.pitch
print(note)
print('pitch:', note.pitch)
print('duration:', note.duration)
print('name:', p.name)
print('full name:', p.fullName)
print('pitch class:', p.pitchClass)
print('octave:', p.octave)
print('frequency', p.frequency, 'Hz')
print('midi:', p.midi)
print('pitch space:', p.ps) # like MIDI, but floating point
describe_note(n)
# different note in the default octave
describe_note(Note('E'))
# a note in the specific octave
describe_note(Note('G#3'))
# note specified by its octave and pitch class within an octave
describe_note(Note(octave=2, pitchClass=3))
# note specified by its integer MIDI number
describe_note(Note(midi=21))
# microtonal pitch using the pitch space attribute (like MIDI but floating point)
describe_note(Note(ps=21.25))
# note with duration of half of a quarter note
note = Note(midi=21, duration=Duration(0.5))
describe_note(note)
# note with duration of half of a quarter note
note = Note(midi=21, duration=Duration(2.5))
describe_note(note)
for v in [0, 32, 64, 127]:
print(Volume(velocity=v))
for v in [0, 0.25, 0.5, 1.0]:
print(Volume(velocityScalar=v))
Chord(['C']).volume
c = Chord([Note('C')])
c.volume = Volume(velocityScalar=0.25)
c.volume
metronome = MetronomeMark(number=60)
metronome.durationToSeconds(Duration(1.0))
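# Extra comparison (added, not in the original notebook): at 120 BPM a quarter note lasts
# half as long as at 60 BPM, and a half note (Duration(2.0)) lasts twice as long.
print(MetronomeMark(number=120).durationToSeconds(Duration(1.0)))  # 0.5 seconds
print(MetronomeMark(number=60).durationToSeconds(Duration(2.0)))   # 2.0 seconds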
Stream([MetronomeMark(number=60), Note('C')]).show()
def make_instrument(id):
i = Instrument()
i.midiProgram = id
return i
def chord_with_volume(chord, volume):
chord.volume = Volume(velocityScalar=volume)
return chord
def generate_single_note(midi_number, midi_instrument=0, volume=1.0, duration=1.0, tempo=120):
    '''Generates a stream containing a single note with given parameters.
    midi_number - MIDI note number, 0 to 127
    midi_instrument - MIDI instrument number, 0 to 127
    duration - floating point number (in quarter note lengths)
    volume - 0.0 to 1.0
    tempo - number of quarter notes per minute (eg. 120)
    Note that there's a quarter note rest at the beginning and at the end.
    '''
return Stream([
MetronomeMark(number=tempo),
make_instrument(midi_instrument),
chord_with_volume(Chord([
Note(midi=midi_number, duration=Duration(duration))
]), volume)
])
generate_single_note(60).show('midi')
s = Stream()
s.append(make_instrument(50))
s.append(Note(midi=60))
s.append(Note(midi=64))
s.append(Note(midi=67))
s.write('midi', data_dir + '/sequence.midi')
s.show('midi')
s = Stream()
s.append(make_instrument(50))
s.append(Note(midi=60))
s.append(Rest(duration=Duration(2.0)))
s.append(Note(midi=64))
s.append(Rest(duration=Duration(2.0)))
s.append(Note(midi=67))
s.write('midi', data_dir + '/sequence_separated.midi')
s.show('midi')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We're about to create a Note object which represents a single note and both its pitch and duration.
Step2: If we have MuseScore installed, we can view the music sheet representation.
Step3: Note that there's some rest at the beginning and end of the MIDI file. It looks like a quarter-note rest. The reason is that "MIDI controllers may not be able to play notes at deltaTime=0" See
Step4: Properties of the Note
Step5: Creating Note with parameters
Step6: Changing duration
Step7: Changing volume
Step8: How to set tempo?
Step9: Just add a metronome mark at the beginning of the stream.
Step11: Sequence of notes
Step12: Let's make a sequence. Note that by just passing a list of notes to the Stream we get a chord, not a sequence, so we must append each note separately.
Step13: In the previous example we see that notes may overlap. So let's add some rests to make better separation.
|
562 | <ASSISTANT_TASK:>
Python Code:
range(0,10)
x =range(0,10)
type(x)
start = 0 #Default
stop = 20
x = range(start,stop)
x
x = range(start,stop,2)
#Show
x
for num in range(10):
print num
for num in xrange(10):
print num
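# Note (added for clarity, not part of the original notebook): xrange builds its values
# lazily instead of materialising the whole list in memory, which is why it is preferred
# for large ranges in Python 2. In Python 3 xrange is gone and range itself behaves this
# way; wrap it in list(range(10)) when an actual list is needed.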
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Great! Notice how it went up to 20, but doesn't actually produce 20. Just like in indexing. What about step size? We can specify that as a third argument
Step2: Awesome! Well that's it...or is it?
|
563 | <ASSISTANT_TASK:>
Python Code:
import graphlab
graphlab.canvas.set_target("ipynb")
sf = graphlab.SFrame.read_csv("/Users/chengjun/bigdata/w15", header=False)
sf
dir(sf['X1'])
bow = sf['X1']._count_words()
type(sf['X1'])
type(bow)
bow.dict_has_any_keys(['limited'])
bow.dict_values()[0][:20]
sf['bow'] = bow
type(sf['bow'])
len(sf['bow'])
sf['bow'][0].items()[:5]
sf['tfidf'] = graphlab.text_analytics.tf_idf(sf['X1'])
sf['tfidf'][0].items()[:5]
sf.show()
sf
docs = sf['bow'].dict_trim_by_values(2)
docs = docs.dict_trim_by_keys(graphlab.text_analytics.stopwords(), exclude=True)
m = graphlab.topic_model.create(docs)
m
m.get_topics()
topics = m.get_topics().unstack(['word','score'], new_column_name='topic_words')['topic_words'].apply(lambda x: x.keys())
for topic in topics:
print topic
pred = m.predict(docs)
pred.show()
pred = m.predict(docs, output_type='probabilities')
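# Extra illustration (an added sketch; assumes each element of `pred` is a per-topic
# probability vector, as returned by output_type='probabilities'): inspect how strongly
# the model assigns the first document to its most likely topic.
first_doc_probs = pred[0]
best_topic = max(range(len(first_doc_probs)), key=lambda t: first_doc_probs[t])
print('Most likely topic for document 0: {}'.format(best_topic))
print('Assigned probability: {}'.format(max(first_doc_probs)))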
m['vocabulary']
m['topics']
def print_topics(m):
topics = m.get_topics(num_words=5)
topics = topics.unstack(['word','score'], new_column_name='topic_words')['topic_words']
topics = topics.apply(lambda x: x.keys())
for topic in topics:
print topic
print_topics(m)
m2 = graphlab.topic_model.create(docs,
num_topics=20,
initial_topics=m['topics'])
associations = graphlab.SFrame()
associations['word'] = ['recognition']
associations['topic'] = [0]
m2 = graphlab.topic_model.create(docs,
num_topics=20,
num_iterations=50,
associations=associations,
verbose=False)
m2.get_topics(num_words=10)
print_topics(m2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Data
Step2: Transformations
Step3: Text cleaning
Step4: Topic modeling
Step5: Initializing from other models
Step6: Seeding the model with prior knowledge
|
564 | <ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
# Load training and test data sets with labels
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# define and initialize the tensors
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W0 = tf.Variable(tf.truncated_normal([784, 100], stddev=0.1))
b0 = tf.Variable(tf.zeros([100]))
W1 = tf.Variable(tf.truncated_normal([100, 10], stddev=0.1))
b1 = tf.Variable(tf.zeros([10]))
# Feed forward neural network with one hidden layer
# y0 is the hidden layer with sigmoid activation
y0 = tf.sigmoid(tf.matmul(x, W0) + b0)
# y1 is the output layer (softmax)
# y1[n] is the predicted probability that the input image depicts number 'n'
y1 = tf.nn.softmax(tf.matmul(y0, W1) + b1)
# The the loss function is defined as cross_entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y1), reduction_indices=[1]))
# train the network using gradient descent
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)
# start a TensorFlow interactive session
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
batch_size = 100
train_iterations = 30000
# There are mnist.train.num_examples=55000 images in the train sample
# train in batches of 'batch_size' images at a time
# Repeat for 'train_iterations' number of iterations
# Training batches are randomly calculated as each new epoch starts
for i in range(train_iterations):
    batch = mnist.train.next_batch(batch_size)
    train_data = {x: batch[0], y_: batch[1]}
    train_step.run(feed_dict=train_data)
# Test the accuracy of the trained network
correct_prediction = tf.equal(tf.argmax(y1, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy of the trained network over the test images: %s" %
accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
# There are 2 matrices and 2 vectors used in this neural network:
W0_matrix=W0.eval()
b0_array=b0.eval()
W1_matrix=W1.eval()
b1_array=b1.eval()
print ("W0 is matrix of size: %s " % (W0_matrix.shape,) )
print ("b0 is array of size: %s " % (b0_array.shape,) )
print ("W1 is matrix of size: %s " % (W1_matrix.shape,) )
print ("b1 is array of size: %s " % (b1_array.shape,) )
testlabels=tf.argmax(mnist.test.labels,1).eval()
testimages=mnist.test.images
print ("testimages is matrix of size: %s " % (testimages.shape,) )
print ("testlabels is array of size: %s " % (testlabels.shape,) )
import numpy as np
def softmax(x):
    '''Compute the softmax function on a numpy array'''
return np.exp(x) / np.sum(np.exp(x), axis=0)
def sigmoid(x):
    '''Compute the sigmoid function on a numpy array'''
return (1 / (1 + np.exp(-x)))
testimage=testimages[0]
testlabel=testlabels[0]
hidden_layer = sigmoid(np.dot(testimage, W0_matrix) + b0_array)
predicted = np.argmax(softmax(np.dot(hidden_layer, W1_matrix) + b1_array))
print ("image label %d, predicted value by the neural network: %d" % (testlabel, predicted))
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(testimage.reshape(28,28), cmap='Greys')
import cx_Oracle
ora_conn = cx_Oracle.connect('mnist/mnist@dbserver:1521/orcl.cern.ch')
cursor = ora_conn.cursor()
i = 0
sql="insert into tensors values ('W0', :val_id, :val)"
for column in W0_matrix:
array_values = []
for element in column:
array_values.append((i, float(element)))
i += 1
cursor.executemany(sql, array_values)
ora_conn.commit()
i = 0
sql="insert into tensors values ('W1', :val_id, :val)"
for column in W1_matrix:
array_values = []
for element in column:
array_values.append((i, float(element)))
i += 1
cursor.executemany(sql, array_values)
ora_conn.commit()
i = 0
sql="insert into tensors values ('b0', :val_id, :val)"
array_values = []
for element in b0_array:
array_values.append((i, float(element)))
i += 1
cursor.executemany(sql, array_values)
i = 0
sql="insert into tensors values ('b1', :val_id, :val)"
array_values = []
for element in b1_array:
array_values.append((i, float(element)))
i += 1
cursor.executemany(sql, array_values)
ora_conn.commit()
image_id = 0
array_values = []
sql="insert into testdata values (:image_id, :label, :val_id, :val)"
for image in testimages:
val_id = 0
array_values = []
for element in image:
array_values.append((image_id, testlabels[image_id], val_id, float(element)))
val_id += 1
cursor.executemany(sql, array_values)
image_id += 1
ora_conn.commit()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definition of the neural network
Step2: Train the network
Step3: Learning exercise
Step4: Extracting the test images and labels as numpy arrays
Step7: Example of how to run the neural network "manually" using the tensor values extracted into numpy arrays
Step8: Visual test that the predicted value is indeed correct
Step9: Transfer of the tensors and test data into Oracle tables
Step10: Transfer the matrixes W0 and W1 into the table tensors (which must be precreated as described above)
Step11: Transfer the vectors b0 and b1 into the table "tensors" (the table is expected to exist on the DB, create it using the SQL described above)
Step12: Transfer the test data with images and labels into the table "testdata" (the table is expected to exist on the DB, create it using the SQL described above)
|
565 | <ASSISTANT_TASK:>
Python Code:
import json
import urllib.request
from time import sleep
def search_magazine(key='JUMPrgl', n_pages=25):
「ユニークID」「雑誌巻号ID」あるいは「雑誌コード」にkey含む雑誌を,
n_pages分取得する関数です.
url = 'https://mediaarts-db.bunka.go.jp/mg/api/v1/results_magazines?id=' + \
key + '&page='
magazines = []
for i in range(1, n_pages):
response = urllib.request.urlopen(url + str(i))
content = json.loads(response.read().decode('utf8'))
magazines.extend(content['results'])
return magazines
magazines = search_magazine()
len(magazines)
magazines[0]
magazines[-1]
def extract_data(content):
contentに含まれる目次情報を取得する関数です.
- year: 発行年
- no: 号数
- title: 作品名
- author: 著者
- color: カラーか否か
- pages: 掲載ページ数
- start_page: 作品のスタートページ
- best: 巻頭から数えた掲載順
- worst: 巻末から数えた掲載順
# マンガ作品のみ抽出します.
comics = [comic for comic in content['contents']
if comic['category']=='マンガ作品']
data = []
year = int(content['basics']['date_indication'][:4])
# 号数が記載されていない場合があるので,例外処理が必要です.
try:
no = int(content['basics']['number_indication'])
except ValueError:
no = content['basics']['number_indication']
for comic in comics:
title= comic['work']
if not title:
continue
# ページ数が記載されていない作品があるので,例外処理が必要です.
# 特に理由はないですが,無記載の作品は10ページとして処理を進めます.
try:
pages = int(comic['work_pages'])
except ValueError:
pages = 10
# 「いぬまるだしっ」等,1週に複数話掲載されている作品に対応するため
# data中にすでにtitleが含まれる場合は,新規datumとして登録せずに,
# 既存のdatumのページ数のみ加算します.
if len(data) > 0 and title in [datum['title'] for datum in data]:
data[[datum['title'] for datum in
data].index(title)]['pages'] += pages
else:
data.append({
'year': year,
'no': no,
'title': comic['work'],
'author': comic['author'],
'subtitle': comic['subtitle'],
'color': comic['note'].count('カラー'),
'pages': int(comic['work_pages']),
'start_pages': int(comic['start_page'])
})
# 企画物のミニマンガを除外するため,合計5ページ以下のdatumはリストから除外します.
filterd_data = [datum for datum in data if datum['pages'] > 5]
for n, datum in enumerate(filterd_data):
datum['best'] = n + 1
datum['worst'] = len(filterd_data) - n
return filterd_data
def save_data(magazines, offset=0, file_name='data/wj-api.json'):
magazinesに含まれる全てのmagazineについて,先頭からoffset以降の巻号の
目次情報を取得し,file_nameに保存する関数です.
url = 'https://mediaarts-db.bunka.go.jp/mg/api/v1/magazine?id='
# ファイル先頭行
if offset == 0:
with open(file_name, 'w') as f:
f.write('[\n')
with open(file_name, 'a') as f:
# magazines中のmagazine毎にWeb APIを叩きます.
for m, magazine in enumerate(magazines[offset:]):
response = urllib.request.urlopen(url + str(magazine['id']),
timeout=30)
content = json.loads(response.read().decode('utf8'))
# 前記の関数extract_data()で,必要な情報を抽出します.
comics = extract_data(content)
print('{0:4d}/{1}: Extracted data from {2}'.\
format(m + offset, len(magazines), url + str(magazine['id'])))
# comics中の各comicについて,file_nameに情報を保存します.
for n, comic in enumerate(comics):
# ファイル先頭以外の,magazineの最初のcomicの場合は,
# まず',\n'を追記.
if m + offset > 0 and n == 0:
f.write(',\n')
json.dump(comic, f, ensure_ascii=False)
# 最後のcomic以外は',\n'を追記.
if not n == len(comics) - 1:
f.write(',\n')
print('{0:9}: Saved data to {1}'.format(' ', file_name))
# サーバへの負荷を抑えるため,必ず一時停止します.
sleep(3)
# ファイル最終行
with open(file_name, 'a') as f:
f.write(']')
save_data(magazines)
save_data(magazines, offset=448)
save_data(magazines, offset=500)
save_data(magazines, offset=1269)
save_data(magazines, offset=1889)
save_data(magazines, offset=2274)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Retrieving the magazine issue search results
Step3: The Web API lets you specify a 'unique ID', 'magazine issue ID' or 'magazine code' through the id parameter, and the results page to fetch (100 entries per page, default 1) through the page parameter. Weekly Shonen Jump issues contain JUMPrgl in their 'magazine issue ID', so we pass id=JUMPrgl. Because the search results for Weekly Shonen Jump span 24 pages in total (2,320 entries), page has to be set to 1 through 24 in turn.
Step5: Retrieving the magazine issue information
Step7: It is an unglamorous detail, but some gag manga took extra work to handle. For example, 「いぬまるだしっ」 basically ran two chapters per week, yet the database records each chapter as a separate row. Since these have to be counted as a single work, when a comic's title is already present in data we do not append it as a new datum but add its pages to the existing datum instead. Also, 「ピューと吹く!ジャガー」, for instance, was always printed at the very end of the magazine during its run regardless of its popularity (it really was hilarious). I debated whether to exclude it as an outlier, but in the end decided to keep it.
Step8: To cope flexibly with timeouts, the table-of-contents information is deliberately processed one issue at a time instead of in a single batch. Note also that sleep() pauses between requests so as not to put load on the server.
Step9: If a timeout occurs, resume using offset. For example, if it timed out at 447/2320, run save_data(offset=448).
|
566 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** -0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** -0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x: 1.0/(1+np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
# def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
# self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer - f is sigmoid
# Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer - f is identity function
#### Implement the backward pass here ####
### Backward pass ###
# Output error - Replace this value with your calculations.
error = y-final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(error , self.weights_hidden_to_output.T)
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error # the activation is the identity function so f`(h)=1
hidden_error_term = hidden_error*hidden_outputs*(1-hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:,None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:,None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr*delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr*delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 1500
learning_rate = 0.8
hidden_nodes = 10
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii / float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set); a minimal sketch of such a chronological split is given after this list.
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
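A minimal sketch of the chronological split described in Step6 (an illustrative addition, not part of the original notebook; the variable names and window sizes below are assumptions):
# Hypothetical chronological split; `features` and `targets` are assumed to be
# DataFrames of hourly records that are already sorted by date.
n_test = 21 * 24    # assumed: hold out roughly the last 21 days for testing
n_val = 60 * 24     # assumed: use the ~60 days before that for validation
test_features, test_targets = features[-n_test:], targets[-n_test:]
val_features, val_targets = features[-(n_test + n_val):-n_test], targets[-(n_test + n_val):-n_test]
train_features, train_targets = features[:-(n_test + n_val)], targets[:-(n_test + n_val)]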
|
567 | <ASSISTANT_TASK:>
Python Code:
# Import numpy, pandas, linearsolve, scipy.optimize, matplotlib.pyplot
import numpy as np
import pandas as pd
import linearsolve as ls
from scipy.optimize import root,fsolve,broyden1,broyden2
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
alpha = 0.36
beta = 0.989
delta = 0.019
eta = 1
psi = 1.34
sigma = 2
A = 1
rhoa = 0.95
gamma = 0.8
phi=0.5
r_ss = 1/beta
yk_ss= 1/alpha*(r_ss-1+delta)
ck_ss = yk_ss-delta
def func(n):
    '''Function to compute steady-state labor'''
return (1-alpha)/psi*beta*yk_ss**((sigma-alpha)/(1-alpha))*ck_ss**(-sigma) - (1-n)**-eta*n**sigma
n_ss = root(func,0.3)['x'][0]
nk_ss = (yk_ss)**(1/(1-alpha))
k_ss = n_ss/nk_ss
y_ss = yk_ss*k_ss
c_ss = ck_ss*k_ss
m_ss = c_ss
a_ss = 1
u_ss = 1
pi_ss = 1
lam_ss = beta*c_ss**-sigma
mu_ss = (1-beta)*c_ss**-sigma
# Store steady state values in a list
ss = [a_ss,u_ss,m_ss,k_ss,pi_ss,r_ss,n_ss,c_ss,lam_ss,mu_ss,y_ss]
# Load parameter values into a Pandas Series
parameters = pd.Series({
'alpha':alpha,
'beta':beta,
'delta':delta,
'eta':eta,
'psi':psi,
'sigma':sigma,
'rhoa':rhoa,
'gamma':gamma,
'phi':phi,
'n_ss':n_ss,
'yk_ss':yk_ss,
'ck_ss':ck_ss
})
# Define function to compute equilibrium conditions
def equations(variables_forward,variables_current,parameters):
# Parameters
p = parameters
# Variables
fwd = variables_forward
cur = variables_current
# Household Euler equation
foc1 = p.alpha*cur.k+(1-p.alpha)*cur.n + fwd.a - cur.y
foc2 = p.ck_ss*fwd.m + fwd.k - (1-p.delta)*cur.k - p.yk_ss*cur.y
foc3 = p.alpha*p.yk_ss*(fwd.y - fwd.k) - cur.r
foc4 = fwd.lam + cur.r - cur.lam
foc5 = (1+p.eta*p.n_ss/(1-p.n_ss))*cur.n - cur.y - cur.lam
foc6 = cur.r + fwd.pi - cur.rn
foc7 = -p.sigma*fwd.maux-fwd.pi - cur.lam
foc8 = cur.m-cur.pi+cur.u - fwd.m
foc9 = cur.maux - fwd.m
foc10= p.gamma*cur.u+p.phi*fwd.a - fwd.u
foc11= p.rhoa*cur.a - fwd.a
# Stack equilibrium conditions into a numpy array
return np.array([
foc1,
foc2,
foc3,
foc4,
foc5,
foc6,
foc7,
foc8,
foc9,
foc10,
foc11,
])
# Initialize the model
model = ls.model(equations = equations,
n_states=4,
n_exo_states=3,
var_names=['a', 'u', 'm', 'k', 'lam', 'pi', 'rn', 'r', 'n', 'y','maux'],
parameters = parameters)
# Compute the steady state numerically
guess = 0*np.array([1,1,10,10,1,1,0.5,2,1,1,1])
model.compute_ss(guess,method='fsolve')
# Construct figure and axes
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
# Iterate over different degrees of persistence for money growth shock
for gamma in [0.5,0.8]:
model.parameters['gamma'] = gamma
# Solve the model
model.approximate_and_solve(log_linear=False)
# Compute impulse responses and plot
model.impulse(T=17,t0=1,shocks=None,percent=True)
# Plot
y = model.irs['e_u']['y']
n = model.irs['e_u']['n']
rn = model.irs['e_u']['rn']
pi = model.irs['e_u']['pi']
tme=rn.index
ax1.plot(tme,y,lw=5,alpha=0.5,label='y ($\gamma='+str(gamma)+'$)')
ax1.plot(tme,n,'--',lw=5,alpha=0.5,label='n ($\gamma='+str(gamma)+'$)')
ax1.grid(True)
ax1.legend(loc='lower right')
ax2.plot(tme,rn,lw=5,alpha=0.5,label='Rn ($\gamma='+str(gamma)+'$)')
ax2.plot(tme,pi,'--',lw=5,alpha=0.5,label='$\pi$ ($\gamma='+str(gamma)+'$)')
ax2.grid(True)
ax2.legend()
# Define function to compute equilibrium conditions
def equations(variables_forward,variables_current,parameters):
# Parameters
p = parameters
# Variables
fwd = variables_forward
cur = variables_current
# Household Euler equation
foc_1 = cur.a**p.rhoa - fwd.a
foc_2 = cur.u**p.gamma*cur.a**p.phi - fwd.u
foc_3 = cur.lam+cur.mu - cur.c**-p.sigma
foc_4 = cur.lam*(1-p.alpha)*cur.y/cur.n - p.psi*(1-cur.n)**-p.eta
foc_5 = p.beta*(fwd.lam*cur.Rn)/fwd.pi - cur.lam
foc_6 = p.beta*(fwd.mu+fwd.lam)/fwd.pi - cur.lam
foc_7 = p.beta*(fwd.lam*(p.alpha*fwd.y/fwd.k+1-p.delta)) - cur.lam
    foc_8 = cur.a*cur.k**p.alpha*cur.n**(1-p.alpha) - cur.y
foc_9 = cur.c+fwd.k-(1-p.delta)*cur.k - cur.y
foc_10 = cur.m/cur.pi*cur.u - fwd.m
foc_11 = fwd.m - cur.c
# Stack equilibrium conditions into a numpy array
return np.array([
foc_1,
foc_2,
foc_3,
foc_4,
foc_5,
foc_6,
foc_7,
foc_8,
foc_9,
foc_10,
foc_11
])
# Initialize the model
varNames=['a','u','m','k','pi','Rn','n','c','lam','mu','y']
parameters = parameters[['alpha','beta','delta','eta','psi','sigma','rhoa','gamma','phi']]
model = ls.model(equations = equations,
n_states=4,
n_exo_states=3,
var_names=varNames,
parameters = parameters)
# Set the steady state using exact values calculated above
model.set_ss(ss)
# Construct figure and axes
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
# Iterate over different degrees of persistence for money growth shock
for gamma in [0.5,0.8]:
model.parameters['gamma'] = gamma
# Find the log-linear approximation around the non-stochastic steady state
model.approximate_and_solve()
# Compute impulse responses and plot
model.impulse(T=17,t0=1,shocks=None,percent=True)
# Plot
y = model.irs['e_u']['y']
n = model.irs['e_u']['n']
rn = model.irs['e_u']['Rn']
pi = model.irs['e_u']['pi']
tme=rn.index
ax1.plot(tme,y,lw=5,alpha=0.5,label='y ($\gamma='+str(gamma)+'$)')
ax1.plot(tme,n,'--',lw=5,alpha=0.5,label='n ($\gamma='+str(gamma)+'$)')
ax1.grid(True)
ax1.legend(loc='lower right')
ax2.plot(tme,rn,lw=5,alpha=0.5,label='Rn ($\gamma='+str(gamma)+'$)')
ax2.plot(tme,pi,'--',lw=5,alpha=0.5,label='$\pi$ ($\gamma='+str(gamma)+'$)')
ax2.grid(True)
ax2.legend()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Compute exact steady state
Step3: Linear model
Step4: Nonlinear model
|
568 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
from pandas.io import wb
file1 = '/users/susan/desktop/PISA/PISA2012clean.csv' # file location
df1 = pd.read_csv(file1)
#pandas remote data access API for World Bank GDP per capita data
df2 = wb.download(indicator='NY.GDP.PCAP.PP.KD', country='all', start=2012, end=2012)
df1
#drop multilevel index
df2.index = df2.index.droplevel('year')
df1.columns = ['Country','Math','Reading','Science']
df2.columns = ['GDPpc']
#combine PISA and GDP datasets based on country column
df3 = pd.merge(df1, df2, how='left', left_on = 'Country', right_index = True)
df3.columns = ['Country','Math','Reading','Science','GDPpc']
#drop rows with missing GDP per capita values
df3 = df3[pd.notnull(df3['GDPpc'])]
print (df3)
df3.index = df3.Country #set country column as the index
df3 = df3.drop(['Qatar', 'Vietnam']) # drop outliers
Reading = df3.Reading
Science = df3.Science
Math = df3.Math
GDP = np.log(df3.GDPpc)
#PISA reading vs GDP per capita
plt.scatter(x = GDP, y = Reading, color = 'r')
plt.title('PISA 2012 Reading scores vs. GDP per capita')
plt.xlabel('GDP per capita (log)')
plt.ylabel('PISA Reading Score')
plt.show()
#PISA math vs GDP per capita
plt.scatter(x = GDP, y = Math, color = 'b')
plt.title('PISA 2012 Math scores vs. GDP per capita')
plt.xlabel('GDP per capita (log)')
plt.ylabel('PISA Math Score')
plt.show()
#PISA science vs GDP per capita
plt.scatter(x = GDP, y = Science, color = 'g')
plt.title('PISA 2012 Science scores vs. GDP per capita')
plt.xlabel('GDP per capita (log)')
plt.ylabel('PISA Science Score')
plt.show()
lm = smf.ols(formula='Reading ~ GDP', data=df3).fit()
lm.params
lm.summary()
lm2 = smf.ols(formula='Math ~ GDP', data=df3).fit()
lm2.params
lm2.summary()
lm3 = smf.ols(formula='Science ~ GDP', data=df3).fit()
lm3.params
lm3.summary()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the Dataset
Step2: Excluding Outliers
Step3: Plotting the Data
Step4: Regression Analysis
|
569 | <ASSISTANT_TASK:>
Python Code:
[telepyth]
token = 3916589616287113937
import telepyth
%telepyth 'line magic'
%%telepyth 'cell magic'
'some code here'
%telepyth
%%telepyth raise Exception('in title.')
raise Exception('in cell')
%%telepyth ' '.join(('Title', 'message'))
forty_two = '42'
pi = 3.1415926
int(forty_two) / pi # approximatly 13
from telepyth import TelepythClient
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
tp = TelepythClient()
N = 75
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2
fig = plt.figure()
ax = plt.subplot(111)
ax.scatter(x, y, s=area, c=colors, marker='D', cmap='hsv', alpha=0.5)
tp.send_figure(fig, 'Diamonds!')
%telepyth -v
%telepyth?
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once telepyth package is imported, it tries to load settings from .telepythrc.
Step2: telepyth actually provides both a line magic and a cell magic. The visual difference is the number of % symbols: line magic executes only the statements on the same line after %telepyth, while cell magic runs the lines below %%telepyth as well.
Step3: Too lazy to write a notification message? No problem. The telepyth magic can be used without any statements; in this case telepyth notifies the user with a simple message.
Step4: Both line magic and cell magic can catch exceptions and notify the user with their tracebacks.
Step5: Another benefit of telepyth is that you can construct the notification message from a title and a body.
Step6: To send figures use TelePyth Client like this
Step7: To check telepyth client version
Step8: The previous command also returns a telepyth object that can be accessed directly. More options are available with the command below.
|
570 | <ASSISTANT_TASK:>
Python Code:
# from kmapper import jupyter
import kmapper as km
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.cluster import AgglomerativeClustering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import Isomap
from sklearn.preprocessing import MinMaxScaler
newsgroups = fetch_20newsgroups(subset='train')
X, y, target_names = np.array(newsgroups.data), np.array(newsgroups.target), np.array(newsgroups.target_names)
print("SAMPLE",X[0])
print("SHAPE",X.shape)
print("TARGET",target_names[y[0]])
mapper = km.KeplerMapper(verbose=2)
projected_X = mapper.fit_transform(X,
projection=[TfidfVectorizer(analyzer="char",
ngram_range=(1,6),
max_df=0.83,
min_df=0.05),
TruncatedSVD(n_components=100,
random_state=1729),
Isomap(n_components=2,
n_jobs=-1)],
scaler=[None, None, MinMaxScaler()])
print("SHAPE",projected_X.shape)
from sklearn import cluster
graph = mapper.map(projected_X,
inverse_X=None,
clusterer=cluster.AgglomerativeClustering(n_clusters=3,
linkage="complete",
affinity="cosine"),
overlap_perc=0.33)
vec = TfidfVectorizer(analyzer="word",
strip_accents="unicode",
stop_words="english",
ngram_range=(1,3),
max_df=0.97,
min_df=0.02)
interpretable_inverse_X = vec.fit_transform(X).toarray()
interpretable_inverse_X_names = vec.get_feature_names()
print("SHAPE", interpretable_inverse_X.shape)
print("FEATURE NAMES SAMPLE", interpretable_inverse_X_names[:400])
html = mapper.visualize(graph,
inverse_X=interpretable_inverse_X,
inverse_X_names=interpretable_inverse_X_names,
path_html="newsgroups20.html",
projected_X=projected_X,
projected_X_names=["ISOMAP1", "ISOMAP2"],
title="Newsgroups20: Latent Semantic Char-gram Analysis with Isometric Embedding",
custom_tooltips=np.array([target_names[ys] for ys in y]),
color_function=y)
# jupyter.display("newsgroups20.html")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Projection
Step3: Mapping
Step4: Interpretable inverse X
Step5: Visualization
|
571 | <ASSISTANT_TASK:>
Python Code:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import timeit
logging.basicConfig(format=
"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
# filename="/tmp/caiman.log",
level=logging.DEBUG)
import caiman.external.houghvst.estimation as est
from caiman.external.houghvst.gat import compute_gat, compute_inverse_gat
import caiman as cm
from caiman.paths import caiman_datadir
def main():
fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
movie = cm.load(fnames)
movie = movie.astype(np.float)
# makes estimation numerically better:
movie -= movie.mean()
# use one every 200 frames
temporal_stride = 100
# use one every 8 patches (patches are 8x8 by default)
spatial_stride = 6
movie_train = movie[::temporal_stride]
t = timeit.default_timer()
estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride)
print('\tTime', timeit.default_timer() - t)
alpha = estimation_res.alpha
sigma_sq = estimation_res.sigma_sq
movie_gat = compute_gat(movie, sigma_sq, alpha=alpha)
# save movie_gat here
movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha,
method='asym')
# save movie_gat_inv here
return movie, movie_gat, movie_gat_inv
movie, movie_gat, movie_gat_inv = main()
movie_gat.play(magnification=4, q_max=99.8)
CI = movie.local_correlations(swap_dim=False)
CI_gat = movie_gat.local_correlations(swap_dim=False)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1); plt.imshow(CI); plt.colorbar(); plt.title('Correlation Image (original)')
plt.subplot(1,2,2); plt.imshow(CI_gat); plt.colorbar(); plt.title('Correlation Image (transformed)')
sn = cm.source_extraction.cnmf.pre_processing.get_noise_fft(movie.transpose(1,2,0), noise_method='mean')[0]
sn_gat = cm.source_extraction.cnmf.pre_processing.get_noise_fft(movie_gat.transpose(1,2,0), noise_method='mean')[0]
# sn = np.std(movie.transpose(1,2,0), axis=-1)
# sn_gat = np.std(movie_gat.transpose(1,2,0), axis=-1)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1); plt.imshow(sn); plt.colorbar(); plt.title('Noise standard deviation (original)')
plt.subplot(1,2,2); plt.imshow(sn_gat); plt.colorbar(); plt.title('Noise standard deviation (transformed)')
cm.concatenate([movie,movie_gat_inv],axis=2).play(magnification=5, q_max=99.5)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below is a function that will compute and apply the transformation and its inverse. The underlying noise model is scaled Poisson plus Gaussian, i.e., the underlying fluorescence value $x$ is related to the observed value $y$ through a mixed Poisson-Gaussian model (a standard form of this model and of the corresponding transform is sketched after this list).
Step2: The transformed movie should have more uniform dynamic range (press q to exit)
Step3: The movie might appear more noisy but information is preserved as seen from the correlation image
Step4: The noise estimates in space should also be more uniform
Step5: If we apply the inverse transform we approximately get back the original movie (press q to exit)
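For reference, a commonly used form of this noise model and of the corresponding generalized Anscombe transform is as follows; this is an assumption on my part (zero-mean Gaussian noise), so check caiman.external.houghvst for the exact expressions used by compute_gat and compute_inverse_gat. The model is $y = \alpha q + \varepsilon$ with $q \sim \mathrm{Poisson}(x)$ and $\varepsilon \sim \mathcal{N}(0, \sigma^2)$, and the transform $\mathrm{GAT}(y) = \frac{2}{\alpha}\sqrt{\alpha y + \frac{3}{8}\alpha^2 + \sigma^2}$ approximately stabilizes the noise variance to one.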
|
572 | <ASSISTANT_TASK:>
Python Code:
import re
# List of patterns to search for
patterns = [ 'term1', 'term2' ]
# Text to parse
text = 'This is a string with term1, but it does not have the other term.'
for pattern in patterns:
print 'Searching for "%s" in: \n"%s"' % (pattern, text),
#Check for match
if re.search(pattern, text):
print '\n'
print 'Match was found. \n'
else:
print '\n'
print 'No Match was found.\n'
# List of patterns to search for
pattern = 'term1'
# Text to parse
text = 'This is a string with term1, but it does not have the other term.'
match = re.search(pattern, text)
type(match)
# Show start of match
match.start()
# Show end
match.end()
# Term to split on
split_term = '@'
phrase = 'What is the domain name of someone with the email: hello@gmail.com'
# Split the phrase
re.split(split_term,phrase)
# Returns a list of all matches
re.findall('match','test phrase match is in middle')
def multi_re_find(patterns,phrase):
'''
Takes in a list of regex patterns
Prints a list of all matches
'''
for pattern in patterns:
print 'Searching the phrase using the re check: %r' %pattern
print re.findall(pattern,phrase)
print '\n'
test_phrase = 'sdsd..sssddd...sdddsddd...dsds...dsssss...sdddd'
test_patterns = [ 'sd*', # s followed by zero or more d's
'sd+', # s followed by one or more d's
'sd?', # s followed by zero or one d's
'sd{3}', # s followed by three d's
'sd{2,3}', # s followed by two to three d's
]
multi_re_find(test_patterns,test_phrase)
test_phrase = 'sdsd..sssddd...sdddsddd...dsds...dsssss...sdddd'
test_patterns = [ '[sd]', # either s or d
's[sd]+'] # s followed by one or more s or d
multi_re_find(test_patterns,test_phrase)
test_phrase = 'This is a string! But it has punctuation. How can we remove it?'
re.findall('[^!.? ]+',test_phrase)
test_phrase = 'This is an example sentence. Lets see if we can find some letters.'
test_patterns=[ '[a-z]+', # sequences of lower case letters
'[A-Z]+', # sequences of upper case letters
'[a-zA-Z]+', # sequences of lower or upper case letters
'[A-Z][a-z]+'] # one upper case letter followed by lower case letters
multi_re_find(test_patterns,test_phrase)
test_phrase = 'This is a string with some numbers 1233 and a symbol #hashtag'
test_patterns=[ r'\d+', # sequence of digits
r'\D+', # sequence of non-digits
r'\s+', # sequence of whitespace
r'\S+', # sequence of non-whitespace
r'\w+', # alphanumeric characters
r'\W+', # non-alphanumeric
]
multi_re_find(test_patterns,test_phrase)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we've seen that re.search() will take the pattern, scan the text, and then return a Match object. If no pattern is found, None is returned. To give a clearer picture of this match object, check out the cell below
Step2: This Match object returned by the search() method is more than just a Boolean or None; it contains information about the match, including the original input string, the regular expression that was used, and the location of the match. Let's see the methods we can use on the match object (a couple of extra method calls are also sketched after this list)
Step3: Split with regular expressions
Step4: Note how re.split() returns a list with the term to split on removed, and the terms in the list are a split-up version of the string. Create a couple more examples for yourself to make sure you understand (two additional split examples are sketched after this list)!
Step5: Pattern re Syntax
Step6: Repetition Syntax
Step7: Character Sets
Step8: It makes sense that the first [sd] returns every instance. Also, the second pattern will just return anything starting with an s in this particular test phrase.
Step9: Use [^!.? ] to check for matches that are not a !, ., ?, or space. Add the + to check that the match appears at least once; this basically translates into finding the words.
Step10: Character Ranges
Step11: Escape Codes
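An extra illustration of the Match-object methods mentioned in Step2 (an added example, not part of the original lesson):
# A few more things the Match object can tell us (illustrative example)
match = re.search('term1', 'This is a string with term1, but it does not have the other term.')
print match.group()   # the text that matched: 'term1'
print match.span()    # (start, end) positions of the match as a tuple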
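And, since Step4 suggests writing a couple more split examples, here are two illustrative ones (added, not from the original lesson):
# A couple more re.split examples (illustrative)
print re.split(' ', 'Split this sentence on every space')
# ['Split', 'this', 'sentence', 'on', 'every', 'space']
print re.split('-', '555-123-4567')
# ['555', '123', '4567']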
|
573 | <ASSISTANT_TASK:>
Python Code:
import sys
from packages.learntools.deep_learning.exercise_1 import load_my_image, apply_conv_to_image, show, print_hints
# Detects light vs. dark pixels:
horizontal_line_conv = [[1, 1],
[-1, -1]]
vertical_line_conv = [[-1, -1],
[1, 1]]
conv_list = [horizontal_line_conv, vertical_line_conv]
original_image = load_my_image()
print("Original Image: ")
show(original_image)
for conv in conv_list:
filtered_image = apply_conv_to_image(conv, original_image)
show(filtered_image)
from os.path import join
image_dir = 'data/dog_breed/train/'
img_paths = [join(image_dir, filename) for filename in
['0246f44bb123ce3f91c939861eb97fb7.jpg',
'84728e78632c0910a69d33f82e62638c.jpg',
'8825e914555803f4c67b26593c9d5aff.jpg',
'91a5e8db15bccfb6cfa2df5e8b95ec03.jpg']]
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
image_size=224
def read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):
imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]
img_array = np.array([img_to_array(img) for img in imgs])
return preprocess_input(img_array)
from tensorflow.python.keras.applications import ResNet50
my_model = ResNet50(weights='inputs/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
test_data = read_and_prep_images(img_paths)
preds = my_model.predict(test_data)
import sys
# Add a directory with prefabricated code to your path.
sys.path.append('inputs/utils')
from decode_predictions import decode_predictions
from IPython.display import Image, display
most_likely_labels = decode_predictions(preds, top=3, class_list_path='inputs/imagenet_class_index.json')
for i, img_path in enumerate(img_paths):
display(Image(img_path))
print(most_likely_labels[i])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example Convolution
Step2: Vertical Line Detector
Step3: Now create a list that contains your convolutions, then apply them to the image data
Step4: Let's see the image with just the horizontal and vertical line filters
Step5: Building Models from Convolutions
Step6: Write a function to read and prepare images for modeling
Step7: Create a model with the pre-trained weights file and make predictions
Step8: Visualization Time!
|
574 | <ASSISTANT_TASK:>
Python Code:
#from IPython.core.display import display, HTML
#display(HTML("<style>.container { width:95% !important; }</style>"))
%%time
import pandas as pd
import functions as f
import list_builder as lb
%%time
%run build_program_files sample3
%%time
%run make_skeleton
%%time
%run standalone prex
%%time
%run compute_measures p1 prex ratio
%%time
%run compute_measures p2 prex count
%%time
%run compute_measures p3 prex
%%time
%run join_inactives p3 ffill
%%time
f.print_settings()
%%time
# this will build a "hybrid" list with equal weighting given to longevity and standalone job position
master = lb.prepare_master_list()
lb.build_list(master, ['ldate', 'jobp'], [.5, .5])
%%time
# generate dataset for the hybrid list
%run compute_measures hybrid prex
%%time
# print only the first 5 lines of each file
case_study = pd.read_pickle('dill/case_dill.pkl').case.value
file_names = ['master', 'skeleton', 'standalone', 'p_p1', 'pay_table_enhanced', 'pay_table_basic', 'ds_p1', 'p_hybrid', case_study + '_final']
for file in file_names:
try:
df = pd.read_pickle('dill/' + file + '.pkl')
print('< ' + file.upper() + ' >', '\n', 'row count: ' + str(len(df)),
'\n', 'column count: '+ str(len(df.columns)), '\n',
df.columns, '\n',
'\n', df.head().to_string(line_width=120), '\n\n', '-' * 80, '\n')
except:
pass
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: RESTART kernel prior to running after any changes to spreadsheet input files...
Step2: build program files
Step3: generate skeleton
Step4: calculate standalone dataset
Step5: calculate integrated dataset for each list order proposal
Step6: merge an ordered list with the master list
Step7: cells below for demonstration only
Step8: sample of files created
|
575 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
import GPyOpt
from numpy.random import seed
func = GPyOpt.objective_examples.experimentsNd.alpine1(input_dim=5)
mixed_domain =[{'name': 'var1', 'type': 'continuous', 'domain': (-5,5),'dimensionality': 3},
{'name': 'var2', 'type': 'discrete', 'domain': (3,8,10)},
{'name': 'var3', 'type': 'categorical', 'domain': (0,1,2)},
{'name': 'var4', 'type': 'continuous', 'domain': (-1,2)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=func.f, # Objective function
domain=mixed_domain, # Box-constraints of the problem
initial_design_numdata = 5, # Number data initial design
acquisition_type='EI', # Expected Improvement
exact_feval = True,
evaluator_type = 'local_penalization',
batch_size = 5
) # True evaluations, no sample noise
max_iter = 2 ## maximum number of iterations
max_time = 60 ## maximum allowed time
eps      = 0        ## tolerance, max distance between consecutive evaluations.
myBopt.run_optimization(max_iter,eps=eps)
myBopt.run_optimization(max_iter,eps=eps,context = {'var1_1':.3, 'var1_2':0.4})
myBopt.run_optimization(max_iter,eps=eps,context = {'var1_1':0, 'var3':2})
myBopt.run_optimization(max_iter,eps=eps,context = {'var1_1':0, 'var2':3},)
myBopt.run_optimization(max_iter,eps=eps,context = {'var1_1':0.3, 'var3':1, 'var4':-.4})
myBopt.run_optimization(max_iter,eps=eps)
np.round(myBopt.X,2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we define the domain of the function to optimize as usual.
Step2: Now, we run the optimization for 20 iterations or a maximum of 60 seconds and we show the convergence plots.
Step3: To set a context, we just need to create a dictionary with the variables to fix and pass it to the Bayesian optimization object when running the optimization. Note that every time we run new iterations we can set other variables to be the context. Also note that, for variables whose dimensionality has been specified in the domain, a subindex is internally assigned. For instance, if the variable is called 'var1' and has dimensionality 3, the first three positions in the internal representation of the domain will be occupied by the variables 'var1_1', 'var1_2' and 'var1_3'. If no dimensionality is given, the internal naming remains the same; for instance, in the example above 'var3' should be fixed using its original name. See below for details.
Step4: We can now visualize the results
|
576 | <ASSISTANT_TASK:>
Python Code:
!pwd
dbf_path = ps.examples.get_path('NAT.dbf')
print(dbf_path)
csv_path = ps.examples.get_path('usjoin.csv')
shp_path = ps.examples.get_path('NAT.shp')
print(shp_path)
f = ps.open(shp_path)
f.header
f.by_row(14) #gets the 14th shape from the file
all_polygons = f.read() #reads in all polygons from memory
len(all_polygons)
all_polygons[0].centroid #the centroid of the first polygon
all_polygons[0].area
all_polygons[0].perimeter
polygon = all_polygons[0]
polygon. #press tab when the cursor is right after the dot
f = ps.open(dbf_path)
f.header
HR90 = f.by_col('HR90')
print(type(HR90).__name__, HR90[0:5])
HR90 = f.by_col_array('HR90')
print(type(HR90).__name__, HR90[0:5])
HRs = f.by_col('HR90', 'HR80')
HRs = f.by_col_array('HR90', 'HR80')
HRs
allcolumns = f.by_col_array(['NAME', 'STATE_NAME', 'HR90', 'HR80'])
allcolumns
ps.pdio
data_table = ps.pdio.read_files(shp_path)
data_table.head()
usjoin = pd.read_csv(csv_path)
#usjoin = ps.pdio.read_files(usjoin) #will not work, not a shp/dbf pair
data_table.groupby("STATE_NAME").size()
data_table.query('STATE_NAME == "Arizona"')
data_table.STATE_NAME == 'Arizona'
data_table[data_table.STATE_NAME == 'Arizona']
data_table.geometry.apply(lambda poly: poly.centroid[0] < -121)
data_table[data_table.geometry.apply(lambda x: x.centroid[0] < -121)]
data_table.query('(NAME == "Cook") & (STATE_NAME == "Illinois")')
geom = data_table.query('(NAME == "Cook") & (STATE_NAME == "Illinois")').geometry
geom.values[0].centroid
cook_county_centroid = geom.values[0].centroid
import scipy.spatial.distance as d
def near_target_point(polygon, target=cook_county_centroid, threshold=1):
return d.euclidean(polygon.centroid, target) < threshold
data_table[data_table.geometry.apply(near_target_point)]
data_table.NAME.tolist()
HRs = [col for col in data_table.columns if col.startswith('HR')]
data_table[HRs]
data_table[HRs].values
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PySAL has a command that it uses to get the paths of its example datasets. Let's work with a commonly-used dataset first.
Step2: For the purposes of this part of the workshop, we'll use the NAT.dbf example data, and the usjoin.csv data.
Step3: Working with shapefiles
Step4: Then, we open the file using the ps.open command
Step5: f is what we call a "file handle." That means that it only points to the data and provides ways to work with it. By itself, it does not read the whole dataset into memory. To see basic information about the file, we can use a few different methods.
Step6: To actually read in the shapes from memory, you can use the following commands
Step7: So, all 3085 polygons have been read in from file. These are stored in PySAL shape objects, which can be used by PySAL and can be converted to other Python shape objects (one such conversion is sketched after this list).
Step8: While in the Jupyter Notebook, you can examine what properties an object has by using the tab key.
Step9: Working with Data Tables
Step10: Just like with the shapefile, we can examine the header of the dbf file
Step11: So, the header is a list containing the names of all of the fields we can read. If we were interested in getting the ['NAME', 'STATE_NAME', 'HR90', 'HR80'] fields, we could pull them out by name using the file handle, as shown next.
Step12: As you can see, the by_col function returns a list of data, with no shape. It can only return one column at a time
Step13: This error message is called a "traceback," as you see in the top right, and it usually provides feedback on why the previous command did not execute correctly. Here, you see that one too many arguments were provided to __call__, which tells us we cannot pass as many arguments as we did to by_col.
Step14: It is best to use by_col_array on data of a single type. That is, if you read in a lot of columns, some of them numbers and some of them strings, all columns will get converted to the same datatype
Step15: Note that the numerical columns, HR90 & HR80 are now considered strings, since they show up with the single tickmarks around them, like '0.0'.
Step16: To use it, you can read in shapefile/dbf pairs using the ps.pdio.read_files command.
Step17: This reads in the entire database table and adds a column to the end, called geometry, that stores the geometries read in from the shapefile.
Step18: The read_files function only works on shapefile/dbf pairs. If you need to read in data using CSVs, use pandas directly
Step19: The nice thing about working with pandas dataframes is that they have very powerful baked-in support for relational-style queries. By this, I mean that it is very easy to find things like
Step20: Or, to get the rows of the table that are in Arizona, we can use the query function of the dataframe
Step21: Behind the scenes, this uses a fast vectorized library, numexpr, to essentially do the following.
Step22: Then, use that to filter out rows where the condition is true
Step23: We might need this behind-the-scenes knowledge when we want to chain together conditions (chaining boolean masks is sketched after this list), or when we need to do spatial queries.
Step24: If we use this as a filter on the table, we can get only the rows that match that condition, just like we did for the STATE_NAME query
Step25: This works on any type of spatial query.
Step26: Moving in and out of the dataframe
Step27: To extract many columns, you must select the columns you want and call their .values attribute.
Step28: We can use this to focus only on the columns we want
Step29: With this, calling .values gives an array containing all of the entries in this subset of the table
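A sketch of the conversion mentioned in Step7 (an illustrative addition and an assumption on my part: it relies on the PySAL polygon exposing its vertex list via .vertices and on shapely being installed):
# Hypothetical sketch: build a shapely polygon from a PySAL polygon's vertices
from shapely.geometry import Polygon as ShapelyPolygon
shapely_poly = ShapelyPolygon([(pt[0], pt[1]) for pt in all_polygons[0].vertices])
shapely_poly.area   # should agree with all_polygons[0].area for a single-ring polygon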
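And a sketch of the condition chaining mentioned in Step23 (an illustrative addition; HR90 is one of the homicide-rate columns read from NAT.dbf):
# Chain boolean masks directly with & (note the parentheses around each condition)
data_table[(data_table.STATE_NAME == 'Arizona') & (data_table.HR90 > 10)]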
|
577 | <ASSISTANT_TASK:>
Python Code:
# Lasagne is pre-release, so it's interface is changing.
# Whenever there's a backwards-incompatible change, a warning is raised.
# Let's ignore these for the course of the tutorial
import warnings
warnings.filterwarnings('ignore', module='lasagne')
import theano
import theano.tensor as T
import lasagne
import numpy as np
import sklearn.datasets
from __future__ import print_function
import os
import matplotlib.pyplot as plt
%matplotlib inline
import IPython.display
IPython.display.Image("http://static-vegetariantimes.s3.amazonaws.com/wp-content/uploads/2009/03/10851medium.jpg")
# Generate synthetic data
N_CLASSES = 4
X, y = sklearn.datasets.make_classification(n_features=2, n_redundant=0,
n_classes=N_CLASSES, n_clusters_per_class=1)
# Convert to theano floatX
X = X.astype(theano.config.floatX)
# Labels should be ints
y = y.astype('int32')
# Make a scatter plot where color encodes class
plt.scatter(X[:, 0], X[:, 1], c=y)
# First, construct an input layer.
# The shape parameter defines the expected input shape, which is just the shape of our data matrix X.
l_in = lasagne.layers.InputLayer(shape=X.shape)
# We'll create a network with two dense layers: A tanh hidden layer and a softmax output layer.
l_hidden = lasagne.layers.DenseLayer(
# The first argument is the input layer
l_in,
# This defines the layer's output dimensionality
num_units=10,
# Various nonlinearities are available
nonlinearity=lasagne.nonlinearities.tanh)
# For our output layer, we'll use a dense layer with a softmax nonlinearity.
l_output = lasagne.layers.DenseLayer(
l_hidden, num_units=N_CLASSES, nonlinearity=lasagne.nonlinearities.softmax)
net_output = l_output.get_output()
# As a loss function, we'll use Theano's categorical_crossentropy function.
# This allows for the network output to be class probabilities,
# but the target output to be class labels.
true_output = T.ivector('true_output')
objective = lasagne.objectives.Objective(
l_output,
# categorical_crossentropy computes the cross-entropy loss where the network
# output is class probabilities and the target value is an integer denoting the class.
loss_function=lasagne.objectives.categorical_crossentropy)
# get_loss computes a Theano expression for the objective, given a target variable
# By default, it will use the network's InputLayer input_var, which is what we want.
loss = objective.get_loss(target=true_output)
# Retrieving all parameters of the network is done using get_all_params,
# which recursively collects the parameters of all layers connected to the provided layer.
all_params = lasagne.layers.get_all_params(l_output)
# Now, we'll generate updates using Lasagne's SGD function
updates = lasagne.updates.sgd(loss, all_params, learning_rate=1)
# Finally, we can compile Theano functions for training and computing the output.
# Note that because loss depends on the input variable of our input layer,
# we need to retrieve it and tell Theano to use it.
train = theano.function([l_in.input_var, true_output], loss, updates=updates)
get_output = theano.function([l_in.input_var], net_output)
# Train (bake?) for 100 epochs
for n in xrange(100):
train(X, y)
# Compute the predicted label of the training data.
# The argmax converts the class probability output to class label
y_predicted = np.argmax(get_output(X), axis=1)
# Plot incorrectly classified points as black dots
plt.scatter(X[:, 0], X[:, 1], c=(y != y_predicted), cmap=plt.cm.gray_r)
# Compute and display the accuracy
plt.title("Accuracy: {}%".format(100*np.mean(y == y_predicted)))
# We'll use the load_data function from the mnist.py example
from mnist import _load_data
data = _load_data()
# Convert the data from the pickle file into a dict
# The keys will be 'train', 'valid', and 'test'
subsets = ['train', 'valid', 'test']
# Each entry will be a dict with 'X' and 'y' entries
# for the data and labels respectively.
dataset = {}
for (subset_data, subset_labels), subset_name in zip(data, subsets):
# The data is provided in the shape (n_examples, 784)
# where 784 = width*height = 28*28
# We need to reshape for convolutional layer shape conventions - explained below!
subset_data = subset_data.reshape(
(subset_data.shape[0], 1, 28, 28))
dataset[subset_name] = {
# We need to use data matrices of dtype theano.config.floatX
'X': subset_data.astype(theano.config.floatX),
# Labels are integers
'y': subset_labels.astype(np.int32)}
# Plot an example digit with its label
plt.imshow(dataset['train']['X'][0][0], interpolation='nearest', cmap=plt.cm.gray)
plt.title("Label: {}".format(dataset['train']['y'][0]))
plt.gca().set_axis_off()
# We'll determine the input shape from the first example from the training set.
input_shape = dataset['train']['X'][0].shape
l_in = lasagne.layers.InputLayer(
shape=(None, input_shape[0], input_shape[1], input_shape[2]))
# Create the first convolutional layer
l_conv1 = lasagne.layers.Conv2DLayer(
l_in,
# Here, we set the number of filters and their size.
num_filters=32, filter_size=(5, 5),
# lasagne.nonlinearities.rectify is the common ReLU nonlinearity
nonlinearity=lasagne.nonlinearities.rectify,
# Use He et. al.'s initialization
W=lasagne.init.HeNormal(gain='relu'))
# Other arguments: Convolution type (full, same, or valid) and stride
# Here, we do 2x2 max pooling. The max pooling layer also supports striding
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
# The second convolution/pooling pair is the same as above.
l_conv2 = lasagne.layers.Conv2DLayer(
l_pool1, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeNormal(gain='relu'))
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))
l_hidden1 = lasagne.layers.DenseLayer(
l_pool2, num_units=256,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeNormal(gain='relu'))
# p is the dropout probability
l_hidden1_dropout = lasagne.layers.DropoutLayer(l_hidden1, p=0.5)
l_output = lasagne.layers.DenseLayer(
l_hidden1_dropout,
    # The number of units in the softmax output layer is the number of classes.
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
true_output = T.ivector('true_output')
objective = lasagne.objectives.Objective(l_output,
loss_function=lasagne.objectives.categorical_crossentropy)
# As mentioned above, when using dropout we should define different losses:
# One for training, one for evaluation. The training loss should apply dropout,
# while the evaluation loss shouldn't. This is controlled by setting the deterministic kwarg.
loss_train = objective.get_loss(target=true_output, deterministic=False)
loss_eval = objective.get_loss(target=true_output, deterministic=True)
all_params = lasagne.layers.get_all_params(l_output)
# Use ADADELTA for updates
updates = lasagne.updates.adadelta(loss_train, all_params)
train = theano.function([l_in.input_var, true_output], loss_train, updates=updates)
# This is the function we'll use to compute the network's output given an input
# (e.g., for computing accuracy). Again, we don't want to apply dropout here
# so we set the deterministic kwarg to True.
get_output = theano.function([l_in.input_var], l_output.get_output(deterministic=True))
# Now, let's train it! We'll chop the training data into mini-batches,
# and compute the validation accuracy every epoch.
BATCH_SIZE = 100
N_EPOCHS = 10
# Keep track of which batch we're training with
batch_idx = 0
# Keep track of which epoch we're on
epoch = 0
while epoch < N_EPOCHS:
# Extract the training data/label batch and update the parameters with it
train(dataset['train']['X'][batch_idx:batch_idx + BATCH_SIZE],
dataset['train']['y'][batch_idx:batch_idx + BATCH_SIZE])
batch_idx += BATCH_SIZE
# Once we've trained on the entire training set...
if batch_idx >= dataset['train']['X'].shape[0]:
# Reset the batch index
batch_idx = 0
# Update the number of epochs trained
epoch += 1
# Compute the network's on the validation data
val_output = get_output(dataset['valid']['X'])
# The predicted class is just the index of the largest probability in the output
val_predictions = np.argmax(val_output, axis=1)
# The accuracy is the average number of correct predictions
accuracy = np.mean(val_predictions == dataset['valid']['y'])
print("Epoch {} validation accuracy: {}".format(epoch, accuracy))
# Hmm... maybe MNIST is a toy example too.
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Toy example
Step2: Ingredients
Step3: DenseLayer
Step4: get_output
Step5: Tasting
Step6: Baking
Step7: Real-world example (MNIST ConvNet)
Step8: ConvNet Input
Step9: Convolutional layers
Step10: Pooling layers
Step11: Dense layers
Step12: Dropout
Step13: Objectives, updates, and training
|
578 | <ASSISTANT_TASK:>
Python Code:
import gammalib
import ctools
import cscripts
obsfile = 'obs_crab_selected.xml'
select = ctools.ctselect()
select['usethres'] = 'DEFAULT'
select['inobs'] = '$HESSDATA/obs/obs_crab.xml'
select['emin'] = 'INDEF' # no manual energy selection
select['rad'] = 2 # by default select around pointing direction of each observations
select['tmin'] = 'INDEF' # no temporal selection
select['outobs'] = obsfile
select.execute()
obs_container = gammalib.GObservations(obsfile)
print(obs_container)
for obs in obs_container:
print(obs)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first step of your analysis consists in selecting the relevant events from the observations. In this step you can select a specific energy range, time range, or region of interest. In the example below you will select events according to the safe energy thresholds that are defined in the effective area component of the instrument response functions. You do this by setting the hidden parameter usethres=DEFAULT. In addition, you will select all events within 2 degrees of the pointing direction. We will write the selected observations to disk for easier use in the following steps.
Step2: Below is a summary of the selected observations.
Step3: Let's look at the properties of each individual observation.
|
579 | <ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
from twords.twords import Twords
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
# this pandas line makes the dataframe display all text in a line; useful for seeing entire tweets
pd.set_option('display.max_colwidth', -1)
twit_mars = Twords()
# set path to folder that contains jar files for twitter search
twit_mars.jar_folder_path = "../jar_files_and_background/"
twit_mars.create_java_tweets(total_num_tweets=100, tweets_per_run=50, querysearch="mars rover",
final_until=None, output_folder="mars_rover",
decay_factor=4, all_tweets=True)
twit_mars.get_java_tweets_from_csv_list()
twit_mars.tweets_df.head(5)
twit = Twords()
twit.jar_folder_path = "../jar_files_and_background/"
twit.get_all_user_tweets("barackobama", tweets_per_run=500)
twit = Twords()
twit.data_path = "barackobama"
twit.get_java_tweets_from_csv_list()
twit.convert_tweet_dates_to_standard()
twit.tweets_df["retweets"] = twit.tweets_df["retweets"].map(int)
twit.tweets_df["favorites"] = twit.tweets_df["favorites"].map(int)
twit.tweets_df.sort_values("favorites", ascending=False)[:5]
twit.tweets_df.sort_values("retweets", ascending=False)[:5]
twit.background_path = '../jar_files_and_background/freq_table_72319443_total_words_twitter_corpus.csv'
twit.create_Background_dict()
twit.create_Stop_words()
twit.keep_column_of_original_tweets()
twit.lower_tweets()
twit.keep_only_unicode_tweet_text()
twit.remove_urls_from_tweets()
twit.remove_punctuation_from_tweets()
twit.drop_non_ascii_characters_from_tweets()
twit.drop_duplicate_tweets()
twit.convert_tweet_dates_to_standard()
twit.sort_tweets_by_date()
twit.create_word_bag()
twit.make_nltk_object_from_word_bag()
twit.create_word_freq_df(10000)
twit.word_freq_df.sort_values("log relative frequency", ascending = False, inplace = True)
twit.word_freq_df.head(20)
twit.tweets_containing("sotu")[:10]
num_words_to_plot = 32
background_cutoff = 100
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 50
background_cutoff = 1000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 32
background_cutoff = 5000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=True).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
num_words_to_plot = 32
background_cutoff = 5000
twit.word_freq_df[twit.word_freq_df['background occurrences']>background_cutoff].sort_values("log relative frequency", ascending=False).set_index("word")["log relative frequency"][-num_words_to_plot:].plot.barh(figsize=(20,
num_words_to_plot/2.), fontsize=30, color="c");
plt.title("log relative frequency", fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Collect Tweets by search term
Step2: Collect Tweets from user
Step3: If you want to sort the tweets by retweets or favorites, you'll need to convert the retweets and favorites columns from unicode into integers
Step4: For fun
Step5: Make word frequency dataframe
Step6: Now plot relative frequency results. We see from word_freq_df that the largest relative frequency terms are specialized things like "sotu" (state of the union) and specific policy-related words like "middle-class." We'll increase the requirement on background words to remove these policy-specific words and get at more general words that the president's twitter account nevertheless uses more often than usual
Step7: At least 1000 background occurrences
Step8: The month of January appears to carry special import with the president's twitter account.
Step9: And finally we'll look at the least presidential words on Barack Obama's twitter account
|
580 | <ASSISTANT_TASK:>
Python Code:
# Data for manual OHE
# Note: the first data point does not include any value for the optional third feature
#from pyspark import SparkContext
#sc =SparkContext()
sampleOne = [(0, 'mouse'), (1, 'black')]
sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree])
print sampleDataRDD.collect()
# EXERCICIO
#aux = sampleOne+sampleTwo+sampleThree
#aux.sort(cmp=lambda x,y:x[0]<y[0])
#sampleOHEDictManual = dict((aux[k], k) for k in range(len(aux)))
sampleOHEDictManual = {}
sampleOHEDictManual[(0,'bear')] = 0
sampleOHEDictManual[(0,'cat')] = 1
sampleOHEDictManual[(0,'mouse')] = 2
sampleOHEDictManual[(1,'black')] = 3
sampleOHEDictManual[(1,'tabby')] = 4
sampleOHEDictManual[(2,'mouse')] = 5
sampleOHEDictManual[(2,'salmon')] = 6
print sampleOHEDictManual
# TEST One-hot-encoding (1a)
from test_helper import Test
Test.assertEqualsHashed(sampleOHEDictManual[(0,'bear')],
'b6589fc6ab0dc82cf12099d1c2d40ab994e8410c',
"incorrect value for sampleOHEDictManual[(0,'bear')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'cat')],
'356a192b7913b04c54574d18c28d46e6395428ab',
"incorrect value for sampleOHEDictManual[(0,'cat')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'mouse')],
'da4b9237bacccdf19c0760cab7aec4a8359010b0',
"incorrect value for sampleOHEDictManual[(0,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'black')],
'77de68daecd823babbb58edb1c8e14d7106e83bb',
"incorrect value for sampleOHEDictManual[(1,'black')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'tabby')],
'1b6453892473a467d07372d45eb05abc2031647a',
"incorrect value for sampleOHEDictManual[(1,'tabby')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'mouse')],
'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4',
"incorrect value for sampleOHEDictManual[(2,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'salmon')],
'c1dfd96eea8cc2b62785275bca38ac261256e278',
"incorrect value for sampleOHEDictManual[(2,'salmon')]")
Test.assertEquals(len(sampleOHEDictManual.keys()), 7,
'incorrect number of keys in sampleOHEDictManual')
import numpy as np
from pyspark.mllib.linalg import SparseVector
# EXERCICIO
aDense = np.array([0., 3., 0., 4.])
aSparse = SparseVector(len(aDense), dict((i, v) for i, v in enumerate(aDense) if v != 0.))
bDense = np.array([0., 0., 0., 1.])
bSparse = SparseVector(len(bDense), dict((i, v) for i, v in enumerate(bDense) if v != 0.))
w = np.array([0.4, 3.1, -1.4, -.5])
print aDense.dot(w)
print aSparse.dot(w)
print bDense.dot(w)
print bSparse.dot(w)
# TEST Sparse Vectors (1b)
Test.assertTrue(isinstance(aSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(isinstance(bSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(aDense.dot(w) == aSparse.dot(w),
'dot product of aDense and w should equal dot product of aSparse and w')
Test.assertTrue(bDense.dot(w) == bSparse.dot(w),
'dot product of bDense and w should equal dot product of bSparse and w')
# Reminder of the sample features
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# EXERCICIO
sampleOneOHEFeatManual = SparseVector(len(sampleOHEDictManual), [(sampleOHEDictManual[i],1.0) for i in sampleOne])#<COMPLETAR>)
sampleTwoOHEFeatManual = SparseVector(len(sampleOHEDictManual), [(sampleOHEDictManual[i],1.0) for i in sampleTwo])#<COMPLETAR>)
sampleThreeOHEFeatManual = SparseVector(len(sampleOHEDictManual), [(sampleOHEDictManual[i],1.0) for i in sampleThree])#<COMPLETAR>)
# TEST OHE Features as sparse vectors (1c)
Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector),
'sampleOneOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector),
'sampleTwoOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector),
'sampleThreeOHEFeatManual needs to be a SparseVector')
Test.assertEqualsHashed(sampleOneOHEFeatManual,
'ecc00223d141b7bd0913d52377cee2cf5783abd6',
'incorrect value for sampleOneOHEFeatManual')
Test.assertEqualsHashed(sampleTwoOHEFeatManual,
'26b023f4109e3b8ab32241938e2e9b9e9d62720a',
'incorrect value for sampleTwoOHEFeatManual')
Test.assertEqualsHashed(sampleThreeOHEFeatManual,
'c04134fd603ae115395b29dcabe9d0c66fbdc8a7',
'incorrect value for sampleThreeOHEFeatManual')
# EXERCICIO
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
Produce a one-hot-encoding from a list of features and an OHE dictionary.
Note:
You should ensure that the indices used to create a SparseVector are sorted.
Args:
rawFeats (list of (int, str)): The features corresponding to a single observation. Each
feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
OHEDict (dict): A mapping of (featureID, value) to unique integer.
numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
value).
Returns:
    SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique
identifiers for the (featureID, value) combinations that occur in the observation and
with values equal to 1.0.
    # Map each raw (featureID, value) feature to its OHE index and sort the indices,
    # since the SparseVector indices should be in increasing order.
    sortedOHEIndices = sorted(OHEDict[feat] for feat in rawFeats)
    return SparseVector(numOHEFeats, [(idx, 1.0) for idx in sortedOHEIndices])
# Calculate the number of features in sampleOHEDictManual
numSampleOHEFeats = len(sampleOHEDictManual)
# Run oneHotEncoding on sampleOne
print sampleOne,sampleOHEDictManual,"\n\n"
sampleOneOHEFeat = oneHotEncoding(sampleOne, sampleOHEDictManual, numSampleOHEFeats)
print sampleOneOHEFeat
# TEST Define an OHE Function (1d)
Test.assertTrue(sampleOneOHEFeat == sampleOneOHEFeatManual,
'sampleOneOHEFeat should equal sampleOneOHEFeatManual')
Test.assertEquals(sampleOneOHEFeat, SparseVector(7, [2,3], [1.0,1.0]),
'incorrect value for sampleOneOHEFeat')
Test.assertEquals(oneHotEncoding([(1, 'black'), (0, 'mouse')], sampleOHEDictManual,
numSampleOHEFeats), SparseVector(7, [2,3], [1.0,1.0]),
'incorrect definition for oneHotEncoding')
# EXERCICIO
sampleOHEData = sampleDataRDD.map(lambda x:oneHotEncoding(x, sampleOHEDictManual, numSampleOHEFeats))#<COMPLETAR>
print sampleOHEData.collect()
# TEST Apply OHE to a dataset (1e)
sampleOHEDataValues = sampleOHEData.collect()
Test.assertTrue(len(sampleOHEDataValues) == 3, 'sampleOHEData should have three elements')
Test.assertEquals(sampleOHEDataValues[0], SparseVector(7, {2: 1.0, 3: 1.0}),
'incorrect OHE for first sample')
Test.assertEquals(sampleOHEDataValues[1], SparseVector(7, {1: 1.0, 4: 1.0, 5: 1.0}),
'incorrect OHE for second sample')
Test.assertEquals(sampleOHEDataValues[2], SparseVector(7, {0: 1.0, 3: 1.0, 6: 1.0}),
'incorrect OHE for third sample')
# EXERCICIO
sampleDistinctFeats = (sampleDataRDD
.flatMap(lambda x: x)#<COMPLETAR>
.distinct()#<COMPLETAR>
)
print sampleDistinctFeats.collect()
# TEST Pair RDD of (featureID, category) (2a)
Test.assertEquals(sorted(sampleDistinctFeats.collect()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'incorrect value for sampleDistinctFeats')
# EXERCICIO
sampleOHEDict = (sampleDistinctFeats
.zipWithIndex()#<COMPLETAR>
.collectAsMap())#<COMPLETAR>)
print sampleOHEDict
# TEST OHE Dictionary from distinct features (2b)
Test.assertEquals(sorted(sampleOHEDict.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDict has unexpected keys')
Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values')
# EXERCICIO
def createOneHotDict(inputData):
Creates a one-hot-encoder dictionary based on the input data.
Args:
inputData (RDD of lists of (int, str)): An RDD of observations where each observation is
made up of a list of (featureID, value) tuples.
Returns:
dict: A dictionary where the keys are (featureID, value) tuples and map to values that are
unique integers.
return (inputData
.flatMap(lambda x:x)#<COMPLETAR>
.distinct()#<COMPLETAR>
.zipWithIndex()#<COMPLETAR>
.collectAsMap()#<COMPLETAR>
)
sampleOHEDictAuto = createOneHotDict(sampleDataRDD)
print sampleOHEDictAuto
# TEST Automated creation of an OHE dictionary (2c)
Test.assertEquals(sorted(sampleOHEDictAuto.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDictAuto has unexpected keys')
Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7),
'sampleOHEDictAuto has unexpected values')
import os.path
baseDir = os.path.join('Data')
inputPath = os.path.join('Aula04', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
if os.path.isfile(fileName):
rawData = (sc
.textFile(fileName, 2)
.map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data
print rawData.take(1)
# EXERCICIO
weights = [.8, .1, .1]
seed = 42
# Use randomSplit with weights and seed
rawTrainData, rawValidationData, rawTestData = rawData.randomSplit(weights, seed)
# Cache the data
rawTrainData.cache()#<COMPLETAR>
rawValidationData.cache()#<COMPLETAR>
rawTestData.cache()#<COMPLETAR>
nTrain = rawTrainData.count()
nVal = rawValidationData.count()
nTest = rawTestData.count()
print nTrain, nVal, nTest, nTrain + nVal + nTest
print rawData.take(1)
# TEST Loading and splitting the data (3a)
Test.assertTrue(all([rawTrainData.is_cached, rawValidationData.is_cached, rawTestData.is_cached]),
'you must cache the split data')
Test.assertEquals(nTrain, 79911, 'incorrect value for nTrain')
Test.assertEquals(nVal, 10075, 'incorrect value for nVal')
Test.assertEquals(nTest, 10014, 'incorrect value for nTest')
# EXERCICIO
def parsePoint(point):
Converts a comma separated string into a list of (featureID, value) tuples.
Note:
featureIDs should start at 0 and increase to the number of features - 1.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
Returns:
list: A list of (featureID, value) tuples.
return list(enumerate(point.split(",")[1:]))
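# Worked example (illustrative input): parsePoint('0,mouse,black') -> [(0, 'mouse'), (1, 'black')];
# the leading label is dropped and the remaining fields are enumerated as featureIDs.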
parsedTrainFeat = rawTrainData.map(parsePoint)
from operator import add
numCategories = (parsedTrainFeat
.flatMap(lambda x: x)#<COMPLETAR>
.distinct()#<COMPLETAR>
.map(lambda x: (x[0],1))#<COMPLETAR>
.reduceByKey(add)#<COMPLETAR>
.sortByKey()#<COMPLETAR>
.collect()
)
print numCategories[2][1]
# TEST Extract features (3b)
Test.assertEquals(numCategories[2][1], 855, 'incorrect implementation of parsePoint')
Test.assertEquals(numCategories[32][1], 4, 'incorrect implementation of parsePoint')
# EXERCICIO
ctrOHEDict = createOneHotDict(parsedTrainFeat)#<COMPLETAR>
numCtrOHEFeats = len(ctrOHEDict.keys())
print numCtrOHEFeats
print ctrOHEDict[(0, '')]
# TEST Create an OHE dictionary from the dataset (3c)
Test.assertEquals(numCtrOHEFeats, 233286, 'incorrect number of features in ctrOHEDict')
Test.assertTrue((0, '') in ctrOHEDict, 'incorrect features in ctrOHEDict')
from pyspark.mllib.regression import LabeledPoint
# EXERCICIO
def parseOHEPoint(point, OHEDict, numOHEFeats):
Obtain the label and feature vector for this raw observation.
Note:
You must use the function `oneHotEncoding` in this implementation or later portions
of this lab may not function as expected.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
OHEDict (dict of (int, str) to int): Mapping of (featureID, value) to unique integer.
numOHEFeats (int): The number of unique features in the training dataset.
Returns:
LabeledPoint: Contains the label for the observation and the one-hot-encoding of the
raw features based on the provided OHE dictionary.
#def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
return LabeledPoint(point.split(',')[0],oneHotEncoding(parsePoint(point), OHEDict, numOHEFeats))
OHETrainData = rawTrainData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHETrainData.cache()
print OHETrainData.take(1)
# Check that oneHotEncoding function was used in parseOHEPoint
backupOneHot = oneHotEncoding
oneHotEncoding = None
withOneHot = False
try: parseOHEPoint(rawTrainData.take(1)[0], ctrOHEDict, numCtrOHEFeats)
except TypeError: withOneHot = True
oneHotEncoding = backupOneHot
# TEST Apply OHE to the dataset (3d)
numNZ = sum(parsedTrainFeat.map(lambda x: len(x)).take(5))
numNZAlt = sum(OHETrainData.map(lambda lp: len(lp.features.indices)).take(5))
Test.assertEquals(numNZ, numNZAlt, 'incorrect implementation of parseOHEPoint')
Test.assertTrue(withOneHot, 'oneHotEncoding not present in parseOHEPoint')
def bucketFeatByCount(featCount):
Bucket the counts by powers of two.
for i in range(11):
size = 2 ** i
if featCount <= size:
return size
return -1
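# Worked examples: bucketFeatByCount(1) -> 1, bucketFeatByCount(3) -> 4,
# bucketFeatByCount(900) -> 1024, and any count above 1024 falls into the -1 bucket.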
featCounts = (OHETrainData
.flatMap(lambda lp: lp.features.indices)
.map(lambda x: (x, 1))
.reduceByKey(lambda x, y: x + y))
featCountsBuckets = (featCounts
.map(lambda x: (bucketFeatByCount(x[1]), 1))
.filter(lambda (k, v): k != -1)
.reduceByKey(lambda x, y: x + y)
.collect())
print featCountsBuckets
%matplotlib inline
import matplotlib.pyplot as plt
x, y = zip(*featCountsBuckets)
x, y = np.log(x), np.log(y)
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999',
gridWidth=1.0):
Template for generating the plot layout.
plt.close()
fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
ax.axes.tick_params(labelcolor='#999999', labelsize='10')
for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
axis.set_ticks_position('none')
axis.set_ticks(ticks)
axis.label.set_color('#999999')
if hideLabels: axis.set_ticklabels([])
plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
return fig, ax
# generate layout and plot data
fig, ax = preparePlot(np.arange(0, 10, 1), np.arange(4, 14, 2))
ax.set_xlabel(r'$\log_e(bucketSize)$'), ax.set_ylabel(r'$\log_e(countInBucket)$')
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
pass
# EXERCICIO
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
    Produce a one-hot-encoding from a list of features and an OHE dictionary.
    Note:
        If a (featureID, value) tuple doesn't have a corresponding key in OHEDict it should be
        ignored.
    Args:
        rawFeats (list of (int, str)): The features corresponding to a single observation. Each
            feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
        OHEDict (dict): A mapping of (featureID, value) to unique integer.
        numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
            value).
    Returns:
        SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique
            identifiers for the (featureID, value) combinations that occur in the observation and
            with values equal to 1.0.
    # Keep only the features that were seen when the OHE dictionary was built, then sort the
    # resulting OHE indices so the SparseVector is created in increasing index order.
    sortedPairs = sorted((OHEDict[feat], 1.0) for feat in rawFeats if feat in OHEDict)
    return SparseVector(numOHEFeats, sortedPairs)
OHEValidationData = rawValidationData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHEValidationData.cache()
print OHEValidationData.take(1)
# TEST Handling unseen features (3e)
numNZVal = (OHEValidationData
.map(lambda lp: len(lp.features.indices))
.sum())
Test.assertEquals(numNZVal, 372080, 'incorrect number of features')
from pyspark.mllib.classification import LogisticRegressionWithSGD
# fixed hyperparameters
numIters = 50
stepSize = 10.
regParam = 1e-6
regType = 'l2'
includeIntercept = True
# EXERCICIO
model0 = LogisticRegressionWithSGD.train(OHETrainData,numIters,stepSize,regParam=regParam,regType=regType,intercept=includeIntercept)#<COMPLETAR>
sortedWeights = sorted(model0.weights)
print sortedWeights[:5], model0.intercept
# TEST Logistic regression (4a)
Test.assertTrue(np.allclose(model0.intercept, 0.56455084025), 'incorrect value for model0.intercept')
Test.assertTrue(np.allclose(sortedWeights[0:5],
[-0.45899236853575609, -0.37973707648623956, -0.36996558266753304,
-0.36934962879928263, -0.32697945415010637]), 'incorrect value for model0.weights')
# EXERCICIO
from math import log
def computeLogLoss(p, y):
Calculates the value of log loss for a given probabilty and label.
Note:
log(0) is undefined, so when p is 0 we need to add a small value (epsilon) to it
and when p is 1 we need to subtract a small value (epsilon) from it.
Args:
p (float): A probabilty between 0 and 1.
y (int): A label. Takes on the values 0 and 1.
Returns:
float: The log loss value.
epsilon = 10e-12
if p == 0:
p += epsilon
elif p == 1:
p -= epsilon
if y==1:
return -log(p)
else:
return -log(1-p)
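# The quantity computed above is the standard log loss
#     loss(p, y) = -(y * log(p) + (1 - y) * log(1 - p)),
# e.g. computeLogLoss(0.5, 1) = -log(0.5) ~= 0.693, which matches the test below.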
print computeLogLoss(.5, 1)
print computeLogLoss(.5, 0)
print computeLogLoss(.99, 1)
print computeLogLoss(.99, 0)
print computeLogLoss(.01, 1)
print computeLogLoss(.01, 0)
print computeLogLoss(0, 1)
print computeLogLoss(1, 1)
print computeLogLoss(1, 0)
# TEST Log loss (4b)
Test.assertTrue(np.allclose([computeLogLoss(.5, 1), computeLogLoss(.01, 0), computeLogLoss(.01, 1)],
[0.69314718056, 0.0100503358535, 4.60517018599]),
'computeLogLoss is not correct')
Test.assertTrue(np.allclose([computeLogLoss(0, 1), computeLogLoss(1, 1), computeLogLoss(1, 0)],
[25.3284360229, 1.00000008275e-11, 25.3284360229]),
'computeLogLoss needs to bound p away from 0 and 1 by epsilon')
# EXERCICIO
# Note that our dataset has a very high click-through rate by design
# In practice click-through rate can be one to two orders of magnitude lower
classOneFracTrain = OHETrainData.map(lambda lp: lp.label).mean()
print classOneFracTrain
logLossTrBase = OHETrainData.map(lambda lp: computeLogLoss(classOneFracTrain, lp.label)).mean()
print 'Baseline Train Logloss = {0:.3f}\n'.format(logLossTrBase)
# TEST Baseline log loss (4c)
Test.assertTrue(np.allclose(classOneFracTrain, 0.22717773523), 'incorrect value for classOneFracTrain')
Test.assertTrue(np.allclose(logLossTrBase, 0.535844), 'incorrect value for logLossTrBase')
# EXERCICIO
from math import exp # exp(-t) = e^-t
def getP(x, w, intercept):
Calculate the probability for an observation given a set of weights and intercept.
Note:
We'll bound our raw prediction between 20 and -20 for numerical purposes.
Args:
x (SparseVector): A vector with values of 1.0 for features that exist in this
observation and 0.0 otherwise.
w (DenseVector): A vector of weights (betas) for the model.
intercept (float): The model's intercept.
Returns:
float: A probability between 0 and 1.
# calculate rawPrediction = w.x + intercept
    rawPrediction = x.dot(w) + intercept
# Bound the raw prediction value
rawPrediction = min(rawPrediction, 20)
rawPrediction = max(rawPrediction, -20)
# calculate (1+e^-rawPrediction)^-1
    return 1. / (1. + exp(-rawPrediction))
trainingPredictions = OHETrainData.map(lambda lp: getP(lp.features, model0.weights, model0.intercept))
print trainingPredictions.take(5)
# TEST Predicted probability (4d)
Test.assertTrue(np.allclose(trainingPredictions.sum(), 18135.4834348),
'incorrect value for trainingPredictions')
# EXERCICIO
def evaluateResults(model, data):
Calculates the log loss for the data given the model.
Args:
model (LogisticRegressionModel): A trained logistic regression model.
data (RDD of LabeledPoint): Labels and features for each observation.
Returns:
float: Log loss for the data.
    return (data
            .map(lambda lp: (lp.label, getP(lp.features, model.weights, model.intercept)))
            .map(lambda (label, p): computeLogLoss(p, label))
            .mean()
           )
logLossTrLR0 = evaluateResults(model0, OHETrainData)
print ('OHE Features Train Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTrBase, logLossTrLR0))
# TEST Evaluate the model (4e)
Test.assertTrue(np.allclose(logLossTrLR0, 0.456903), 'incorrect value for logLossTrLR0')
# EXERCICIO
logLossValBase = OHEValidationData.map(lambda lp: computeLogLoss(classOneFracTrain, lp.label)).mean()
logLossValLR0 = evaluateResults(model0, OHEValidationData)
print ('OHE Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, logLossValLR0))
# TEST Validation log loss (4f)
Test.assertTrue(np.allclose(logLossValBase, 0.527603), 'incorrect value for logLossValBase')
Test.assertTrue(np.allclose(logLossValLR0, 0.456957), 'incorrect value for logLossValLR0')
labelsAndScores = OHEValidationData.map(lambda lp:
(lp.label, getP(lp.features, model0.weights, model0.intercept)))
labelsAndWeights = labelsAndScores.collect()
labelsAndWeights.sort(key=lambda (k, v): v, reverse=True)
labelsByWeight = np.array([k for (k, v) in labelsAndWeights])
length = labelsByWeight.size
truePositives = labelsByWeight.cumsum()
numPositive = truePositives[-1]
falsePositives = np.arange(1.0, length + 1, 1.) - truePositives
truePositiveRate = truePositives / numPositive
falsePositiveRate = falsePositives / (length - numPositive)
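# With the two rate arrays in hand one could also estimate the area under the ROC curve;
# a rough sketch (not part of the original lab) would be:
#   auc = np.trapz(truePositiveRate, falsePositiveRate)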
# Generate layout and plot data
fig, ax = preparePlot(np.arange(0., 1.1, 0.1), np.arange(0., 1.1, 0.1))
ax.set_xlim(-.05, 1.05), ax.set_ylim(-.05, 1.05)
ax.set_ylabel('True Positive Rate (Sensitivity)')
ax.set_xlabel('False Positive Rate (1 - Specificity)')
plt.plot(falsePositiveRate, truePositiveRate, color='#8cbfd0', linestyle='-', linewidth=3.)
plt.plot((0., 1.), (0., 1.), linestyle='--', color='#d6ebf2', linewidth=2.) # Baseline model
pass
from collections import defaultdict
import hashlib
def hashFunction(numBuckets, rawFeats, printMapping=False):
Calculate a feature dictionary for an observation's features based on hashing.
Note:
Use printMapping=True for debug purposes and to better understand how the hashing works.
Args:
numBuckets (int): Number of buckets to use as features.
rawFeats (list of (int, str)): A list of features for an observation. Represented as
(featureID, value) tuples.
printMapping (bool, optional): If true, the mappings of featureString to index will be
printed.
Returns:
dict of int to float: The keys will be integers which represent the buckets that the
features have been hashed to. The value for a given key will contain the count of the
(featureID, value) tuples that have hashed to that key.
mapping = {}
for ind, category in rawFeats:
featureString = category + str(ind)
mapping[featureString] = int(int(hashlib.md5(featureString).hexdigest(), 16) % numBuckets)
if(printMapping): print mapping
sparseFeatures = defaultdict(float)
for bucket in mapping.values():
sparseFeatures[bucket] += 1.0
return dict(sparseFeatures)
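# Worked example (values taken from the test in (5a) below): with only four buckets,
# hashFunction(4, sampleOne) returns {2: 1.0, 3: 1.0}; if two raw features hashed to the
# same bucket, that bucket's value would simply count both of them.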
# Reminder of the sample values:
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# EXERCICIO
# Use four buckets
sampOneFourBuckets = hashFunction(4, sampleOne, True)#<COMPLETAR>
sampTwoFourBuckets = hashFunction(4, sampleTwo, True)#<COMPLETAR>
sampThreeFourBuckets = hashFunction(4, sampleThree, True)#<COMPLETAR>
# Use one hundred buckets
sampOneHundredBuckets = hashFunction(100, sampleOne, True)#<COMPLETAR>
sampTwoHundredBuckets = hashFunction(100, sampleTwo, True)#<COMPLETAR>
sampThreeHundredBuckets = hashFunction(100, sampleThree, True)#<COMPLETAR>
print '\t\t 4 Buckets \t\t\t 100 Buckets'
print 'SampleOne:\t {0}\t\t {1}'.format(sampOneFourBuckets, sampOneHundredBuckets)
print 'SampleTwo:\t {0}\t\t {1}'.format(sampTwoFourBuckets, sampTwoHundredBuckets)
print 'SampleThree:\t {0}\t {1}'.format(sampThreeFourBuckets, sampThreeHundredBuckets)
# TEST Hash function (5a)
Test.assertEquals(sampOneFourBuckets, {2: 1.0, 3: 1.0}, 'incorrect value for sampOneFourBuckets')
Test.assertEquals(sampThreeHundredBuckets, {72: 1.0, 5: 1.0, 14: 1.0},
'incorrect value for sampThreeHundredBuckets')
# EXERCICIO
def parseHashPoint(point, numBuckets):
Create a LabeledPoint for this observation using hashing.
Args:
point (str): A comma separated string where the first value is the label and the rest are
features.
numBuckets: The number of buckets to hash to.
Returns:
LabeledPoint: A LabeledPoint with a label (0.0 or 1.0) and a SparseVector of hashed
features.
return LabeledPoint(point.split(",")[0], SparseVector(numBuckets, hashFunction(numBuckets, parsePoint(point))))
numBucketsCTR = 2 ** 15
hashTrainData = rawTrainData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashTrainData.cache()
hashValidationData = rawValidationData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashValidationData.cache()
hashTestData = rawTestData.map(lambda x: parseHashPoint(x,numBucketsCTR))
hashTestData.cache()
print hashTrainData.take(1)
# TEST Creating hashed features (5b)
hashTrainDataFeatureSum = sum(hashTrainData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTrainDataLabelSum = sum(hashTrainData
.map(lambda lp: lp.label)
.take(100))
hashValidationDataFeatureSum = sum(hashValidationData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashValidationDataLabelSum = sum(hashValidationData
.map(lambda lp: lp.label)
.take(100))
hashTestDataFeatureSum = sum(hashTestData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTestDataLabelSum = sum(hashTestData
.map(lambda lp: lp.label)
.take(100))
Test.assertEquals(hashTrainDataFeatureSum, 772, 'incorrect number of features in hashTrainData')
Test.assertEquals(hashTrainDataLabelSum, 24.0, 'incorrect labels in hashTrainData')
Test.assertEquals(hashValidationDataFeatureSum, 776,
'incorrect number of features in hashValidationData')
Test.assertEquals(hashValidationDataLabelSum, 16.0, 'incorrect labels in hashValidationData')
Test.assertEquals(hashTestDataFeatureSum, 774, 'incorrect number of features in hashTestData')
Test.assertEquals(hashTestDataLabelSum, 23.0, 'incorrect labels in hashTestData')
# EXERCICIO
def computeSparsity(data, d, n):
Calculates the average sparsity for the features in an RDD of LabeledPoints.
Args:
data (RDD of LabeledPoint): The LabeledPoints to use in the sparsity calculation.
d (int): The total number of features.
n (int): The number of observations in the RDD.
Returns:
float: The average of the ratio of features in a point to total features.
    return (data
            .map(lambda lp: len(lp.features.indices))
            .sum()
           ) / (d * n * 1.)
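# Rough magnitude check (matching the tests below): each observation has ~39 non-zero
# features, so the average sparsity is about 39 / 233286 ~= 1.7e-4 for the OHE features
# and 39 / 32768 ~= 1.2e-3 for the 2**15 hashed buckets.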
averageSparsityHash = computeSparsity(hashTrainData, numBucketsCTR, nTrain)
averageSparsityOHE = computeSparsity(OHETrainData, numCtrOHEFeats, nTrain)
print 'Average OHE Sparsity: {0:.7e}'.format(averageSparsityOHE)
print 'Average Hash Sparsity: {0:.7e}'.format(averageSparsityHash)
# TEST Sparsity (5c)
Test.assertTrue(np.allclose(averageSparsityOHE, 1.6717677e-04),
'incorrect value for averageSparsityOHE')
Test.assertTrue(np.allclose(averageSparsityHash, 1.1805561e-03),
'incorrect value for averageSparsityHash')
numIters = 500
regType = 'l2'
includeIntercept = True
# Initialize variables using values from initial model training
bestModel = None
bestLogLoss = 1e10
# EXERCICIO
stepSizes = [1, 10]
regParams = [1e-6, 1e-3]
for stepSize in stepSizes:
for regParam in regParams:
        model = LogisticRegressionWithSGD.train(hashTrainData, numIters, stepSize,
                                                regParam=regParam, regType=regType,
                                                intercept=includeIntercept)
        logLossVa = evaluateResults(model, hashValidationData)
print ('\tstepSize = {0:.1f}, regParam = {1:.0e}: logloss = {2:.3f}'
.format(stepSize, regParam, logLossVa))
if (logLossVa < bestLogLoss):
bestModel = model
bestLogLoss = logLossVa
print ('Hashed Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, bestLogLoss))
# TEST Logistic model with hashed features (5d)
Test.assertTrue(np.allclose(bestLogLoss, 0.4481683608), 'incorrect value for bestLogLoss')
# EXERCICIO
# Log loss for the best model from (5d)
logLossValLR0 = evaluateResults(bestModel, hashValidationData)
logLossTest = evaluateResults(bestModel, hashTestData)
# Log loss for the baseline model
logLossTestBaseline = hashTestData.map(lambda lp: computeLogLoss(classOneFracTrain,lp.label)).mean()
print ('Hashed Features Test Log Loss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTestBaseline, logLossTest))
# TEST Evaluate on the test set (5e)
Test.assertTrue(np.allclose(logLossTestBaseline, 0.537438),
'incorrect value for logLossTestBaseline')
Test.assertTrue(np.allclose(logLossTest, 0.455616931), 'incorrect value for logLossTest')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (1b) Sparse vectors
Step2: (1c) OHE features as sparse vectors
Step4: (1d) The OHE encoding function
Step5: (1e) Apply OHE to a dataset
Step6: Part 2
Step7: (2b) OHE dictionary of the unique features
Step9: (2c) Automated creation of the OHE dictionary
Step10: Part 3
Step11: (3a) Loading and splitting the data
Step13: (3b) Feature extraction
Step14: (3c) Create the OHE dictionary for this dataset
Step16: (3d) Applying OHE to the dataset
Step19: Visualization 1
Step21: (3e) Unseen features
Step22: Part 4
Step24: (4b) Log loss
Step25: (4c) Baseline log loss
Step27: (4d) Predicted probability
Step29: (4e) Evaluate the model
Step30: (4f) Validation log loss
Step31: Visualization 2
Step33: Part 5
Step35: (5b) Creating hashed features
Step37: (5c) Sparsity
Step38: (5d) Logistic model with hashed features
Step39: (5e) Evaluating the test set
|
581 | <ASSISTANT_TASK:>
Python Code:
class DoppelDict(dict):
def __setitem__(self, key, value):
super().__setitem__(key, [value] * 2)
dd = DoppelDict(one=1)
dd # the __init__ inherited from dict ignored our overridden __setitem__, so the value for 'one' was not duplicated
dd['two'] = 2 # the `[]` operator does call our overridden __setitem__ method
dd
dd.update(three=3) # the update method inherited from dict also bypasses our overridden __setitem__
dd
class AnswerDict(dict):
def __getitem__(self, key):
return 42
ad = AnswerDict(a='foo')
ad['a'] # returns 42, as expected
d = {}
d.update(ad) # d is a dict instance; update d with the values from ad
d['a'] # dict.update ignored the AnswerDict.__getitem__ method
import collections
class DoppelDict2(collections.UserDict):
def __setitem__(self, key, value):
super().__setitem__(key, [value] * 2)
dd = DoppelDict2(one=1)
dd
dd['two'] = 2
dd
dd.update(three=3)
dd
class AnswerDict2(collections.UserDict):
def __getitem__(self, key):
return 42
ad = AnswerDict2(a='foo')
ad['a']
d = {}
d.update(ad)
d['a']
d
ad # added here as an extra check; it still feels slightly off, but the results of the calls above match expectations
class A:
def ping(self):
print('ping', self)
class B(A):
def pong(self):
print('pong', self)
class C(A):
def pong(self):
print('PONG', self)
class D(B, C):
def ping(self):
super().ping()
print('post-ping:', self)
def pingpong(self):
self.ping()
super().ping()
self.pong()
super().pong
C.pong(self)
d = D()
d.pong() # calling d.pong() directly runs the version defined in class B
C.pong(d) # superclass methods can be called directly, passing the instance as an explicit argument
D.__mro__
def ping(self):
    A.ping(self) # rather than super().ping()
print('post-ping', self)
d = D()
d.ping() # prints two lines: the first comes from A via super(), the second from D
d.pingpong() # the final call goes straight to C's pong implementation, bypassing the MRO
bool.__mro__
def print_mro(cls):
print(', '.join(c.__name__ for c in cls.__mro__))
print_mro(bool)
import numbers
print_mro(numbers.Integral)
import io
print_mro(io.BytesIO)
print_mro(io.TextIOWrapper)
import tkinter
print_mro(tkinter.Text)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This behaviour of the built-in types violates a basic principle of object-oriented programming: the search for a method should always start from the class of the instance (self), even when the call happens inside a method implemented in a superclass. In this unfortunate situation, __missing__ does work as expected (section 3.4), but that is a special case
Step2: Subclassing built-in types such as dict, list or str directly is error-prone, because the built-in methods mostly ignore user-overridden methods. Instead of subclassing the built-ins, derive your classes from the collections module -- UserDict, UserList and UserString -- which are specially designed to be easy to extend
Step3: To summarise, the problem described in this section applies only to method delegation inside the C-language implementation of the built-in types, and it only affects user-defined classes that inherit directly from those types. If you subclass a class written in Python, such as UserDict or MutableMapping, you will not be troubled by it
Step4: Both B and C implement a pong method; the only difference is what they print. Which pong is run when you call d.pong() on an instance of D? In C++ you must qualify the call with a class name to remove the ambiguity. The same can be done in Python, as follows:
Step5: Python can tell which method d.pong() should call because it traverses the inheritance graph in a specific order, the Method Resolution Order (MRO). Every class has a __mro__ attribute whose value is a tuple listing the superclasses in MRO order, from the current class all the way up to object. The __mro__ of class D looks like this:
Step6: The recommended way to delegate a method call to a superclass is the built-in super() function, which became easier to use in Python 3, as the pingpong method of class D above shows. However, it is sometimes convenient to bypass the MRO and call a method on a specific superclass directly. For example, D.ping could be written as
Step7: Note that when you call an instance method directly on a class you must pass self explicitly, because you are accessing an unbound method
Step8: Now let's see what happens when pingpong is called on an instance of D, as shown below:
Step9: The MRO takes into account not only the inheritance graph but also the order in which superclasses are listed in the subclass declaration. In other words, if D were declared as class D(C, B), its __mro__ would be different: C would be searched before B (see the short sketch after this list)
Step10: Before wrapping up this discussion of method resolution, let's look at the complicated multiple inheritance used by Tkinter:
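To make Step9 concrete, here is a minimal sketch (it assumes the A, B and C classes defined in the code above; only the order of the bases is changed):
class D2(C, B):
    pass
print(D2.__mro__)
# With the bases reversed, C precedes B in the MRO, so D2().pong() prints 'PONG'.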
|
582 | <ASSISTANT_TASK:>
Python Code:
# Import deriva modules and pandas DataFrame (for use in examples only)
from deriva.core import ErmrestCatalog, get_credential
from pandas import DataFrame
# Connect with the deriva catalog
protocol = 'https'
hostname = 'www.facebase.org'
catalog_number = 1
credential = None
# If you need to authenticate, use Deriva Auth agent and get the credential
# credential = get_credential(hostname)
catalog = ErmrestCatalog(protocol, hostname, catalog_number, credential)
# Get the path builder interface for this catalog
pb = catalog.getPathBuilder()
# Get some local variable handles to tables for convenience
dataset = pb.isa.dataset
experiment = pb.isa.experiment
biosample = pb.isa.biosample
replicate = pb.isa.replicate
entities = dataset.filter(dataset.released == True).entities()
len(entities)
path = dataset.alias('D').path
path.link(experiment).link(replicate)
results = path.attributes(path.D)
print(len(results))
print(results.uri)
results = path.attributes(path.D,
path.experiment.experiment_type,
path.replicate)
print(len(results))
print(results.uri)
results = path.D.attributes(path.D,
path.experiment.experiment_type,
path.replicate)
print(len(results))
print(results.uri)
path = dataset.link(experiment).filter(experiment.molecule_type == None)
print(path.uri)
print(len(path.entities()))
path = dataset.filter(dataset.description.ciregexp('palate'))
print(path.uri)
print(len(path.entities()))
path = dataset.filter( ~ (dataset.description.ciregexp('palate')) )
print(path.uri)
print(len(path.entities()))
path = dataset.link(biosample).filter(
((biosample.species == 'NCBITAXON:10090') & (biosample.anatomy == 'UBERON:0002490')))
print(path.uri)
DataFrame(path.entities())
path = dataset.link(biosample).filter(
((biosample.species == 'NCBITAXON:10090') & (biosample.anatomy == 'UBERON:0002490')) |
((biosample.specimen == 'FACEBASE:1-4GNR') & (biosample.stage == 'FACEBASE:1-4GJA')))
print(path.uri)
DataFrame(path.entities())
path = dataset.filter(dataset.release_date >= '2017-01-01') \
.link(experiment).filter(experiment.experiment_type == 'OBI:0001271') \
.link(replicate).filter(replicate.bioreplicate_number == 1)
print(path.uri)
DataFrame(path.entities())
path = dataset.link(experiment, on=(dataset.RID==experiment.dataset))
print(path.uri)
DataFrame(path.entities().fetch(limit=3))
path = dataset.link(biosample.alias('S'), on=(dataset.RID==biosample.dataset))
print(path.uri)
# Notice in between `link`s that we have to reset the context back to `dataset` so that the
# second join is also left joined from the dataset table instance.
path = dataset.link(experiment.alias('E'), on=dataset.RID==experiment.dataset, join_type='left') \
.dataset \
.link(biosample.alias('S'), on=dataset.RID==biosample.dataset, join_type='left') \
# Notice that we have to perform the attribute fetch from the context of the `path.dataset`
# table instance.
results = path.dataset.attributes(path.dataset.RID,
path.dataset.title,
path.E.experiment_type,
path.S.species)
print(results.uri)
len(results)
DataFrame(results)[:10]
species = pb.vocab.species
stage = pb.vocab.stage
# Here we have to use the container `columns_definitions` because `name` is reserved
path = biosample.alias('S').link(species).filter(species.column_definitions['name'] == 'Mus musculus')
print(path.uri)
path.S.link(stage).filter(stage.column_definitions['name'] == 'E10.5')
print(path.uri)
results = path.S.attributes(path.S.RID,
path.S.collection_date,
path.species.column_definitions['name'].alias('species'),
path.species.column_definitions['uri'].alias('species_uri'),
path.stage.column_definitions['name'].alias('stage'),
path.stage.column_definitions['uri'].alias('stage_uri'))
print(results.uri)
DataFrame(results)
# We need to import the `ArrayD` aggregate function for this example.
from deriva.core.datapath import ArrayD
# For convenience, get python objects for the additional tables.
dataset_organism = pb.isa.dataset_organism
dataset_experiment_type = pb.isa.dataset_experiment_type
species = pb.vocab.species
experiment_type = pb.vocab.experiment_type
# Start by doing a couple left outer joins on the dataset-term association tables, then link
# (i.e., inner join) the associated vocabulary term table, then reset the context back to the
# dataset table.
path = dataset.link(dataset_organism, on=dataset.id==dataset_organism.dataset_id, join_type='left') \
.link(species) \
.dataset \
.link(dataset_experiment_type, on=dataset.id==dataset_experiment_type.dataset_id, join_type='left') \
.link(experiment_type)
# Again, notice that we reset the context to the `dataset` table alias so that we will retrieve
# dataset entities based on the groupings to be defined next. For the groupby key we will use the
# dataset.RID, but for this example any primary key would work. Then we will get aggregate arrays
# of the linked vocabulary tables.
results = path.dataset.groupby(dataset.RID).attributes(
dataset.title,
ArrayD(path.species.column_definitions['name']).alias('species'),
ArrayD(path.experiment_type.column_definitions['name']).alias('experiment_type')
)
#results = path.dataset.entities()
print(results.uri)
print(len(results))
DataFrame(results.fetch(limit=20))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implicit DataPaths
Step2: DataPath-like methods
Step3: It is important to remember that the attributes(...) method returns a result set based on the entity type of the last elmenent of the path. In this example that means the number of results will be determined by the number of unique rows in the replicate table instance in the path created above, as the last link method used the replicate table.
Step4: If you want to base the results on a different entity, you can introduce a table instance alias into the end of the path, before calling the attributes function. In this case, even though we are asking for the same attributes, we are getting the set of datasets, not the set of replicates. Also, since we are including the attributes from dataset in our query, we know that we will not be seeing any duplicate rows.
Step5: Filtering Examples
Step6: Example
Step7: Example
Step8: Example
Step9: Example
Step10: Example
Step11: Linking Examples
Step12: IMPORTANT Not all tables are related by foreign key references. ERMrest does not allow arbitrary relational joins. Tables must be related by a foreign key reference in order to link them in a data path.
Step13: Example
Step14: Notice that we cannot use the alias right away in the on clause because it was not bound to the path until after the link(...) operation was performed.
Step15: We can see above that we have a full set of datasets whether or not they have any experiments with biosamples. For further evidence, we can convert to a DataFrame and look at a slice of its entries. Note that the biosample's 'species' and 'stage' attributes do not exist for some results (i.e., NaN) because those attributes did not exist for the join condition.
Step16: Faceting Examples
Step17: First, let's link samples with species and filter on the term "Mus musculus" (i.e., "mouse").
Step18: Now the context of the path is the species table instance, but we need to link from the biosample to the age stage table.
Step19: Now, the path context is the age stage table instance, but we wanted to get the entities for the biosample table. To do so, again we will reference the biosample table instance by the alias S we used. From there, we will call the entities(...) method to get the samples.
Step20: Grouping Examples
|
583 | <ASSISTANT_TASK:>
Python Code:
# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_resolution_matrix, get_cross_talk,
get_point_spread)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE' # can be 'MNE' or 'sLORETA'
# compute resolution matrix for sLORETA
rm_lor = make_inverse_resolution_matrix(forward, inverse_operator,
method='sLORETA', lambda2=lambda2)
# get PSF and CTF for sLORETA at one vertex
sources = [1000]
stc_psf = get_point_spread(rm_lor, forward['src'], sources, norm=True)
stc_ctf = get_cross_talk(rm_lor, forward['src'], sources, norm=True)
# Which vertex corresponds to selected source
vertno_lh = forward['src'][0]['vertno']
verttrue = [vertno_lh[sources[0]]] # just one vertex
# find vertices with maxima in PSF and CTF
vert_max_psf = vertno_lh[stc_psf.data.argmax()]
vert_max_ctf = vertno_lh[stc_ctf.data.argmax()]
brain_psf = stc_psf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_psf.show_view('ventral')
brain_psf.add_text(0.1, 0.9, 'sLORETA PSF', 'title', font_size=16)
# True source location for PSF
brain_psf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of PSF
brain_psf.add_foci(vert_max_psf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
brain_ctf = stc_ctf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir)
brain_ctf.add_text(0.1, 0.9, 'sLORETA CTF', 'title', font_size=16)
brain_ctf.show_view('ventral')
brain_ctf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
color='green')
# Maximum of CTF
brain_ctf.add_foci(vert_max_ctf, coords_as_verts=True, scale_factor=1.,
hemi='lh', color='black')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize
Step2: CTF
|
584 | <ASSISTANT_TASK:>
Python Code:
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
import numpy as np
def log_prob(x, mu, cov):
diff = x - mu
return -0.5 * np.dot(diff, np.linalg.solve(cov, diff))
ndim = 5
np.random.seed(42)
means = np.random.rand(ndim)
cov = 0.5 - np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
cov = np.dot(cov, cov)
nwalkers = 32
p0 = np.random.rand(nwalkers, ndim)
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=[means, cov])
log_prob(p0[0], means, cov)
state = sampler.run_mcmc(p0, 100)
sampler.reset()
sampler.run_mcmc(state, 10000);
import matplotlib.pyplot as plt
samples = sampler.get_chain(flat=True)
plt.hist(samples[:, 0], 100, color="k", histtype="step")
plt.xlabel(r"$\theta_1$")
plt.ylabel(r"$p(\theta_1)$")
plt.gca().set_yticks([]);
print(
"Mean acceptance fraction: {0:.3f}".format(
np.mean(sampler.acceptance_fraction)
)
)
print(
"Mean autocorrelation time: {0:.3f} steps".format(
np.mean(sampler.get_autocorr_time())
)
)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The easiest way to get started with using emcee is to use it for a project. To get you started, here’s an annotated, fully-functional example that demonstrates a standard usage pattern.
Step2: Then, we’ll code up a Python function that returns the density $p(\vec{x})$ for specific values of $\vec{x}$, $\vec{\mu}$ and $\Sigma^{-1}$. In fact, emcee actually requires the logarithm of $p$. We’ll call it log_prob
Step3: It is important that the first argument of the probability function is the position of a single walker (an N-dimensional numpy array)
Step4: and where cov is $\Sigma$.
Step5: Now that we've gotten past all the bookkeeping stuff, we can move on to
Step6: Remember how our function log_prob required two extra arguments when it was defined? They are supplied through the args keyword when the sampler is constructed, as shown above
Step7: If we didn't provide any
Step8: You'll notice that I saved the final position of the walkers (after the
Step9: The samples can be accessed using the {func}EnsembleSampler.get_chain method.
Step10: Another good test of whether or not the sampling went well is to check
Step11: and the integrated autocorrelation time (see the {ref}autocorr tutorial for more details)
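Building on these last two checks, here is a small hedged sketch (it assumes the sampler object built above; the discard/thin values are illustrative rules of thumb, not part of the original example):
tau = sampler.get_autocorr_time()                # integrated autocorrelation time per parameter
burnin = int(2 * np.max(tau))                    # discard a burn-in of a couple of autocorrelation times
thin = max(1, int(0.5 * np.min(tau)))            # keep roughly independent samples
clean_samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)
print(clean_samples.shape)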
|
585 | <ASSISTANT_TASK:>
Python Code:
## from __future__ import print_function # uncomment if using python 2
from os.path import join
import pandas as pd
import numpy as np
from datetime import datetime
%matplotlib inline
url = 'http://casas.wsu.edu/datasets/twor.2009.zip'
zipfile = url.split('/')[-1]
dirname = '.'.join(zipfile.split('.')[:2])
filename = join(dirname, 'data')
print(' url: {}'.format(url))
print(' zipfile: {}'.format(zipfile))
print(' dirname: {}'.format(dirname))
print('filename: {}'.format(filename))
#from subprocess import call
#call(('wget', url));
#call(('unzip', zipfile));
from IPython.display import Image
Image("twor.2009/sensorlayout2.png")
column_headings = ('date', 'time', 'sensor', 'value', 'annotation', 'state')
df = pd.read_csv(
filename,
delim_whitespace=True, # Note, the file is delimited by both space and tab characters
names=column_headings
)
df.head()
df.columns
#df.sensor
df.dtypes
df.time[0]
df['datetime'] = pd.to_datetime(df[['date', 'time']].apply(lambda row: ' '.join(row), axis=1))
#df.ix[df.date.str.startswith('22009'), 'date'] = '2009-02-03'
df.loc[df.date.str.startswith('22009'), 'date'] = '2009-02-03'
df['datetime'] = pd.to_datetime(df[['date', 'time']].apply(lambda row: ' '.join(row), axis=1))
df.dtypes
df = df[['datetime', 'sensor', 'value', 'annotation', 'state']]
df.set_index('datetime', inplace=True)
df.head()
df.sensor.unique()
df.annotation.unique()
df.state.unique()
df.value.unique()
categorical_inds = df.sensor.str.match(r"^[^A]")
df_categorical = df.loc[categorical_inds][['sensor', 'value']]
df_categorical.head()
df_categorical.value.value_counts()
for val in ('O', 'OF', 'OFFF', 'ONF'):
df_categorical.loc[df_categorical.value == val, 'value'] = 'OFF';
df_categorical.value.value_counts()
df_categorical.loc[:, 'sensor_value'] = df_categorical[['sensor', 'value']].apply(
lambda row: '{}_{}'.format(*row).lower(),
axis=1
)
df_categorical.head()
df_categorical_exploded = pd.get_dummies(df_categorical.sensor_value)
df_categorical_exploded.head()
df_categorical_exploded.values
df_categorical_exploded['m35_off'].plot(figsize=(10,5));
kitchen_columns = ['m{}_on'.format(ii) for ii in (15,16,17,18,19,51)]
start = datetime(2009, 2, 2, 10)
end = datetime(2009, 2, 2, 11)
df_categorical_exploded[(df_categorical_exploded.index > start) & (df_categorical_exploded.index < end)][kitchen_columns].plot(figsize=(10,15), subplots=True);
start = datetime(2009, 2, 2, 15)
end = datetime(2009, 2, 2, 17)
df_categorical_exploded[(df_categorical_exploded.index > start) & (df_categorical_exploded.index < end)][kitchen_columns].plot(figsize=(10,15), subplots=True);
numeric_inds = df.sensor.str.startswith("A")
df_numeric = df.loc[numeric_inds][['sensor', 'value']]
df_numeric.head()
np.asarray(df_numeric.value)
df_numeric.value.astype(float)
f_inds = df_numeric.value.str.endswith('F')
df_numeric.loc[f_inds, 'value'] = df_numeric.loc[f_inds, 'value'].str[:-1]
df_numeric.loc[f_inds]
df_numeric.value = df_numeric.value.map(float)
unique_keys = df_numeric.sensor.unique()
unique_keys
df_numeric = pd.merge(df_numeric[['value']], pd.get_dummies(df_numeric.sensor), left_index=True, right_index=True)
df_numeric.head()
for key in unique_keys:
df_numeric[key] *= df_numeric.value
df_numeric = df_numeric[unique_keys]
# Print a larger sample of the data frame
df_numeric
#df_numeric.value.groupby(df_numeric.sensor).plot(kind='kde', legend=True, figsize=(10,5))
df_numeric[unique_keys].plot(kind='kde', legend=True, figsize=(10,5), subplots=True);
df_categorical_exploded.head()
df_numeric.head()
df_joined = pd.merge(
df_categorical_exploded,
df_numeric,
left_index=True,
right_index=True,
how='outer'
)
df_joined.head()
annotation_inds = pd.notnull(df.annotation)
df_annotation = df.loc[annotation_inds][['annotation', 'state']]
# There are some duplicated indices. Remove with
df_annotation = df_annotation.groupby(level=0).first()
df_annotation.head()
for annotation, group in df_annotation.groupby('annotation'):
counts = group.state.value_counts()
if counts.begin == counts.end:
print(' {}: equal counts ({}, {})'.format(
annotation,
counts.begin,
counts.end
))
else:
print(' *** WARNING {}: inconsistent annotation counts with {} begin and {} end'.format(
annotation,
counts.begin,
counts.end
))
df_annotation.loc[df_annotation.annotation == 'R1_Work']
def filter_annotations(anns):
left = iter(anns.index[:-1])
right = iter(anns.index[1:])
inds = []
for ii, (ll, rr) in enumerate(zip(left, right)):
try:
l = anns.loc[ll]
r = anns.loc[rr]
if l.state == 'begin' and r.state == 'end':
inds.extend([ll, rr])
except ValueError:
print(ii)
print(l)
print()
print(r)
print()
print()
            raise  # re-raise the unexpected error after printing the offending rows
return anns.loc[inds, :]
dfs = []
for annotation, group in df_annotation.groupby('annotation'):
print('{:>30} - {}'.format(annotation, group.size))
dfs.append(filter_annotations(group))
df_annotation_exploded = pd.get_dummies(df_annotation.annotation)
df_annotation_exploded.head(50)
paired = pd.concat(dfs)
left = paired.index[:-1:2]
right = paired.index[1::2]
print(df_annotation_exploded.mean())
for ll, rr in zip(left, right):
l = paired.loc[ll]
r = paired.loc[rr]
assert l.annotation == r.annotation
annotation = l.annotation
begin = l.name
end = r.name
# Another advantage of using datetime index: can slice with time ranges
df_annotation_exploded.loc[begin:end, annotation] = 1
df_annotation_exploded.head(50)
dataset = pd.merge(
df_joined,
df_annotation_exploded,
left_index=True,
right_index=True,
how='outer'
)
data_cols = df_joined.columns
annotation_cols = df_annotation_exploded.columns
dataset[data_cols] = dataset[data_cols].fillna(0)
dataset[annotation_cols] = dataset[annotation_cols].ffill()
dataset.head()
dataset[data_cols].head()
dataset[annotation_cols].head()
dataset.loc[dataset.Meal_Preparation == 1][kitchen_columns + ['AD1-A']].head()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up various parameters and variables that will be used in this script
Step2: Download the dataset, and unzip it using the following commands in shell
Step3: Create a datetime column (currently date and time are separate, and are also strings). The following
Step4: By default, the new column is added to the end of the columns. However, since the date and time are now captured by the datetime column, we no longer need the date and time columns. Additionally, we will see how it is useful to have the datetime column as an index variable
Step5: We can now inspect the unique sensor, activity and value values
Step6: We can see here that the unique values contains both numbers (eg 2.82231) and strings (ON, OFF). This is because the data recorded by all sensors is contained in one column. The next few steps will be to extract the non-numeric (ie categorical) data from the column.
Step7: Our ambition is to create a matrix where each column corresponds to one of the combinations of sensor and value that are available in the data. For example, one column would correspond to the state of M35 being ON, and another column would correspond to M35 being OFF. The reason for having two columns to represent the ON and OFF states is that different information may be conveyed by each transition. For example, a sensor turning on may correspond to somebody entering a room, while the same sensor turning off may correspond to somebody leaving the room.
Step8: And if desired, we can get a matrix form of the data with the values property
Step9: Numeric columns
Step10: Note, however, that since the value data was obtained from file that it is still in string format. We can convert these str data types to floating point data types easily as follows
Step11: We can now map all data to floating point numbers
Step12: There are only three numeric sensor types, as we can see with the unique member function
Step13: Create some new columns for the three sensors (AD1-A, AD1-B, and AD1-C), and merge with the original data frame
Step14: Merging categorical and numeric data together
Step15: We will use the pandas.merge function to join the two dataframes. In this case, we must use more of its functionality. We will merge on the index of the categorical and numeric dataframes. However, since none of these timestamps are shared (refer to the original data frame) we will do the merge with an "outer" join.
Step16: Note, that in merging the dataframes, we now have a time-ordered dataframe. This is one of the advantages of using datetimes as the index type in dataframes since pandas will understand precisely how to merge the two datasets.
Step17: It's important to ensure that the expected format of the data is consistent in this dataset. This means that there should be
Step18: We can see here that two activities have inconsistent numbers of begin and end statements for the activities. Interestingly, they both have more end conditions than begin conditions. In some sense, this is a less critical bug than having more begin statements.
Step19: Querying consecutive annotations, we can print the pair of annotations that have
Step20: Create the output dataframe
Step21: Merging the full dataset
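A minimal sketch of the merge-and-fill pattern described in the steps above (it reuses the df_joined and df_annotation_exploded frames built in the code; the fill strategy mirrors the final cell):
merged = pd.merge(df_joined, df_annotation_exploded,
                  left_index=True, right_index=True, how='outer')
merged[df_joined.columns] = merged[df_joined.columns].fillna(0)          # sensors: no event means 0
merged[df_annotation_exploded.columns] = merged[df_annotation_exploded.columns].ffill()  # activities persist until they end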
|
586 | <ASSISTANT_TASK:>
Python Code:
!pip install -q git+https://github.com/tensorflow/docs
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow_docs.vis import embed
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import imageio
batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128
# We'll use all the available examples from both the training and test
# sets.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_labels = np.concatenate([y_train, y_test])
# Scale the pixel values to [0, 1] range, add a channel dimension to
# the images, and one-hot encode the labels.
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
all_labels = keras.utils.to_categorical(all_labels, 10)
# Create tf.data.Dataset.
dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
print(f"Shape of training images: {all_digits.shape}")
print(f"Shape of training labels: {all_labels.shape}")
generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
print(generator_in_channels, discriminator_in_channels)
# Create the discriminator.
discriminator = keras.Sequential(
[
keras.layers.InputLayer((28, 28, discriminator_in_channels)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator.
generator = keras.Sequential(
[
keras.layers.InputLayer((generator_in_channels,)),
# We want to generate 128 + num_classes coefficients to reshape into a
# 7x7x(128 + num_classes) map.
layers.Dense(7 * 7 * generator_in_channels),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, generator_in_channels)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
class ConditionalGAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super(ConditionalGAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.gen_loss_tracker = keras.metrics.Mean(name="generator_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="discriminator_loss")
@property
def metrics(self):
return [self.gen_loss_tracker, self.disc_loss_tracker]
def compile(self, d_optimizer, g_optimizer, loss_fn):
super(ConditionalGAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, data):
# Unpack the data.
real_images, one_hot_labels = data
# Add dummy dimensions to the labels so that they can be concatenated with
# the images. This is for the discriminator.
image_one_hot_labels = one_hot_labels[:, :, None, None]
image_one_hot_labels = tf.repeat(
image_one_hot_labels, repeats=[image_size * image_size]
)
image_one_hot_labels = tf.reshape(
image_one_hot_labels, (-1, image_size, image_size, num_classes)
)
# Sample random points in the latent space and concatenate the labels.
# This is for the generator.
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
random_vector_labels = tf.concat(
[random_latent_vectors, one_hot_labels], axis=1
)
# Decode the noise (guided by labels) to fake images.
generated_images = self.generator(random_vector_labels)
# Combine them with real images. Note that we are concatenating the labels
# with these images here.
fake_image_and_labels = tf.concat([generated_images, image_one_hot_labels], -1)
real_image_and_labels = tf.concat([real_images, image_one_hot_labels], -1)
combined_images = tf.concat(
[fake_image_and_labels, real_image_and_labels], axis=0
)
# Assemble labels discriminating real from fake images.
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Train the discriminator.
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space.
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
random_vector_labels = tf.concat(
[random_latent_vectors, one_hot_labels], axis=1
)
# Assemble labels that say "all real images".
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
fake_images = self.generator(random_vector_labels)
fake_image_and_labels = tf.concat([fake_images, image_one_hot_labels], -1)
predictions = self.discriminator(fake_image_and_labels)
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Monitor loss.
self.gen_loss_tracker.update_state(g_loss)
self.disc_loss_tracker.update_state(d_loss)
return {
"g_loss": self.gen_loss_tracker.result(),
"d_loss": self.disc_loss_tracker.result(),
}
cond_gan = ConditionalGAN(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
cond_gan.fit(dataset, epochs=20)
# We first extract the trained generator from our Conditional GAN.
trained_gen = cond_gan.generator
# Choose the number of intermediate images that would be generated in
# between the interpolation + 2 (start and last images).
num_interpolation = 9 # @param {type:"integer"}
# Sample noise for the interpolation.
interpolation_noise = tf.random.normal(shape=(1, latent_dim))
interpolation_noise = tf.repeat(interpolation_noise, repeats=num_interpolation)
interpolation_noise = tf.reshape(interpolation_noise, (num_interpolation, latent_dim))
def interpolate_class(first_number, second_number):
# Convert the start and end labels to one-hot encoded vectors.
first_label = keras.utils.to_categorical([first_number], num_classes)
second_label = keras.utils.to_categorical([second_number], num_classes)
first_label = tf.cast(first_label, tf.float32)
second_label = tf.cast(second_label, tf.float32)
# Calculate the interpolation vector between the two labels.
percent_second_label = tf.linspace(0, 1, num_interpolation)[:, None]
percent_second_label = tf.cast(percent_second_label, tf.float32)
interpolation_labels = (
first_label * (1 - percent_second_label) + second_label * percent_second_label
)
# Combine the noise and the labels and run inference with the generator.
noise_and_labels = tf.concat([interpolation_noise, interpolation_labels], 1)
fake = trained_gen.predict(noise_and_labels)
return fake
start_class = 1 # @param {type:"slider", min:0, max:9, step:1}
end_class = 5 # @param {type:"slider", min:0, max:9, step:1}
fake_images = interpolate_class(start_class, end_class)
fake_images *= 255.0
converted_images = fake_images.astype(np.uint8)
converted_images = tf.image.resize(converted_images, (96, 96)).numpy().astype(np.uint8)
imageio.mimsave("animation.gif", converted_images, fps=1)
embed.embed_file("animation.gif")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Constants and hyperparameters
Step3: Loading the MNIST dataset and preprocessing it
Step4: Calculating the number of input channel for the generator and discriminator
Step5: Creating the discriminator and generator
Step6: Creating a ConditionalGAN model
Step7: Training the Conditional GAN
Step8: Interpolating between classes with the trained generator
Step9: Here, we first sample noise from a normal distribution and then we repeat that for
|
587 | <ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
mpl
# I normally prototype my code in an editor + ipy terminal.
# In those cases I import pyplot and numpy via
import matplotlib.pyplot as plt
import numpy as np
# In Jupy notebooks we've got magic functions and pylab gives you pyplot as plt and numpy as np
# %pylab
# Additionally, inline will let you plot inline of the notebook
# %pylab inline
# And notebook, as I've just found out gives you some resizing etc... tools inline.
# %pylab notebook
y = np.ones(10)
for x in range(2,10):
y[x] = y[x-2] + y[x-1]
plt.plot(y)
plt.title('This story')
plt.show()
print('I can not run this command until I close the window because interactive mode is turned off')
%pylab inline
# Set default figure size for your viewing pleasure...
pylab.rcParams['figure.figsize'] = (10.0, 7.0)
x = np.linspace(0,5,100)
y = np.random.exponential(1./3., 100)
# Make a simple plot of x vs y, setting the points to have an 'x' marker.
plt.plot(x,y, c='r',marker='x')
# Label our x and y axes and give the plot a title.
plt.xlabel('Sample time (au)')
plt.ylabel('Exponential Sample (au)')
plt.title('See the trend?')
x = np.linspace(0,6.,1000.)
# Alpha = 0.5, color = red, linstyle = dotted, linewidth = 3, label = x
plt.plot(x, x, alpha = 0.5, c = 'r', ls = ':', lw=3., label='x')
# Alpha = 0.5, color = blue, linstyle = solid, linewidth = 3, label = x**(3/2)
# Check out the LaTeX!
plt.plot(x, x**(3./2), alpha = 0.5, c = 'b', ls = '-', lw=3., label=r'x$^{3/2}$')
# And so on...
plt.plot(x, x**2, alpha = 0.5, c = 'g', ls = '--', lw=3., label=r'x$^2$')
plt.plot(x, np.log(1+x)*20., alpha = 0.5, c = 'c', ls = '-.', lw=3., label='log(1+x)')
# Add a legend (loc gives some options about where the legend is placed)
plt.legend(loc=2)
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
# size = area variable, c = colors variable
x = plt.scatter(x, y, s=area, c=colors, alpha=0.4)
plt.show()
N=10000
values1 = np.random.normal(25., 3., N)
values2 = np.random.normal(33., 8., N//7)  # size must be an integer, so use floor division
valuestot = np.concatenate([values1,values2])
binedges = np.arange(0,101,1)
bincenters = (binedges[1:] + binedges[:-1])/2.
# plt.hist gives you the ability to histogram and plot all in one command.
x1 = plt.hist(valuestot, bins=binedges, color='g', alpha=0.5, label='total')
x2 = plt.hist(values2, bins=binedges, color='r', alpha=0.5, histtype='step', linewidth=3, label='values 1')
x3 = plt.hist(values1, bins=binedges, color='b', alpha=0.5, histtype='step', linewidth=3, label='values 2')
plt.legend(loc=7)
fig = plt.figure(figsize=(10,6))
# Make an axes as if the figure had 1 row, 2 columns and it would be the first of the two sub-divisions.
ax1 = fig.add_subplot(121)
plot1 = ax1.plot([1,2,3,4,1,0])
ax1.set_xlabel('time since start of talk')
ax1.set_ylabel('interest level')
ax1.set_xbound([-1.,6.])
# Make an axes as if the figure had 1 row, 2 columns and it would be the second of the two sub-divisions.
ax2 = fig.add_subplot(122)
plot2 = ax2.scatter([1,1,1,2,2,2,3,3,3,4,4,4], [1,2,3]*4)
ax2.set_title('A commentary on chairs with wheels')
print(plot1)
print(plot2)
fig2 = plt.figure(figsize=(10,10))
ax1 = fig2.add_axes([0.1,0.1,0.8,0.4])
histvals = ax1.hist(np.random.exponential(0.5,5000), bins=np.arange(0,5, 0.1))
ax1.set_xlabel('Sampled Value')
ax1.set_ylabel('Counts per bin')
ax2 = fig2.add_axes([0.3,0.55, 0.7, 0.45])
ax2.plot([13,8,5,3,2,1,1],'r:',lw=3)
import scipy.stats as stats
# With subplots we can make all of the axes at ones.
# The axes are return in a list of lists.
f, [[ax0, ax1], [ax2, ax3]] = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=False)
# Remove the space between the top and bottom rows of plots
# wspace would do the same for left and right columns...
f.subplots_adjust(hspace=0)
ax0.plot(range(50,250), np.exp(np.arange(50,250) / 23.) )
ax2.scatter(np.random.normal(125,27,100), np.random.binomial(200,0.4,100))
ax1.plot(range(0,300), np.random.exponential(0.5,300), 'g')
ax3.plot(range(0,300), stats.norm.pdf(np.arange(0,300),150, 30) , 'g')
plt.colormaps()
cmap0 = plt.cm.cubehelix
cmap1 = plt.cm.Accent
cmap2 = plt.cm.Set1
cmap3 = plt.cm.Spectral
colmaps = [cmap0,cmap1,cmap2,cmap3]
Ncolors = 12
col0 = cmap0(np.linspace(0,1,Ncolors))
f, [[ax0, ax1], [ax2, ax3]] = plt.subplots(nrows=2, ncols=2, figsize=(13,13))
x = np.linspace(0.01,100,1000)
for idx, axis in enumerate([ax0,ax1,ax2,ax3]):
colormap = colmaps[idx]
colors = colormap(np.linspace(0,1,Ncolors))
axis.set_title(colormap.name)
for val in range(Ncolors):
axis.plot(x,x**(1.0 + 0.1 * val), c=colors[val], lw=3, label=val)
axis.loglog()
# Let's look at two distributions on an exponential noise background...
Nnoise = 475000
Nnorm1 = 10000
Nnorm2 = 15000
# Uniform noise in x, exponential in y
xnoise = np.random.rand(Nnoise) * 100
ynoise = np.random.exponential(250,475000)
# Uniform in X, normal in Y
xnorm1 = np.random.rand(Nnorm1) * 100
ynorm1 = np.random.normal(800, 50, Nnorm1)
# Normal in X and Y
xnorm2 = np.random.normal(50, 30, 15000)
ynorm2 = np.random.normal(200, 25, 15000)
xtot = np.concatenate([xnoise, xnorm1, xnorm2])
ytot = np.concatenate([ynoise, ynorm1, ynorm2])
xbins = np.arange(0,100,10)
ybins = np.arange(0,1000,10)
H, xe, ye = np.histogram2d(xtot, ytot, bins=[xbins, ybins])
X,Y = np.meshgrid(ybins,xbins)
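# Note: histogram2d returns H indexed as (x bin, y bin), so meshgrid(ybins, xbins)
# builds edge grids whose shapes match H for pcolor; as a result the y-variable
# ends up on the horizontal axis of the plot below.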
fig4 = plt.figure(figsize=(13,8))
ax1 = fig4.add_axes([0.1,0.1,0.35,0.4])
ax2 = fig4.add_axes([0.5,0.1,0.35,0.4])
pcolplot = ax1.pcolor(X, Y, H, cmap=cm.GnBu)
ax1.set_title('Linear Color Scale')
plt.colorbar(pcolplot, ax=ax1)
from matplotlib.colors import LogNorm
pcolplot2 = ax2.pcolor(X, Y, H, norm=LogNorm(vmin=H.min(), vmax=H.max()), cmap=cm.GnBu)
ax2.set_title('Log Color Scale')
plt.colorbar(pcolplot2, ax=ax2)
xvals = np.arange(0,120,0.1)
# Define a few functions to use
f1 = lambda x: 50. * np.exp(-x/20.)
f2 = lambda x: 30. * stats.norm.pdf(x, loc=25,scale=5)
f3 = lambda x: 200. * stats.norm.pdf(x,loc=40,scale=10)
f4 = lambda x: 25. * stats.gamma.pdf(x, 8., loc=45, scale=4.)
# Normalize to define PDFs
pdf1 = f1(xvals) / (f1(xvals)).sum()
pdf2 = f2(xvals) / (f2(xvals)).sum()
pdf3 = f3(xvals) / (f3(xvals)).sum()
pdf4 = f4(xvals) / (f4(xvals)).sum()
# Combine them and normalize again
pdftot = pdf1 + pdf2 + pdf3 + pdf4
pdftot = pdftot / pdftot.sum()
fig5 = plt.figure(figsize=(11,8))
ax3 = fig5.add_axes([0.1,0.1,0.9,0.9])
# Plot the pdfs, and the total pdf
lines = ax3.plot(xvals, pdf1,'r', xvals,pdf2,'b', xvals,pdf3,'g', xvals,pdf4,'m')
lines = ax3.plot(xvals, pdftot, 'k', lw=5.)
# Calculate the mean
mean1 = (xvals * pdf1).sum()
mean2 = (xvals * pdf2).sum()
mean3 = (xvals * pdf3).sum()
mean4 = (xvals * pdf4).sum()
fig6 = plt.figure(figsize=(11,8))
ax4 = fig6.add_axes([0.1,0.1,0.9,0.9])
# Plot the total PDF
ax4.plot(xvals, pdftot, 'k', lw=5.)
# Grabe the limits of the y-axis for defining the extent of our vertical lines
axmin, axmax = ax4.get_ylim()
# Draw vertical lines. (x location, ymin, ymax, color, linestyle)
ax4.vlines(mean1, axmin, axmax, 'r',':')
ax4.vlines(mean2, axmin, axmax, 'b',':')
ax4.vlines(mean3, axmin, axmax, 'g',':')
ax4.vlines(mean4, axmin, axmax, 'm',':')
# Add some text to figure to describe the curves
# (xloc, yloc, text, color, fontsize, rotation, ...)
ax4.text(mean1-18, 0.0028, r'mean of $f_1(X)$', color='r', fontsize=18)
ax4.text(mean2+1, 0.0005, r'mean of $f_2(X)$', color='b', fontsize=18)
ax4.text(mean3+1, 0.0002, r'mean of $f_3(X)$', color='g', fontsize=18)
ax4.text(mean4+1, 0.0028, r'mean of $f_4(X)$', color='m', fontsize=18, rotation=-25)
temp = ax4.text(50, 0.0009, r'$f_{tot}(X)$', color='k', fontsize=22)
# Compute CDFs
cdf1 = pdf1.cumsum()
cdf2 = pdf2.cumsum()
cdf3 = pdf3.cumsum()
cdf4 = pdf4.cumsum()
cdftot = pdftot.cumsum()
fig7 = plt.figure(figsize=(11,8))
ax7 = fig7.add_axes([0.1,0.1,0.9,0.9])
# Plot them
ax7.plot(xvals, cdftot, 'k', lw=3)
ax7.plot(xvals, cdf1, 'r', ls=':', lw=2)
ax7.plot(xvals, cdf2, 'b', ls=':', lw=2)
ax7.plot(xvals, cdf3, 'g', ls=':', lw=2)
ax7.plot(xvals, cdf4, 'm', ls=':', lw=2)
# Force the y limits to be (0,1)
ax7.set_ylim(0,1.)
# Add 50% and 90% lines.
ax7.hlines(0.5, 0, 120., 'k', '--', lw=2)
ax7.hlines(0.95, 0, 120., 'k', '--', lw=3)
# Add some text
ax7.set_title('CDFs of dists 1-4 and total with 50% and 95% bounds')
ax7.text(110, 0.46, r'$50\%$ ', color='k', fontsize=20)
temp = ax7.text(110, 0.91, r'$95\%$ ', color='k', fontsize=20)
import matplotlib.image as mpimg
img=mpimg.imread('Tahoe.png')
imgplot = plt.imshow(img)
f, [ax0,ax1,ax2] = plt.subplots(nrows=3, ncols=1, figsize=(10,15))
f.subplots_adjust(hspace=0.05)
for ax in [ax0,ax1,ax2]:
# ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax0.imshow(img[:,:,0], cmap=cm.Spectral)
ax1.imshow(img[:,:,1], cmap=cm.Spectral)
ax2.imshow(img[:,:,2], cmap=cm.Spectral)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Where's the plot to this story?
Step2: Interactive mode on or off is a preference. See how it works for your workflow.
Step3: Some Simple Plots
Step4: Lots of kwargs to modify your plot
Step5: Nice scatter Example from the MPL website. Note that the kwargs are different here. Quick inspection of the docs is handy (shift + tab in jupy notebooks).
Step6: Loads of examples and plot types in the Matplotlib.org Gallery
Step7: fig.add_axes is another option for adding axes as you wish.
Step8: plt.subplots gives an alternative route, creating all of the axes at once. Less flexibility since you'll end up with a grid of subplots, but that's exactly what you want a lot of the time.
Step9: Colors and colormaps
Step10: Colormap normalization can also be pretty handy!
Step11: Lines and text
Step12: Let's use vertical lines to represent the means of our distributions instead of plotting all of them.
Step13: We can do the same with horizontal lines
Step14: Displaying images
Step15: Let's plot the R, G, and B components of this image.
|
588 | <ASSISTANT_TASK:>
Python Code:
# Required imports and setup
from rootpy.plotting import Hist, Canvas, set_style
import rootpy.plotting.root2matplotlib as rplt
from root_numpy import array2hist
from IPython.parallel import Client
client = Client('ipcontroller-client.json', sshserver="--redacted--.unimelb.edu.au")
set_style('ATLAS')
# check that the cluster is up and running (expect a response from three laptops)
client[:].apply_sync(lambda: "Hello world!")
def analysis(filename):
import csv
from math import sqrt
from rootpy.plotting import Hist
from root_numpy import hist2array
h = Hist(1500, 0.5, 120)
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header = reader.next()
column = dict([(header[i], i) for i in range(len(header))])
for row in reader:
charge1 = int(row[column['Q1']])
charge2 = int(row[column['Q2']])
opposite_charge = charge1 * charge2 == -1
if not opposite_charge:
continue
E1 = float(row[column['E1']])
px1 = float(row[column['px1']])
py1 = float(row[column['py1']])
pz1 = float(row[column['pz1']])
E2 = float(row[column['E2']])
px2 = float(row[column['px2']])
py2 = float(row[column['py2']])
pz2 = float(row[column['pz2']])
E = E1 + E2
px = px1 + px2
py = py1 + py2
pz = pz1 + pz2
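            # Invariant mass of the muon pair in natural units (c = 1): m^2 = E^2 - |p|^2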
m2 = E**2 - (px**2 + py**2 + pz**2)
if m2 < 0:
# Bad event...
continue
h.Fill(sqrt(m2))
# Return the ROOT histogram as a numpy array
return hist2array(h)
def plot(arrays, hist):
# sum the histograms from each worker
total_hist = sum([array2hist(array, hist.Clone()) for array in arrays])
# plot!
canvas = Canvas()
canvas.SetLogy()
canvas.SetLogx()
total_hist.xaxis.title = 'M_{#mu#mu} [GeV]'
total_hist.yaxis.title = 'Events'
total_hist.Draw('hist')
return canvas
%time local_array = analysis('MuRun2010B.csv')
plot([local_array], Hist(1500, 0.5, 120))
%time arrays = client[:].apply_sync(analysis, 'events.csv')
plot(arrays, Hist(1500, 0.5, 120))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Write the analysis code
Step2: Execute the analysis on a single local core
Step3: That took about 1.5 minutes... Looking at the plot of the dimuon invariant mass below, we see the usual suspects. How many can you name?
Step4: Distributed execution
Step5: That took about 30 seconds. That's three times as fast as the local execution above. Of course we should expect the same plot shown below
|
589 | <ASSISTANT_TASK:>
Python Code:
from pynq import Overlay
from pynq.iop import Pmod_OLED
from pynq.iop import PMODA
ol = Overlay("base.bit")
ol.download()
pmod_oled = Pmod_OLED(PMODA)
pmod_oled.clear()
pmod_oled.write('Welcome to the\nPynq-Z1 board!')
pmod_oled.clear()
pmod_oled.write('Python and Zynq\nproductivity & performance')
def get_ip_address():
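    # Use IPython's ! shell capture to run `hostname -I`, then keep the first address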
ipaddr_slist = !hostname -I
ipaddr = (ipaddr_slist.s).split(" ")[0]
return str(ipaddr)
pmod_oled.clear()
pmod_oled.write(get_ip_address())
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You should now see the text output on the OLED, so let's try another message
Step2: Finally, capture some text from IPython shell calls and print out to OLED
|
590 | <ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
#Import the curve fitter from the scipy optimize package
from scipy.optimize import curve_fit
#create the data to be plotted
x = np.linspace(0, 2*np.pi, 300)
y = np.sin(x)
#Now plot it
plt.plot(x,y,'b--')
plt.plot(x[110:180], y[110:180]) #subset of points that we will fit
plt.show()
#Define the fit function
def func(x, m, b):
return (m*x + b)
# Make initial guess at parameters, slope then y-intercept
p0 = [-1.0, 2.0]
#Call the curve fitter and have it return the optimized parameters (popt) and covariance matrix (pcov)
popt, pcov = curve_fit(func, x[110:180], y[110:180], p0)
#Compute the parameter uncertainties from the covariance matrix
punc = np.zeros(len(popt))
for i in np.arange(0,len(popt)):
punc[i] = np.sqrt(pcov[i,i])
#Print the result
print "optimal parameters: ", popt
print "uncertainties of parameters: ", punc
#plot the fit result with the data
fitresult = func(x,popt[0],popt[1])
plt.plot(x,y,'b--',label="data")
plt.plot(x,fitresult,'g',label="fit")
plt.legend(loc="best")
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create an array of points that represent a sine curve between 0 and 2$\pi$.
Step2: Plot the data over the full range as a dashed line and then overlay the section of the data that looks roughly linear, which we will try to fit with a straight line.
Step3: We need to define the function that we will try to fit to this data. In this example, we will use the equation for a straight line, which has two parameters, the slope $m$ and the y-intercept $b$.
Step4: Before we can fit the data we need to make an initial guess at the slope and y-intercept which we can pass to the optimizer. It will start with those values and then keep trying small variations on those values until it minimizes the linear least squared difference between the data points we are trying to fit and points on the line described by those parameters.
Step5: Now call the optimizer. It will return two arrays. The first is the set of optimized parameters and the second is a matrix that shows the covariance between the parameters. Don't worry about the details of the covariance matrix for now.
Step6: The diagonal elements of the covariance matrix are related to the uncertainties in the optimized fit parameters - they are the square of the uncertainties, actually. Any off-diagonal elements that are non-zero tell you how correlated the parameters are. Values close to zero mean the parameters are totally uncorrelated to one another. Values close to one tell you that the parameters are tightly correlated, meaning that changing the value of one of them makes the value of the other one change by a lot. In the case of a linear fit, changing the slope of the line will change where that line intersects the y-axis, so you would expect a high degree of correlation between the slope and the y-intercept. When you are trying to understand how well a theoretical model matches data and extract parameters with some physical meaning, analyzing the covariance matrix is very important. For now, we just want the best-fit parameters and their uncertainties.
Step7: Let's look at how the fit compares to the data by plotting them on top of one another. The fitresult array extends over the full range in x. You can see that a linear fit in the range of interest is pretty good, but it deviates quite significantly from the data (the sine curve) outside that range.
|
591 | <ASSISTANT_TASK:>
Python Code:
def aquire_audio_data():
D, T = 4, 10000
y = np.random.normal(size=(D, T))
return y
y = aquire_audio_data()
start = time.perf_counter()
x = wpe(y)
end = time.perf_counter()
print(f"Time: {end-start}")
channels = 8
sampling_rate = 16000
delay = 3
iterations = 5
taps = 10
file_template = 'AMI_WSJ20-Array1-{}_T10c0201.wav'
signal_list = [
sf.read(str(project_root / 'data' / file_template.format(d + 1)))[0]
for d in range(channels)
]
y = np.stack(signal_list, axis=0)
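# y holds the multi-channel recording with shape (channels, num_samples)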
IPython.display.Audio(y[0], rate=sampling_rate)
x = wpe(y, iterations=iterations)
IPython.display.Audio(x[0], rate=sampling_rate)
stft_options = dict(
size=512,
shift=128,
window_length=None,
fading=True,
pad=True,
symmetric_window=False
)
Y = stft(y, **stft_options).transpose(2, 0, 1)
X = stft(x, **stft_options).transpose(2, 0, 1)
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(20, 10))
im1 = ax1.imshow(20 * np.log10(np.abs(Y[ :, 0, 200:400])), origin='lower')
ax1.set_xlabel('frames')
_ = ax1.set_title('reverberated')
im2 = ax2.imshow(20 * np.log10(np.abs(X[ :, 0, 200:400])), origin='lower', vmin=-120, vmax=0)
ax2.set_xlabel('frames')
_ = ax2.set_title('dereverberated')
cb = fig.colorbar(im2)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example with real audio recordings
Step2: Audio data
Step3: iterative WPE
Step4: Power spectrum
|
592 | <ASSISTANT_TASK:>
Python Code:
try:
from google.cloud import aiplatform
except ImportError:
!pip3 install -U google-cloud-aiplatform --user
print("Please restart the kernel and re-run the notebook.")
import os
import shutil
import pandas as pd
import tensorflow as tf
from datetime import datetime
from matplotlib import pyplot as plt
from tensorflow import keras
from google.cloud import aiplatform
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, DenseFeatures
from tensorflow.keras.callbacks import TensorBoard
print(tf.__version__)
%matplotlib inline
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# For Bash Code
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set ai/region $REGION
!ls -l ../data/taxi-traffic*
!head ../data/taxi-traffic*
CSV_COLUMNS = [
'fare_amount',
'dayofweek',
'hourofday',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'traffic_last_5min'
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
def features_and_labels(row_data):
label = row_data.pop(LABEL_COLUMN)
features = row_data
return features, label
def create_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
dataset = tf.data.experimental.make_csv_dataset(
pattern, batch_size, CSV_COLUMNS, DEFAULTS)
dataset = dataset.map(features_and_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=1000).repeat()
# take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(1)
return dataset
INPUT_COLS = [
'dayofweek',
'hourofday',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'traffic_last_5min'
]
# Create input layer of feature columns
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in INPUT_COLS
}
# Build a keras DNN model using Sequential API
def build_model(dnn_hidden_units):
model = Sequential(DenseFeatures(feature_columns=feature_columns.values()))
for num_nodes in dnn_hidden_units:
model.add(Dense(units=num_nodes, activation="relu"))
model.add(Dense(units=1, activation="linear"))
# Create a custom evaluation metric
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
# Compile the keras model
model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
return model
HIDDEN_UNITS = [32, 8]
model = build_model(dnn_hidden_units=HIDDEN_UNITS)
BATCH_SIZE = 1000
NUM_TRAIN_EXAMPLES = 10000 * 6 # training dataset will repeat, wrap around
NUM_EVALS = 60 # how many times to evaluate
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample
trainds = create_dataset(
pattern='../data/taxi-traffic-train*',
batch_size=BATCH_SIZE,
mode=tf.estimator.ModeKeys.TRAIN)
evalds = create_dataset(
pattern='../data/taxi-traffic-valid*',
batch_size=BATCH_SIZE,
mode=tf.estimator.ModeKeys.EVAL).take(NUM_EVAL_EXAMPLES//1000)
%%time
steps_per_epoch = NUM_TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS)
LOGDIR = "./taxi_trained"
history = model.fit(x=trainds,
steps_per_epoch=steps_per_epoch,
epochs=NUM_EVALS,
validation_data=evalds,
callbacks=[TensorBoard(LOGDIR)])
RMSE_COLS = ['rmse', 'val_rmse']
pd.DataFrame(history.history)[RMSE_COLS].plot()
model.predict(x={"dayofweek": tf.convert_to_tensor([6]),
"hourofday": tf.convert_to_tensor([17]),
"pickup_longitude": tf.convert_to_tensor([-73.982683]),
"pickup_latitude": tf.convert_to_tensor([40.742104]),
"dropoff_longitude": tf.convert_to_tensor([-73.983766]),
"dropoff_latitude": tf.convert_to_tensor([40.755174]),
"traffic_last_5min": tf.convert_to_tensor([114])},
steps=1)
OUTPUT_DIR = "./export/savedmodel"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR,
datetime.now().strftime("%Y%m%d%H%M%S"))
tf.saved_model.save(model, EXPORT_PATH) # with default serving function
os.environ['EXPORT_PATH'] = EXPORT_PATH
%%bash
TIMESTAMP=$(date -u +%Y%m%d_%H%M%S)
MODEL_NAME=taxifare_$TIMESTAMP
ENDPOINT_NAME=taxifare_endpoint_$TIMESTAMP
IMAGE_URI="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest"
ARTIFACT_DIRECTORY=gs://${BUCKET}/${MODEL_NAME}/
echo $ARTIFACT_DIRECTORY
gsutil cp -r ${EXPORT_PATH}/* ${ARTIFACT_DIRECTORY}
# Model
gcloud ai models upload \
--region=$REGION \
--display-name=$MODEL_NAME \
--container-image-uri=$IMAGE_URI \
--artifact-uri=$ARTIFACT_DIRECTORY
MODEL_ID=$(gcloud ai models list \
--region=$REGION \
--filter=display_name="$MODEL_NAME" | cut -d" " -f1 | head -n2 | tail -n1)
echo "MODEL_NAME=${MODEL_NAME}"
echo "MODEL_ID=${MODEL_ID}"
# Endpoint
gcloud ai endpoints create \
--region=$REGION \
--display-name=$ENDPOINT_NAME
ENDPOINT_ID=$(gcloud ai endpoints list \
--region=$REGION \
--filter=display_name="$ENDPOINT_NAME" | cut -d" " -f1 | head -n2 | tail -n1)
echo "ENDPOINT_NAME=${ENDPOINT_NAME}"
echo "ENDPOINT_ID=${ENDPOINT_ID}"
# Deployment
DEPLOYED_MODEL_NAME=${MODEL_NAME}_deployment
MACHINE_TYPE=n1-standard-2
MIN_REPLICA_COUNT=1
MAX_REPLICA_COUNT=3
gcloud ai endpoints deploy-model $ENDPOINT_ID \
--region=$REGION \
--model=$MODEL_ID \
--display-name=$DEPLOYED_MODEL_NAME \
--machine-type=$MACHINE_TYPE \
--min-replica-count=$MIN_REPLICA_COUNT \
--max-replica-count=$MAX_REPLICA_COUNT \
--traffic-split=0=100
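# traffic-split=0=100 routes 100% of traffic to the newly deployed model (ID 0 in the split)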
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If the above command resulted in an installation, please restart the notebook kernel and re-run the notebook.
Step2: Load raw data
Step3: Use tf.data to read the CSV files
Step4: Build a simple keras DNN model
Step5: Next, we can call the build_model to create the model. Here we'll have two hidden layers before our final output layer. And we'll train with the same parameters we used before.
Step6: Export and deploy model
Step7: Note that the last gcloud call below, which deploys the model, can take a few minutes, and you might not see the earlier echo outputs while that job is still running. If you want to make sure that your notebook is not stalled and your model is actually getting deployed, view your models in the console at https
|
593 | <ASSISTANT_TASK:>
Python Code:
import numpy
import matplotlib.pyplot as plt

f = numpy.exp(1)
f_hat = 2.71
# Error
print "Absolute Error = ", numpy.abs(f - f_hat)
print "Relative Error = ", numpy.abs(f - f_hat) / numpy.abs(f)
# Precision
p = 3
n = numpy.floor(numpy.log10(f_hat)) + 1 - p
print "%s = %s" % (f_hat, numpy.round(10**(-n) * f_hat) * 10**n)
import sympy
x = sympy.symbols('x')
f = sympy.symbols('f', cls=sympy.Function)
f = sympy.exp(x)
f.series(x0=0, n=6)
x = numpy.linspace(-1, 1, 100)
T_N = 1.0 + x + x**2 / 2.0
R_N = numpy.exp(1) * x**3 / 6.0
plt.plot(x, T_N, 'r', x, numpy.exp(x), 'k', x, R_N, 'b')
plt.xlabel("x")
plt.ylabel("$f(x)$, $T_N(x)$, $R_N(x)$")
plt.legend(["$T_N(x)$", "$f(x)$", "$R_N(x)$"], loc=2)
plt.show()
x = numpy.linspace(0.8, 2, 100)
T_N = 1.0 - (x-1) + (x-1)**2
R_N = -(x-1.0)**3 / (1.1**4)
plt.plot(x, T_N, 'r', x, 1.0 / x, 'k', x, R_N, 'b')
plt.xlabel("x")
plt.ylabel("$f(x)$, $T_N(x)$, $R_N(x)$")
plt.legend(["$T_N(x)$", "$f(x)$", "$R_N(x)$"], loc=8)
plt.show()
def eval_poly(p, x):
    """Evaluates polynomial given coefficients p at x.

    Function to evaluate a polynomial in order N operations. The polynomial is defined as
        P(x) = p[0] x**n + p[1] x**(n-1) + ... + p[n-1] x + p[n]
    The value x can be a NumPy ndarray.
    """
y = numpy.ones(x.shape) * p[0]
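    # Horner's rule: fold in one coefficient per pass, so evaluation costs O(n) multiplies and adds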
for coefficient in p[1:]:
y = y * x + coefficient
return y
p = [1, -3, 10, 4, 5, 5]
x = numpy.linspace(-10, 10, 100)
plt.plot(x, eval_poly(p, x))
plt.show()
d_1_values = [1, 2, 3, 4, 5, 6, 7, 8, 9]
d_2_values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
E_values = [0, -1, -2]
fig = plt.figure(figsize=(10.0, 1.0))
axes = fig.add_subplot(1, 1, 1)
for E in E_values:
for d1 in d_1_values:
for d2 in d_2_values:
axes.plot( (d1 + d2 * 0.1) * 10**E, 0.0, 'r+', markersize=20)
axes.plot(-(d1 + d2 * 0.1) * 10**E, 0.0, 'r+', markersize=20)
axes.plot(0.0, 0.0, '+', markersize=20)
axes.plot([-10.0, 10.0], [0.0, 0.0], 'k')
axes.set_title("Distribution of Values")
axes.set_yticks([])
axes.set_xlabel("x")
axes.set_ylabel("")
axes.set_xlim([-0.1, 0.1])
plt.show()
d_1_values = [1]
d_2_values = [0, 1]
E_values = [1, 0, -1]
fig = plt.figure(figsize=(10.0, 1.0))
axes = fig.add_subplot(1, 1, 1)
for E in E_values:
for d1 in d_1_values:
for d2 in d_2_values:
axes.plot( (d1 + d2 * 0.1) * 2**E, 0.0, 'r+', markersize=20)
axes.plot(-(d1 + d2 * 0.1) * 2**E, 0.0, 'r+', markersize=20)
axes.plot(0.0, 0.0, 'r+', markersize=20)
axes.plot([-4.5, 4.5], [0.0, 0.0], 'k')
axes.set_title("Distribution of Values")
axes.set_yticks([])
axes.set_xlabel("x")
axes.set_ylabel("")
axes.set_xlim([-3, 3])
plt.show()
print numpy.finfo(float).eps
print numpy.finfo(float).max
print numpy.finfo(float).min
print numpy.finfo(float).nmant
print numpy.finfo(float).precision
print numpy.finfo(float).tiny
x = numpy.linspace(0.988, 1.012, 1000)
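# (x - 1)**7 written out in expanded form; near x = 1 catastrophic cancellation makes the evaluation noisy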
y = x**7 - 7.0 * x**6 + 21.0 * x**5 - 35.0 * x**4 + 35.0 * x**3 - 21.0 * x**2 + 7.0 * x - 1.0
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'r')
axes.set_xlabel("x")
axes.set_ylabel("y")
plt.show()
delta_x = numpy.linspace(1e-20, 5.0, 1000)
x = 1.0
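# Forward-difference estimate of d/dx e^x at x = 1: truncation error dominates for large delta_x, round-off error for tiny delta_x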
f_hat = (numpy.exp(x + delta_x) - numpy.exp(x)) / (delta_x)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.loglog(delta_x, numpy.abs(f_hat - numpy.exp(1)) / numpy.exp(1))
axes.set_xlabel("$\Delta x$")
axes.set_ylabel("Relative Error")
plt.show()
import scipy.misc
def my_exp(x, N=10):
value = 0.0
for n in xrange(N + 1):
value += x**n / scipy.misc.factorial(n)
return value
x = numpy.linspace(-2, 2, 100)
for N in range(1, 50):
error = numpy.abs((numpy.exp(x) - my_exp(x, N=N)) / numpy.exp(x))
if numpy.all(error < 100.0 * numpy.finfo(float).eps):
break
print N
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, error)
axes.set_xlabel("x")
axes.set_ylabel("Relative Error")
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Truncation Error and Taylor's Theorem
Step2: Let's plot this numerically for a section of $x$.
Step3: Example 2
Step5: Symbols and Definitions
Step9: Truncation Error vs. Floating Point Error
Step10: What are the underflow and overflow limits?
Step11: Smallest number that can be represented is the underflow
Step12: Why should we care about this?
Step13: For more examples see the "100-digit Challenge" book.
Step14: Example 2
|
594 | <ASSISTANT_TASK:>
Python Code:
import modeled.netconf
modeled.netconf.__requires__
import modeled
from modeled import member
class Input(modeled.object):
    """The input part of a Turing Machine program rule."""
state = member[int]()
symbol = member[str]()
class Output(modeled.object):
    """The output part of a Turing Machine program rule."""
state = member[int]()
symbol = member[str]()
head_move = member[str]['L', 'R']()
class Rule(modeled.object):
    """A Turing Machine program rule."""
input = member[Input]()
output = member[Output]()
def __init__(self, input, output):
        """Expects both `input` and `output` as mappings."""
self.input = Input(
# modeled.object.__init__ supports **kwargs
# for initializing modeled.member values
**dict(input))
self.output = Output(**dict(output))
class TuringMachine(modeled.object):
state = member[int]()
head_position = member[int]()
# the list of symbols on the input/output tape
tape = member.list[str](indexname='cell', itemname='symbol')
# the machine program as named rules
program = member.dict[str, Rule](keyname='name')
def __init__(self, program):
        """Create a Turing Machine with the given `program`."""
program = dict(program)
for name, (input, output) in program.items():
self.program[name] = Rule(input, output)
def run(self):
        """Start the Turing Machine.

        - Runs until no matching input part for current state and tape symbol
          can be found in the program rules.
        """
self.log = " %s %d\n" % (''.join(self.tape), self.state)
while True:
pos = self.head_position
if 0 <= pos < len(self.tape):
symbol = self.tape[pos]
else:
symbol = None
for name, rule in self.program.items():
if (self.state, symbol) == (rule.input.state, rule.input.symbol):
self.log += "%s^%s --> %s\n" % (
' ' * (pos + 1),
' ' * (len(self.tape) - pos),
name)
if rule.output.state is not None:
self.state = rule.output.state
if rule.output.symbol is not None:
self.tape[pos] = rule.output.symbol
self.head_position += {'L': -1, 'R': 1}[rule.output.head_move]
self.log += " %s %d\n" % (''.join(self.tape), self.state)
break
else:
break
%%file turing-machine-program.yaml
left summand:
- {state: 0, symbol: 1}
- {state: null, symbol: null, head_move: R}
separator:
- {state: 0, symbol: 0}
- {state: 1, symbol: 1, head_move: R}
right summand:
- {state: 1, symbol: 1}
- {state: null, symbol: null, head_move: R}
right end:
- {state: 1, symbol: null}
- {state: 2, symbol: null, head_move: L}
write separator:
- {state: 2, symbol: 1}
- {state: 3, symbol: 0, head_move: L}
go home:
- {state: 3, symbol: 1}
- {state: null, symbol: null, head_move: L}
final step:
- {state: 3, symbol: null}
- {state: 4, symbol: null, head_move: R}
import yaml
with open('turing-machine-program.yaml') as f:
TM_PROGRAM = yaml.load(f)
tm = TuringMachine(TM_PROGRAM)
tm.state = 0
tm.head_position = 0
tm.tape = '1011'
tm.tape
tm.run()
print(tm.log)
from modeled.netconf import YANG
YANG[TuringMachine].mro()
YANG[TuringMachine].mclass
YANG[TuringMachine] is YANG[TuringMachine]
print(YANG[TuringMachine].to_yang(
prefix='tm', namespace='http://modeled.netconf/turing-machine'))
print(YANG[TuringMachine].to_yin(
prefix='tm', namespace='http://modeled.netconf/turing-machine'))
tm = YANG[TuringMachine](TM_PROGRAM)
tm.state = 0
tm.head_position = 0
tm.tape = '1011'
tm.run()
tm.state, tm.tape
from modeled.netconf import rpc
class TM(YANG[TuringMachine]):
@rpc(argtypes={'tape_content': str})
# in Python 3 you can also use function annotations
# and write (self, tape_content: str) below
# instead of argtypes= above
def initialize(self, tape_content):
        """Initialize the Turing Machine."""
self.state = 0
self.head_position = 0
self.tape = tape_content
@rpc(argtypes={})
def run(self):
        """Start the Turing Machine operation."""
TuringMachine.run(self)
TM_YANG = TM.to_yang(
prefix='tm', namespace='http://modeled.netconf/turing-machine')
print(TM_YANG)
with open('turing-machine.yang', 'w') as f:
f.write(TM_YANG)
!pyang -f tree turing-machine.yang
tm = TM(TM_PROGRAM)
PORT = 12345
USERNAME = 'user'
PASSWORD = 'password'
server = tm.serve_netconf_ssh(
port=PORT, host_key='key', username=USERNAME, password=PASSWORD)
from netconf.client import NetconfSSHSession
client = NetconfSSHSession(
'localhost', port=PORT, username=USERNAME, password=PASSWORD)
reply = client.send_rpc(
'<initialize><tape-content>110111</tape-content></initialize>')
tm.tape
reply = client.send_rpc('<run/>')
tm.state, tm.tape
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step7: To install in development mode
Step8: To check if the Turing Machine works, it needs an actual program.
Step9: Instantiate the Turing Machine with the loaded program
Step10: And set the initial state for computing unary 1 + 2
Step11: The tape string gets automatically converted to a list, since tape was declared as a modeled list member.
Step12: Ready for turning on the Turing Machine
Step13: Final state is reached. Result is unary 3. Seems to work!
Step14: And plug it to the TuringMachine.
Step15: It also has a class attribute referencing the original modeled class
Step16: BTW
Step17: But let's take a look at the really useful features now.
Step18: Or XMLified YIN
Step19: Since the modeled YANG module
Step20: Adding RPC methods
Step23: The following RPC definitions are again designed
Step24: Now the .to_yang() conversion also includes the rpc definitions,
Step25: Now is a good time to verify if that's really correct YANG.
Step26: And feed it to the pyang command.
Step27: No errors. Great!
Step28: Currently only serving NETCONF over
Step29: And it needs an SSH key.
Step30: And that's it! The created server is an instance of Python
Step31: Now the Turing Machine can be remotely initialized
Step32: The tape will be set accordingly
Step33: Now run the Turing Machine via RPC
|
595 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pycuda.gpuarray as gpuarray
from pycuda.curandom import rand as curand
from pycuda.compiler import SourceModule
import pycuda.driver as cuda
try:
ctx.pop()
ctx.detach()
except:
print ("No CTX!")
cuda.init()
device = cuda.Device(0)
ctx = device.make_context()
print (device.name(), device.compute_capability(),device.total_memory()/1024.**3,"GB")
print("and by the way, we have here:", cuda.Device.count(), "devices")
import numpy as np
Nx = 1024
Na = 1024
a = np.linspace(3.255,4,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
from pycuda.elementwise import ElementwiseKernel
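# ElementwiseKernel runs the given C expression once per array element i, in parallel on the GPU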
iterate = ElementwiseKernel(
"float *a, float *x",
"x[i] = a[i]*x[i]*(1.0f-x[i])",
"iterate")
%%time
Niter = 1000
for i in range(Niter):
iterate(a_gpu,x_gpu)
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
plt.figure(num=1, figsize=(10, 6))
every = 10
plt.plot(a[::every],x[::every],'.',markersize=1)
plt.plot([3.83,3.83],[0,1])
import pycuda.gpuarray as gpuarray
from pycuda.curandom import rand as curand
from pycuda.compiler import SourceModule
import pycuda.driver as cuda
try:
ctx.pop()
ctx.detach()
except:
print( "No CTX!")
cuda.init()
device = cuda.Device(0)
ctx = device.make_context()
mod = SourceModule("""
__global__ void logistic_iterations(float *a,float *x,int Niter)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
float a_ = a[idx];
float x_ = x[idx];
int i;
for (i=0;i<Niter;i++){
x_ = a_*x_*(1-x_);
}
x[idx] = x_;
}
""")
logistic_iterations = mod.get_function("logistic_iterations")
block_size=128
Nx = 10240
Na = 1024*2
blocks = Nx*Na//block_size
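# one GPU thread per (a, x) sample: blocks * block_size == Nx * Na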
a = np.linspace(3.255,4,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
%%time
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
plt.figure(num=1, figsize=(9, 8))
every = 100
plt.plot(a[::every],x[::every],'.',markersize=1,alpha=1)
plt.plot([3.83,3.83],[0,1])
H, xedges, yedges = np.histogram2d(a,x,bins=(1024,1024))
plt.figure(num=1, figsize=(10,10))
plt.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray')
%load_ext Cython
%%cython
def logistic_cpu(double a = 3.56994):
cdef double x
cdef int i
x = 0.1
for i in range(1000*1024*1024):
x = a*x*(1.0-x)
return x
%%time
logistic_cpu(1.235)
print("OK")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
a1,a2 = 3,3.56994567
Nx = 1024
Na = 1024
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
fig = plt.figure()
every = 1
Niter = 10000
for i in range(Niter):
if i%every==0:
plt.cla()
plt.xlim(a1,a2)
plt.ylim(0,1)
fig.suptitle("iteracja: %05d"%i)
plt.plot(a,x,'.',markersize=1)
plt.savefig("/tmp/%05d.png"%i)
if i>10:
every=2
if i>30:
every=10
if i>100:
every=50
if i>1000:
every=500
iterate(a_gpu,x_gpu)
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
%%sh
cd /tmp
time convert -delay 20 -loop 0 *.png anim_double.gif && rm *.png
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
block_size=128
Nx = 1024*5
Na = 1024*3
blocks = Nx*Na//block_size
nframes = 22
for i,(a1,a2) in enumerate(zip(np.linspace(3,3.77,nframes),np.linspace(4,3.83,nframes))):
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
H, xedges, yedges = np.histogram2d(a,x,bins=(np.linspace(a1,a2,1024),np.linspace(0,1,1024)))
fig, ax = plt.subplots(figsize=[10,7])
ax.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray',extent=[a1,a2,0,1])
#plt.xlim(a1,a2)
#plt.ylim(0,1)
ax.set_aspect(7/10*(a2-a1))
#fig.set_size_inches(8, 5)
fig.savefig("/tmp/zoom%05d.png"%i)
plt.close(fig)
%%sh
cd /tmp
time convert -delay 30 -loop 0 *.png anim_zoom.gif && rm *.png
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
block_size=128
Nx = 1024*5
Na = 1024*3
blocks = Nx*Na//block_size
a1,a2 = 1,4
x1,x2 = 0., 1
a = np.linspace(a1,a2,Na).astype(np.float32)
a = np.repeat(a,Nx)
a_gpu = gpuarray.to_gpu(a)
x_gpu = curand((Na*Nx,))
x = x_gpu.get()
logistic_iterations(a_gpu,x_gpu, np.int32(10000),block=(block_size,1,1), grid=(blocks,1,1))
ctx.synchronize()
a,x = a_gpu.get(),x_gpu.get()
H, xedges, yedges = np.histogram2d(a,x,bins=(np.linspace(a1,a2,1024),np.linspace(x1,x2,1024)))
fig, ax = plt.subplots(figsize=[10,7])
ax.imshow(1-np.log(H.T+5e-1),origin='lower',cmap='gray',extent=[a1,a2,x1,x2])
#plt.xlim(a1,a2)
#plt.ylim(0,1)
ax.set_aspect(7/10*(a2-a1)/(x2-x1))
#fig.set_size_inches(8, 5)
fig.savefig("/tmp/zoom.png")
plt.close(fig)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Elementwise kernel
Step3: Algorithm with the iteration loop inside the CUDA kernel
Step4: Comparison with the CPU version
Step5: Visualization of the results
|
596 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import bt

mean = np.array([0.05/252 + 0.02/252, 0.03/252 + 0.02/252])
volatility = np.array([0.2/np.sqrt(252), 0.05/np.sqrt(252)])
variance = np.power(volatility,2)
correlation = np.array(
[
[1, 0.25],
[0.25,1]
]
)
covariance = np.zeros((2,2))
for i in range(len(variance)):
for j in range(len(variance)):
covariance[i,j] = correlation[i,j]*volatility[i]*volatility[j]
covariance
names = ['foo','bar','rf']
dates = pd.date_range(start='2015-01-01',end='2018-12-31', freq=pd.tseries.offsets.BDay())
n = len(dates)
rdf = pd.DataFrame(
np.zeros((n, len(names))),
index = dates,
columns = names
)
np.random.seed(1)
rdf.loc[:,['foo','bar']] = np.random.multivariate_normal(mean,covariance,size=n)
rdf['rf'] = 0.02/252
pdf = 100*np.cumprod(1+rdf)
pdf.plot()
runAfterDaysAlgo = bt.algos.RunAfterDays(
20*6 + 1
)
selectTheseAlgo = bt.algos.SelectThese(['foo','bar'])
# algo to set the weights so each asset contributes the same amount of risk
# with data over the last 6 months excluding yesterday
weighERCAlgo = bt.algos.WeighERC(
lookback=pd.DateOffset(days=20*6),
covar_method='standard',
risk_parity_method='slsqp',
maximum_iterations=1000,
tolerance=1e-9,
lag=pd.DateOffset(days=1)
)
rebalAlgo = bt.algos.Rebalance()
strat = bt.Strategy(
'ERC',
[
runAfterDaysAlgo,
selectTheseAlgo,
weighERCAlgo,
rebalAlgo
]
)
backtest = bt.Backtest(
strat,
pdf,
integer_positions=False
)
res_target = bt.run(backtest)
res_target.get_security_weights().plot()
res_target.prices.plot()
weights_target = res_target.get_security_weights().copy()
rolling_cov_target = pdf.loc[:,weights_target.columns].pct_change().rolling(window=252).cov()*252
trc_target = pd.DataFrame(
np.nan,
index = weights_target.index,
columns = weights_target.columns
)
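# Total risk contribution of asset i: TRC_i = w_i * (Sigma w)_i / sqrt(w' Sigma w); an ERC portfolio should keep these roughly equal across assets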
for dt in pdf.index:
trc_target.loc[dt,:] = weights_target.loc[dt,:].values*(rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)/np.sqrt(weights_target.loc[dt,:].values@rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)
fig, ax = plt.subplots(nrows=1,ncols=1)
trc_target.plot(ax=ax)
ax.set_title('Total Risk Contribution')
ax.plot()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build and run ERC Strategy
|
597 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd

baselines = """Unique cookies to view page per day:\t40000
Unique cookies to click "Start free trial" per day:\t3200
Enrollments per day:\t660
Click-through-probability on "Start free trial":\t0.08
Probability of enrolling, given click:\t0.20625
Probability of payment, given enroll:\t0.53
Probability of payment, given click:\t0.1093125"""
lines = baselines.split('\n')
d_baseline = dict([(e.split(':\t')[0],float(e.split(':\t')[1])) for e in lines])
n = 5000
n_click = n * d_baseline['Click-through-probability on "Start free trial"']
n_click
p = d_baseline['Probability of enrolling, given click']
round(np.sqrt(p * (1-p) / n_click),4)
p = d_baseline['Probability of payment, given click']
round(np.sqrt(p * (1-p) / n_click),4)
(27411 * 2) / d_baseline['Click-through-probability on "Start free trial"']
control = pd.read_csv('control_data.csv')
experiment = pd.read_csv('experiment.csv')
control.head()
experiment.head()
control_views = control.Pageviews.sum()
control_clicks = control.Clicks.sum()
experiment_views = experiment.Pageviews.sum()
experiment_clicks = experiment.Clicks.sum()
def sanity_check_CI(control,experiment,expected):
SE = np.sqrt((expected*(1-expected))/(control + experiment))
ME = 1.96 * SE
return (expected-ME,expected+ME)
sanity_check_CI(control_views,experiment_views,0.5)
float(control_views)/(control_views+experiment_views)
sanity_check_CI(control_clicks,experiment_clicks,0.5)
float(control_clicks)/(control_clicks+experiment_clicks)
ctp_control = float(control_clicks)/control_views
ctp_experiment = float(experiment_clicks)/experiment_views
# %%R  (R cell magic is commented out; the code below is R syntax, not Python)
c = 28378
n = 345543
CL = 0.95
pe = c/n
SE = sqrt(pe*(1-pe)/n)
z_star = round(qnorm((1-CL)/2,lower.tail=F),digits=2)
ME = z_star * SE
c(pe-ME, pe+ME)
ctp_experiment
get_gross = lambda group: float(group.dropna().Enrollments.sum())/ group.Clicks.sum()
get_net = lambda group: float(group.dropna().Payments.sum())/ group.Clicks.sum()
print('N_cont = %i'%control.dropna().Clicks.sum())
print('X_cont = %i'%control.dropna().Enrollments.sum())
print('N_exp = %i'%experiment.dropna().Clicks.sum())
print('X_exp = %i'%experiment.dropna().Enrollments.sum())
X_exp/N_exp
X_cont/N_cont
#%%R  (R cell magic is commented out; the code below is R syntax, not Python)
N_cont = 17293
X_cont = 3785
N_exp = 17260
X_exp = 3423
observed_diff = X_exp/N_exp - X_cont/N_cont
# print(observed_diff)
p_pool = (X_cont+X_exp)/(N_cont+N_exp)
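# pooled standard error for the difference of two proportions: sqrt(p_pool*(1-p_pool)*(1/N_cont + 1/N_exp))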
SE = sqrt( (p_pool*(1-p_pool)) * ((1/N_cont) + (1/N_exp)))
ME = 1.96 * SE
# print(p_pool)
c(observed_diff-ME, observed_diff+ME)
observed_diff
print('N_cont = %i'%control.dropna().Clicks.sum())
print('X_cont = %i'%control.dropna().Payments.sum())
print('N_exp = %i'%experiment.dropna().Clicks.sum())
print('X_exp = %i'%experiment.dropna().Payments.sum())
X_exp/N_exp
X_cont/N_cont
#%%R  (R cell magic is commented out; the code below is R syntax, not Python)
N_cont = 17293
X_cont = 2033
N_exp = 17260
X_exp = 1945
observed_diff = X_exp/N_exp - X_cont/N_cont
# print(observed_diff)
p_pool = (X_cont+X_exp)/(N_cont+N_exp)
SE = sqrt( (p_pool*(1-p_pool)) * ((1/N_cont) + (1/N_exp)))
ME = 1.96 * SE
# print(p_pool)
c(observed_diff-ME, observed_diff+ME)
observed_diff
compare_prob = lambda col: ((control.dropna()[col] / control.dropna().Clicks) <
(experiment.dropna()[col]/experiment.dropna().Clicks))
compare_prob('Enrollments').value_counts()
compare_prob('Payments').value_counts()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Since we have 5000 sample cookies instead of the original 40000, we can adjust accordingly using the calculated probabilities. For these two evaluation metrics, we need the number of users who click the "Start Now" button, calculated as
Step3: Next, standard deviation for Gross conversion is
Step4: and for Net Conversion,
Step5: For Gross Conversion and Net Conversion, the empirical variance should approximate the analytical variance, because the unit of analysis and the unit of diversion are the same (cookie-ids/user-ids).
Step6: Duration vs. Exposure
Step7: Next, we count the total views and clicks for both control and experiment groups.
Step8: For counts like the number of cookies and the number of clicks on the "Start free trial" button, we can build a confidence interval around the fraction we expect in the control group, with the actual fraction as the observed outcome. Since we expect control and experiment to have equal proportions, we set the expected proportion to 0.5. Both invariant metrics use the function below for their sanity-check confidence intervals.
Step9: Now for the sanity-check confidence interval of the number of cookies that view the page,
Step10: The actual proportion is
Step11: Since we know that 0.5006 is within the interval, the experiment passes the sanity check for the number of cookies.
Step12: And the actual proportion,
Step13: Again 0.5006 is within the interval, so our experiment also passes this sanity check.
Step14: And as you can see, the click-through-probability of the experiment group is still within the confidence interval of the control group's click-through-probability. Since we have passed all of the sanity checks, we can continue to analyze the experiment.
Step15: Gross Conversion
Step16: The observed difference is outside the confidence interval, and it is also above the dmin of 0.01 (the minimum detectable effect). We should definitely launch.
Step17: The observed difference is within the confidence interval, so it's not statistically significant and also not practically significant. We may fail or continue with our results.
Step18: Gross Conversion
Step19: Net Conversion
|
598 | <ASSISTANT_TASK:>
Python Code:
offset = [-190., -47.]*u.arcsec
for ind, orbit in enumerate(orbits):
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print("Orbit start: {} Orbit end: {}".format(orbit[0].isoformat(), orbit[1].isoformat()))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loop over each orbit and correct the pointing for the same heliocentric pointing position.
|
599 | <ASSISTANT_TASK:>
Python Code:
import phoebe
from phoebe import u
b = phoebe.default_binary()
print(b.filter(qualifier='teff'))
lhs = b.get_parameter(qualifier='teff', component='secondary')
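# multiplying a parameter by a float builds a ConstraintParameter expression for the right-hand side (secondary teff = 0.5 * primary teff)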
rhs = 0.5 * b.get_parameter(qualifier='teff', component='primary')
rhs
b.add_constraint(lhs, rhs)
print(b.filter(qualifier='teff'))
b.set_value(qualifier='teff', component='primary', value=7000)
print(b.filter(qualifier='teff'))
b = phoebe.default_binary()
b.filter(qualifier='teffratio')
teffratio_def = phoebe.parameters.FloatParameter(qualifier='teffratio',
default_unit=u.dimensionless_unscaled,
value=1,
description='effective temperature ratio')
teffratio_param, created = b.get_or_create('teffratio', teffratio_def, context='component', component='binary')
print(b.filter(qualifier='teffratio'))
print(b.get_parameter(qualifier='teffratio').tags)
lhs = b.get_parameter(qualifier='teff', component='secondary')
rhs = teffratio_param * b.get_parameter(qualifier='teff', component='primary')
rhs
b.add_constraint(lhs, rhs)
print(b.filter(qualifier=['teff', 'teffratio']))
b.set_value('teffratio', 0.5)
print(b.filter(qualifier=['teff', 'teffratio']))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this case, the two positional arguments to b.add_constraint must be the left-hand side of the expression (which will become the constrained parameter) and the right-hand side of the expression (either another parameter or a ConstraintParameter).
Step2: Now, as with any other constraint, if we change the value of a parameter in the constraint, the constrained value will automatically adjust.
Step3: Complex Case with New Parameter
Step4: First we need to create all the parameters that we need that do not already exist, and attach them to the bundle with appropriate tags. Ultimately the choice of tags is inconsequential, but here it makes some sense to apply our new teffratio to the parent orbit that contains both stars.
Step5: We'll attach the new parameter by calling b.get_or_create. The first argument here is the qualifier, the second is the parameter object itself, and additional keyword arguments are tags to be applied. If a parameter already exists that matches the filter (including the qualifier) then that parameter will be returned (and the new parameter will be ignored), otherwise the new parameter will be attached to the bundle and returned.
Step6: Our new parameter is available through filtering as is any other parameter.
Step7: We can now define our constraint as before, but replace the hardcoded 0.5 with the teffratio parameter.
|