Unnamed: 0 (int64, 0 to 15.9k) | cleaned_code (stringlengths 67 to 124k, ⌀ nullable) | cleaned_prompt (stringlengths 168 to 30.3k, ⌀ nullable) |
---|---|---|
14,300 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[300]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
len(mnist.train.images)
learning_rate = 0.001
image_size = 28
inputs_ = tf.placeholder(shape= [None,image_size,image_size,1], dtype=tf.float32)
targets_ = tf.placeholder(shape= [None,image_size,image_size,1], dtype=tf.float32)
### Encoder
filter1 = tf.Variable(tf.random_normal([7,7,1,16],stddev=0.1))
conv1 = tf.nn.conv2d(inputs_, filter1, strides=[1, 1, 1, 1], padding='SAME')
# Now 28x28x16
maxpool1 = tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1, 2, 2, 1],padding='SAME')
# Now 14x14x16
filter2 = tf.Variable(tf.random_normal([2,2,16,8],stddev=0.1))
conv2 = tf.nn.conv2d(maxpool1, filter2, strides=[1, 1, 1, 1], padding='SAME')
# Now 14x14x8
maxpool2 = tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1, 2, 2, 1],padding='SAME')
# Now 7x7x8
filter3 = tf.Variable(tf.random_normal([2,2,8,8],stddev=0.1))
conv3 = tf.nn.conv2d(maxpool2, filter3, strides=[1, 1, 1, 1], padding='SAME')
# Now 7x7x8
encoded = tf.nn.max_pool(conv3,ksize=[1,2,2,1],strides=[1, 2, 2, 1],padding='SAME')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded,size=[7,7])
# Now 7x7x8
filter4 = tf.Variable(tf.random_normal([2,2,8,8],stddev=0.1))
conv4 = tf.nn.conv2d(upsample1, filter4, strides=[1, 1, 1, 1], padding='SAME')
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4,size=[14,14])
# Now 14x14x8
conv5 = tf.nn.conv2d(upsample2, filter4, strides=[1, 1, 1, 1], padding='SAME')
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5,size=[28,28])
# Now 28x28x8
filter5 = tf.Variable(tf.random_normal([2,2,8,16],stddev=0.1))
conv6 = tf.nn.conv2d(upsample3, filter5, strides=[1, 1, 1, 1], padding='SAME')
# Now 28x28x16
filter_logits = tf.Variable(tf.random_normal([2,2,16,1],stddev=0.1))
logits = tf.nn.conv2d(conv6, filter_logits, strides=[1, 1, 1, 1], padding='SAME')
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits = logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 5
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        imgs = batch[0].reshape((-1, 28, 28, 1))
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
                                                         targets_: imgs})
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 =
# Now 28x28x32
maxpool1 =
# Now 14x14x32
conv2 =
# Now 14x14x32
maxpool2 =
# Now 7x7x32
conv3 =
# Now 7x7x16
encoded =
# Now 4x4x16
### Decoder
upsample1 =
# Now 7x7x16
conv4 =
# Now 7x7x16
upsample2 =
# Now 14x14x16
conv5 =
# Now 14x14x32
upsample3 =
# Now 28x28x32
conv6 =
# Now 28x28x32
logits =
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded =
# Pass logits through sigmoid and calculate the cross-entropy loss
loss =
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Get images from the batch
        imgs = batch[0].reshape((-1, 28, 28, 1))
        # Add random noise to the input images
        noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
        # Clip the images to be between 0 and 1
        noisy_imgs = np.clip(noisy_imgs, 0., 1.)
        # Noisy images as inputs, original images as targets
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
                                                         targets_: imgs})
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
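# --- Editor's sketch (not part of the original notebook) ---
# One way the blank encoder/decoder template above could be completed, using only the
# tf.nn primitives already shown in the first network. The 3x3 filters, ReLU activations
# and the small helper functions below are illustrative assumptions; the channel counts
# follow the "# Now ..." shape comments. It assumes inputs_, targets_ and learning_rate
# as defined earlier in this notebook.
def conv_relu(x_in, in_ch, out_ch):
    w = tf.Variable(tf.random_normal([3, 3, in_ch, out_ch], stddev=0.1))
    return tf.nn.relu(tf.nn.conv2d(x_in, w, strides=[1, 1, 1, 1], padding='SAME'))
def pool(x_in):
    return tf.nn.max_pool(x_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = conv_relu(inputs_, 1, 32)                                    # Now 28x28x32
maxpool1 = pool(conv1)                                               # Now 14x14x32
conv2 = conv_relu(maxpool1, 32, 32)                                  # Now 14x14x32
maxpool2 = pool(conv2)                                               # Now 7x7x32
conv3 = conv_relu(maxpool2, 32, 16)                                  # Now 7x7x16
encoded = pool(conv3)                                                # Now 4x4x16
upsample1 = tf.image.resize_nearest_neighbor(encoded, size=[7, 7])   # Now 7x7x16
conv4 = conv_relu(upsample1, 16, 16)                                 # Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, size=[14, 14])   # Now 14x14x16
conv5 = conv_relu(upsample2, 16, 32)                                 # Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, size=[28, 28])   # Now 28x28x32
conv6 = conv_relu(upsample3, 32, 32)                                 # Now 28x28x32
w_out = tf.Variable(tf.random_normal([3, 3, 32, 1], stddev=0.1))
logits = tf.nn.conv2d(conv6, w_out, strides=[1, 1, 1, 1], padding='SAME')  # Now 28x28x1
decoded = tf.nn.sigmoid(logits)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)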
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
14,301 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'niwa', 'ukesm1-0-ll', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
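# Editor's illustration (hypothetical values, not from the source notebook): the author and
# contributor cells above use the call signature given in their comments, e.g.
# DOC.set_author("Jane Doe", "jane.doe@example.org")
# DOC.set_contributor("John Smith", "john.smith@example.org")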
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
14,302 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
X = [['asdf', '1'], ['asdf', '0']]
clf = DecisionTreeClassifier()
from sklearn.feature_extraction import DictVectorizer
X = [dict(enumerate(x)) for x in X]
vect = DictVectorizer(sparse=False)
new_X = vect.fit_transform(X)
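# Editor's follow-up sketch (not in the original snippet): with the string features one-hot
# encoded by DictVectorizer, the tree can be fitted and queried. The labels y below are
# made-up placeholders for the two rows, purely for illustration.
y = [0, 1]
clf.fit(new_X, y)
print(clf.predict(vect.transform([dict(enumerate(['asdf', '1']))])))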
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,303 | <ASSISTANT_TASK:>
Python Code:
%load_ext rpy2.ipython
%%R
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/'
physeqDir = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/'
physeqBulkCore = 'bulk-core_trm'
physeqSIP = 'SIP-core_unk_trm'
ampFragFile = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl'
import os
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(phyloseq)
library(fitdistrplus)
library(sads)
%%R
dir.create(workDir, showWarnings=FALSE)
%%R
# bulk core samples
F = file.path(physeqDir, physeqBulkCore)
physeq.bulk = readRDS(F)
#physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk %>% names
%%R
# SIP core samples
F = file.path(physeqDir, physeqSIP)
physeq.SIP = readRDS(F)
#physeq.SIP.m = physeq.SIP %>% sample_data
physeq.SIP %>% names
%%R
physeq2otu.long = function(physeq){
    df.OTU = physeq %>%
        transform_sample_counts(function(x) x/sum(x)) %>%
        otu_table %>%
        as.matrix %>%
        as.data.frame
    df.OTU$OTU = rownames(df.OTU)
    df.OTU = df.OTU %>%
        gather('sample', 'abundance', 1:(ncol(df.OTU)-1))
    return(df.OTU)
}
df.OTU.l = lapply(physeq.bulk, physeq2otu.long)
df.OTU.l %>% names
#df.OTU = do.call(rbind, lapply(physeq.bulk, physeq2otu.long))
#df.OTU$Day = gsub('.+\\.D([0-9]+)\\.R.+', '\\1', df.OTU$sample)
#df.OTU %>% head(n=3)
%%R -w 450 -h 400
lapply(df.OTU.l, function(x) descdist(x$abundance, boot=1000))
%%R
fitdists = function(x){
    fit.l = list()
    #fit.l[['norm']] = fitdist(x$abundance, 'norm')
    fit.l[['exp']] = fitdist(x$abundance, 'exp')
    fit.l[['logn']] = fitdist(x$abundance, 'lnorm')
    fit.l[['gamma']] = fitdist(x$abundance, 'gamma')
    fit.l[['beta']] = fitdist(x$abundance, 'beta')
    # plotting
    plot.legend = c('exponential', 'lognormal', 'gamma', 'beta')
    par(mfrow = c(2,1))
    denscomp(fit.l, legendtext=plot.legend)
    qqcomp(fit.l, legendtext=plot.legend)
    # fit summary
    gofstat(fit.l, fitnames=plot.legend) %>% print
    return(fit.l)
}
fits.l = lapply(df.OTU.l, fitdists)
fits.l %>% names
%%R
# getting summaries for lognormal fits
get.summary = function(x, id='logn'){
    summary(x[[id]])
}
fits.s = lapply(fits.l, get.summary)
fits.s %>% names
%%R
# listing estimates for fits
df.fits = do.call(rbind, lapply(fits.s, function(x) x$estimate)) %>% as.data.frame
df.fits$Sample = rownames(df.fits)
df.fits$Day = gsub('.+D([0-9]+)\\.R.+', '\\1', df.fits$Sample) %>% as.numeric
df.fits
%%R -w 650 -h 300
ggplot(df.fits, aes(Day, meanlog,
ymin=meanlog-sdlog,
ymax=meanlog+sdlog)) +
geom_pointrange() +
geom_line() +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R
# mean of estimaates
apply(df.fits, 2, mean)
%%R -w 800
df.OTU = do.call(rbind, df.OTU.l) %>%
mutate(abundance = abundance * 100) %>%
group_by(sample) %>%
mutate(rank = row_number(desc(abundance))) %>%
ungroup() %>%
filter(rank < 10)
ggplot(df.OTU, aes(rank, abundance, color=sample, group=sample)) +
geom_point() +
geom_line() +
labs(y = '% rel abund')
%%R -w 800 -h 300
df.OTU = do.call(rbind, df.OTU.l) %>%
mutate(abundance = abundance * 100) %>%
group_by(sample) %>%
mutate(rank = row_number(desc(abundance))) %>%
group_by(rank) %>%
summarize(mean_abundance = mean(abundance)) %>%
ungroup() %>%
mutate(library = 1,
mean_abundance = mean_abundance / sum(mean_abundance) * 100) %>%
rename('rel_abund_perc' = mean_abundance) %>%
dplyr::select(library, rel_abund_perc, rank) %>%
as.data.frame
df.OTU %>% nrow %>% print
ggplot(df.OTU, aes(rank, rel_abund_perc)) +
geom_point() +
geom_line() +
labs(y = 'mean % rel abund')
ret = !SIPSim KDE_info -t /home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl
ret = ret[1:]
ret[:5]
%%R
F = '/home/nick/notebook/SIPSim/dev/fullCyc_trim//ampFrags_kde_amplified.txt'
ret = read.delim(F, sep='\t')
ret = ret$genomeID
ret %>% length %>% print
ret %>% head
%%R
ret %>% length %>% print
df.OTU %>% nrow
%%R -i ret
# randomize
ret = ret %>% sample %>% sample %>% sample
# adding to table
df.OTU$taxon_name = ret[1:nrow(df.OTU)]
df.OTU = df.OTU %>%
dplyr::select(library, taxon_name, rel_abund_perc, rank)
df.OTU %>% head
%%R
#-- debug -- #
df.gc = read.delim('~/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_parsed_kde_info.txt',
                   sep='\t')
top.taxa = df.gc %>%
filter(KDE_ID == 1, median > 1.709, median < 1.711) %>%
dplyr::select(taxon_ID) %>%
mutate(taxon_ID = taxon_ID %>% sample) %>%
head
top.taxa = top.taxa$taxon_ID %>% as.vector
top.taxa
%%R
#-- debug -- #
p1 = df.OTU %>%
filter(taxon_name %in% top.taxa)
p2 = df.OTU %>%
head(n=length(top.taxa))
p3 = anti_join(df.OTU, rbind(p1, p2), c('taxon_name' = 'taxon_name'))
df.OTU %>% nrow %>% print
p1 %>% nrow %>% print
p2 %>% nrow %>% print
p3 %>% nrow %>% print
p1 = p2$taxon_name
p2$taxon_name = top.taxa
df.OTU = rbind(p2, p1, p3)
df.OTU %>% nrow %>% print
df.OTU %>% head
%%R
F = file.path(workDir, 'fullCyc_12C-Con_trm_comm.txt')
write.table(df.OTU, F, sep='\t', quote=FALSE, row.names=FALSE)
cat('File written:', F, '\n')
ampFragFile
!ls -thlc
!tail -n +2 /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm.txt | \
cut -f 2 > /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt
outFile = os.path.splitext(ampFragFile)[0] + '_parsed.pkl'
!SIPSim KDE_parse \
$ampFragFile \
/home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt \
> $outFile
print 'File written {}'.format(outFile)
!SIPSim KDE_info -n $outFile
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Init
Step2: Loading phyloseq list datasets
Step3: Infer abundance distribution of each bulk soil community
Step4: Relative abundance of most abundant taxa
Step5: Making a community file for the simulations
Step6: Adding reference genome taxon names
Step7: Writing file
Step8: parsing amp-Frag file to match comm file
|
14,304 | <ASSISTANT_TASK:>
Python Code:
from ceo import data_cleaning
from ceo import missing_data
from ceo import svr_prediction
from ceo import ridge_prediction
data_cleaning.clean_all_data()
missing_data.predict_all()
ridge_prediction.ridge_predict_all()
svr_prediction.SVR_predict_all()
%%HTML
<div class='tableauPlaceholder' id='viz1489609724011' style='position: relative'><noscript><a href='#'><img alt='Clean Energy Production in the contiguous United States(in million kWh) ' src='https://public.tableau.com/static/images/PB/PB87S38NW/1_rss.png' style='border: none' /></a></noscript><object class='tableauViz' style='display:none;'><param name='host_url' value='https%3A%2F%2Fpublic.tableau.com%2F' /> <param name='path' value='shared/PB87S38NW' /> <param name='toolbar' value='yes' /><param name='static_image' value='https://public.tableau.com/static/images/PB/PB87S38NW/1.png' /> <param name='animate_transition' value='yes' /><param name='display_static_image' value='yes' /><param name='display_spinner' value='yes' /><param name='display_overlay' value='yes' /><param name='display_count' value='yes' /></object></div> <script type='text/javascript'> var divElement = document.getElementById('viz1489609724011'); var vizElement = divElement.getElementsByTagName('object')[0]; vizElement.style.width='1004px';vizElement.style.height='869px'; var scriptElement = document.createElement('script'); scriptElement.src = 'https://public.tableau.com/javascripts/api/viz_v1.js'; vizElement.parentNode.insertBefore(scriptElement, vizElement); </script>
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: datacleaning
Step2: missing_data
Step3: ridge_prediction
Step4: svr_prediction
Step5: plots
|
14,305 | <ASSISTANT_TASK:>
Python Code:
x = [10, 20, 30, 40, 50]
for item in x:
    print "Item is ", item
#IPython is what you are using now to run the notebook
import IPython
print ("IPython version: %6.6s (need at least 1.0)" % IPython.__version__)
# Numpy is a library for working with Arrays
import numpy as np
print ("Numpy version: %6.6s (need at least 1.7.1)" % np.__version__)
# SciPy implements many different numerical algorithms
import scipy as sp
print ("SciPy version: %6.6s (need at least 0.12.0)" % sp.__version__)
# Pandas makes working with data tables easier
import pandas as pd
print ("Pandas version: %6.6s (need at least 0.11.0)" % pd.__version__)
# Module for plotting
import matplotlib
print ("Mapltolib version: %6.6s (need at least 1.2.1)" % matplotlib.__version__)
# SciKit Learn implements several Machine Learning algorithms
import sklearn
print ("Scikit-Learn version: %6.6s (need at least 0.13.1)" % sklearn.__version__)
# Requests is a library for getting data from the Web
import requests
print ("requests version: %6.6s (need at least 1.2.3)" % requests.__version__)
# Networkx is a library for working with networks
import networkx as nx
print ("NetworkX version: %6.6s (need at least 1.7)" % nx.__version__)
#BeautifulSoup is a library to parse HTML and XML documents
#import beautifulsoup4
import bs4
print ("BeautifulSoup version:%6.6s (need at least 3.2)" % bs4.__version__)
#MrJob is a library to run map reduce jobs on Amazon's computers
import mrjob
print ("Mr Job version: %6.6s (need at least 0.4)" % mrjob.__version__)
#Pattern has lots of tools for working with data from the internet
import pattern
print ("Pattern version: %6.6s (need at least 2.6)" % pattern.__version__)
#this line prepares IPython for working with matplotlib
%matplotlib inline
# this actually imports matplotlib
import matplotlib.pyplot as plt
x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10
y = np.sin(x)
z = y + np.random.normal(size=30) * .2
plt.plot(x, y, 'ro-', label='A sine wave')
plt.plot(x, z, 'b-', label='Noisy sine')
plt.legend(loc = 'lower right')
plt.xlabel("X axis")
plt.ylabel("Y axis")
print ("Make a 3 row x 4 column array of random numbers")
x = np.random.random((3, 4))
print (x)
print
print ("Add 1 to every element")
x = x + 1
print (x)
print
print ("Get the element at row 1, column 2")
print (x[1, 2])
print
# The colon syntax is called "slicing" the array.
print ("Get the first row")
print (x[0, :])
print
print ("Get every 2nd column of the first row")
print (x[0, ::2])
print
x = np.random.random((3, 4))
#your code here
print (x.max())
print (x.min())
print (x.mean())
#your code here
x.max(axis = 1)
x = np.random.binomial(500, .5)
x = np.random.binomial?
print ("number of heads:", x)
x = np.random.binomial
#your code here
heads = []
for i in range(500):
    heads.append(np.random.binomial(500, .5))
plt.hist(heads, bins =500)
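# Editor's addition (not part of the original exercise): a quick sanity check of the simulated
# distribution against the binomial formulas, mean n*p = 250 and std sqrt(n*p*(1-p)) ~ 11.2
# for n = 500, p = 0.5.
print ("simulated mean:", np.mean(heads), " simulated std:", np.std(heads))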
"""
Function
--------
simulate_prizedoor
Generate a random array of 0s, 1s, and 2s, representing
hiding a prize between door 0, door 1, and door 2
Parameters
----------
nsim : int
    The number of simulations to run
Returns
-------
sims : array
    Random array of 0s, 1s, and 2s
Example
-------
>>> print simulate_prizedoor(3)
array([0, 0, 2])
"""
def simulate_prizedoor(nsim):
    #compute here
    answer = np.random.random_integers(0, 2, nsim)
    return answer
#your code here
print (simulate_prizedoor(10))
"""
Function
--------
simulate_guess
Return any strategy for guessing which door a prize is behind. This
could be a random strategy, one that always guesses 2, whatever.
Parameters
----------
nsim : int
    The number of simulations to generate guesses for
Returns
-------
guesses : array
    An array of guesses. Each guess is a 0, 1, or 2
Example
-------
>>> print simulate_guess(5)
array([0, 0, 0, 0, 0])
"""
#your code here
def simulate_guess(nsim):
    #compute here
    answer = np.random.random_integers(0, 1, nsim)
    return answer
#your code here
print (simulate_guess(10))
"""
Function
--------
goat_door
Simulate the opening of a "goat door" that doesn't contain the prize,
and is different from the contestant's guess
Parameters
----------
prizedoors : array
    The door that the prize is behind in each simulation
guesses : array
    The door that the contestant guessed in each simulation
Returns
-------
goats : array
    The goat door that is opened for each simulation. Each item is 0, 1, or 2, and is different
    from both prizedoors and guesses
Examples
--------
>>> print goat_door(np.array([0, 1, 2]), np.array([1, 1, 1]))
>>> array([2, 2, 0])
"""
def goat_door(prizedoors, guesses):
    #strategy: generate random answers, and
    #keep updating until they satisfy the rule
    #that they aren't a prizedoor or a guess
    result = np.random.randint(0, 3, prizedoors.size)
    while True:
        bad = (result == prizedoors) | (result == guesses)
        if not bad.any():
            return result
        result[bad] = np.random.randint(0, 3, bad.sum())
print (goat_door(np.array([0, 1, 2,1]), np.array([1, 1, 1,1])))
"""
Function
--------
switch_guess
The strategy that always switches a guess after the goat door is opened
Parameters
----------
guesses : array
    Array of original guesses, for each simulation
goatdoors : array
    Array of revealed goat doors for each simulation
Returns
-------
The new door after switching. Should be different from both guesses and goatdoors
Examples
--------
>>> print switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1]))
>>> array([2, 0, 0])
"""
#your code here
def switch_guess(guesses, goatdoors):
    #strategy: generate random answers, and
    #keep updating until they satisfy the rule
    #that they aren't a guess or a goatdoor
    result = np.random.randint(0, 3, guesses.size)
    while True:
        bad = (result == guesses) | (result == goatdoors)
        if not bad.any():
            return result
        result[bad] = np.random.randint(0, 3, bad.sum())
print (switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1])))
"""
Function
--------
win_percentage
Calculate the percent of times that a simulation of guesses is correct
Parameters
-----------
guesses : array
    Guesses for each simulation
prizedoors : array
    Location of prize for each simulation
Returns
--------
percentage : number between 0 and 100
    The win percentage
Examples
---------
>>> print win_percentage(np.array([0, 1, 2]), np.array([0, 0, 0]))
33.333
"""
#your code here
def win_percentage(guesses, prizedoors):
    correct_guesses = guesses == prizedoors
    # multiply by a float first so Python 2 integer division doesn't truncate the result to 0
    answer = 100. * correct_guesses.sum() / guesses.size
    return answer
print (win_percentage(np.array([0, 1, 2]), np.array([0, 2, 2])))
#your code here
#simulate prizedoors, guesses, goatdoors
prizedoors = simulate_prizedoor(10000)
guesses = simulate_guess(10000)
goatdoors = goat_door(prizedoors, guesses)
#compute win percent if keep original
print("Win Percentage - keep original")
print(win_percentage(guesses,prizedoors))
#compute win percent if switch guess
switchedguesses = switch_guess(guesses,goatdoors)
print("Win Percentage - switch guess")
print(win_percentage(switchedguesses,prizedoors))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python Libraries
Step2: If any of these libraries are missing or out of date, you will need to install them and restart IPython
Step3: If that last cell complained about the %matplotlib line, you need to update IPython to v1.0, and restart the notebook. See the installation page
Step4: Print the maximum, minimum, and mean of the array. This does not require writing a loop. In the code cell below, type x.m<TAB>, to find built-in operations for common array statistics like this
Step5: Call the x.max function again, but use the axis keyword to print the maximum of each row in x.
Step6: Here's a way to quickly simulate 500 coin "fair" coin tosses (where the probabily of getting Heads is 50%, or 0.5)
Step7: Repeat this simulation 500 times, and use the plt.hist() function to plot a histogram of the number of Heads (1s) in each simulation
Step9: The Monty Hall Problem
Step11: Next, write a function that simulates the contestant's guesses for nsim simulations. Call this function simulate_guess. The specs
Step13: Next, write a function, goat_door, to simulate randomly revealing one of the goat doors that a contestant didn't pick.
Step15: Write a function, switch_guess, that represents the strategy of always switching a guess after the goat door is opened.
Step17: Last function
Step18: Now, put it together. Simulate 10000 games where contestant keeps his original guess, and 10000 games where the contestant switches his door after a goat door is revealed. Compute the percentage of time the contestant wins under either strategy. Is one strategy better than the other?
|
14,306 | <ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient
from cs231n.layers import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """ returns relative error """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
[ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
# Test the affine_backward function
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be less than 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
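# Editor's sketch (not the course's reference implementation, which lives in cs231n/layers.py):
# the affine layer being gradient-checked above is typically implemented along these lines.
# The _sketch suffix avoids clashing with the names pulled in by the starred import.
def affine_forward_sketch(x, w, b):
    out = x.reshape(x.shape[0], -1).dot(w) + b     # flatten each example, then x.w + b
    return out, (x, w, b)
def affine_backward_sketch(dout, cache):
    x, w, b = cache
    dx = dout.dot(w.T).reshape(x.shape)            # route gradient back to input shape
    dw = x.reshape(x.shape[0], -1).T.dot(dout)     # gradient w.r.t. weights
    db = dout.sum(axis=0)                          # gradient w.r.t. bias
    return dx, dw, db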
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
[ 0., 0., 0.04545455, 0.13636364,],
[ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
x_shape = (2, 3, 4, 4)
w_shape = (3, 3, 4, 4)
x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)
w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)
b = np.linspace(-0.1, 0.2, num=3)
conv_param = {'stride': 2, 'pad': 1}
out, _ = conv_forward_naive(x, w, b, conv_param)
correct_out = np.array([[[[[-0.08759809, -0.10987781],
[-0.18387192, -0.2109216 ]],
[[ 0.21027089, 0.21661097],
[ 0.22847626, 0.23004637]],
[[ 0.50813986, 0.54309974],
[ 0.64082444, 0.67101435]]],
[[[-0.98053589, -1.03143541],
[-1.19128892, -1.24695841]],
[[ 0.69108355, 0.66880383],
[ 0.59480972, 0.56776003]],
[[ 2.36270298, 2.36904306],
[ 2.38090835, 2.38247847]]]]])
# Compare your output to ours; difference should be around 1e-8
print 'Testing conv_forward_naive'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(4, 3, 5, 5)
w = np.random.randn(2, 3, 3, 3)
b = np.random.randn(2,)
dout = np.random.randn(4, 2, 5, 5)
conv_param = {'stride': 1, 'pad': 1}
dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)
out, cache = conv_forward_naive(x, w, b, conv_param)
dx, dw, db = conv_backward_naive(dout, cache)
# Your errors should be around 1e-9'
print 'Testing conv_backward_naive function'
print 'dx error: ', rel_error(dx, dx_num)
print 'dw error: ', rel_error(dw, dw_num)
print 'db error: ', rel_error(db, db_num)
x_shape = (2, 3, 4, 4)
x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)
pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2}
out, _ = max_pool_forward_naive(x, pool_param)
correct_out = np.array([[[[-0.26315789, -0.24842105],
[-0.20421053, -0.18947368]],
[[-0.14526316, -0.13052632],
[-0.08631579, -0.07157895]],
[[-0.02736842, -0.01263158],
[ 0.03157895, 0.04631579]]],
[[[ 0.09052632, 0.10526316],
[ 0.14947368, 0.16421053]],
[[ 0.20842105, 0.22315789],
[ 0.26736842, 0.28210526]],
[[ 0.32631579, 0.34105263],
[ 0.38526316, 0.4 ]]]])
# Compare your output with ours. Difference should be around 1e-8.
print 'Testing max_pool_forward_naive function:'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(3, 2, 8, 8)
dout = np.random.randn(3, 2, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)
out, cache = max_pool_forward_naive(x, pool_param)
dx = max_pool_backward_naive(dout, cache)
# Your error should be around 1e-12
print 'Testing max_pool_backward_naive function:'
print 'dx error: ', rel_error(dx, dx_num)
from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}
t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()
print 'Testing conv_forward_fast:'
print 'Naive: %fs' % (t1 - t0)
print 'Fast: %fs' % (t2 - t1)
print 'Speedup: %fx' % ((t1 - t0) / (t2 - t1))
print 'Difference: ', rel_error(out_naive, out_fast)
t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()
print '\nTesting conv_backward_fast:'
print 'Naive: %fs' % (t1 - t0)
print 'Fast: %fs' % (t2 - t1)
print 'Speedup: %fx' % ((t1 - t0) / (t2 - t1))
print 'dx difference: ', rel_error(dx_naive, dx_fast)
print 'dw difference: ', rel_error(dw_naive, dw_fast)
print 'db difference: ', rel_error(db_naive, db_fast)
from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast
x = np.random.randn(100, 3, 32, 32)
dout = np.random.randn(100, 3, 16, 16)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
t0 = time()
out_naive, cache_naive = max_pool_forward_naive(x, pool_param)
t1 = time()
out_fast, cache_fast = max_pool_forward_fast(x, pool_param)
t2 = time()
print 'Testing pool_forward_fast:'
print 'Naive: %fs' % (t1 - t0)
print 'fast: %fs' % (t2 - t1)
print 'speedup: %fx' % ((t1 - t0) / (t2 - t1))
print 'difference: ', rel_error(out_naive, out_fast)
t0 = time()
dx_naive = max_pool_backward_naive(dout, cache_naive)
t1 = time()
dx_fast = max_pool_backward_fast(dout, cache_fast)
t2 = time()
print '\nTesting pool_backward_fast:'
print 'Naive: %fs' % (t1 - t0)
print 'Fast: %fs' % (t2 - t1)
print 'speedup: %fx' % ((t1 - t0) / (t2 - t1))
print 'dx difference: ', rel_error(dx_naive, dx_fast)
from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward
x = np.random.randn(2, 3, 16, 16) # N, C, H, W = X.shape
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param)
dx, dw, db = conv_relu_pool_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout)
print 'Testing conv_relu_pool_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
from cs231n.layer_utils import conv_relu_forward, conv_relu_backward
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_relu_forward(x, w, b, conv_param)
dx, dw, db = conv_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)
print 'Testing conv_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print 'Testing affine_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modular neural nets
Step2: Affine layer
Step3: Affine layer
Step4: ReLU layer
Step5: ReLU layer
Step6: Loss layers
Step7: Convolution layer
Step8: Convolution layer
Step9: Max pooling layer
Step10: Max pooling layer
Step11: Fast layers
Step12: Sandwich layers
|
14,307 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
from ipywidgets import interact
import ipywidgets as widgets
from common import common_vcu_demo_transcode_to_streamout
import os
from ipywidgets import HBox, VBox, Text, Layout
input_path=widgets.Text(value='',
placeholder='Insert file path',
description='Input File:',
#style={'description_width': 'initial'},
disabled=False)
address_path=widgets.Text(value='',
placeholder='192.168.1.101 ',
description='Client IP:',
disabled=False)
port_number=widgets.Text(value='',
placeholder='(optional) 50000',
description='Port No:',
disabled=False)
HBox([input_path, address_path, port_number])
codec_type=widgets.RadioButtons(
options=['avc', 'hevc'],
description='Video Codec:',
disabled=False)
codec_type
periodicity_idr=widgets.Text(value='',
placeholder='(optional) 30, 40, 50',
description='Periodicity Idr:',
style={'description_width': 'initial'},
#layout=Layout(width='35%', height='30px'),
disabled=False)
cpb_size=widgets.Text(value='',
placeholder='(optional) 1000,2000',
description='CPB Size:',
#style={'description_width': 'initial'},
#layout=Layout(width='35%', height='30px'),
disabled=False)
HBox([periodicity_idr, cpb_size])
gop_length=widgets.Text(value='',
placeholder='(optional) 30, 60',
description='Gop Length:',
disabled=False)
bit_rate=widgets.Text(value='',
placeholder='(optional) 1000, 20000',
description='Bit Rate(Kbps):',
style={'description_width': 'initial'},
disabled=False)
HBox([bit_rate, gop_length])
entropy_buffers=widgets.Dropdown(
options=['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15'],
value='5',
description='Entropy Buffers Nos:',
style={'description_width': 'initial'},
disabled=False,)
entropy_buffers
HBox([entropy_buffers])
#HBox([port_number, optional])
from IPython.display import clear_output
from IPython.display import Javascript
def run_all(ev):
display(Javascript('IPython.notebook.execute_cells_below()'))
def clear_op(event):
clear_output(wait=True)
return
button1 = widgets.Button(
description='Clear Output',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '300px'}
)
button2 = widgets.Button(
description='',
style= {'button_color':'white'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '82px'},
disabled=True
)
button1.on_click(run_all)
button1.on_click(clear_op)
def start_demo(event):
#clear_output(wait=True)
arg = common_vcu_demo_transcode_to_streamout.cmd_line_args_generator(input_path.value, bit_rate.value, codec_type.value, address_path.value, port_number.value, entropy_buffers.value, gop_length.value, periodicity_idr.value, cpb_size.value);
#!sh vcu-demo-transcode-to-streamout.sh $arg > logs.txt 2>&1
!sh vcu-demo-transcode-to-streamout.sh $arg
return
button = widgets.Button(
description='click to start vcu-transcode-to-streamout demo',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '300px'}
)
button.on_click(start_demo)
HBox([button, button2, button1])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the Demo
Step2: Insert input file path and host IP
Step3: Output Format
Step4: Advanced options
|
14,308 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import faps as fp
import matplotlib.pylab as plt
import pandas as pd
from time import time, localtime, asctime
np.random.seed(37)
allele_freqs = np.random.uniform(0.2, 0.5, 50)
adults = fp.make_parents(10, allele_freqs, family_name='adult')
family1 = fp.make_offspring(parents = adults, noffs=5)
family1.parents
family2 = fp.make_offspring(parents = adults, dam_list=[7,1,8,8,0], sire_list=[2,6,3,0,7])
family2.parents
family3 = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5)
family3.parents
family4 = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=[5,4,3,2])
family4.parents
np.random.seed(85)
allele_freqs = np.random.uniform(0.2, 0.5, 50)
adults = fp.make_parents(10, allele_freqs, family_name='adult')
progeny = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5)
d, mu= 0.01, 0.0015 # values for dropout and error rate.
# add genotyping errors
adults_mu = adults.mutations(mu)
progeny_mu = progeny.mutations(mu)
# add dropouts (to the mutated data)
adults_mu = adults_mu.dropouts(d)
progeny_mu = progeny.dropouts(d)
print(adults.missing_data().mean())
print(adults_mu.missing_data().mean())
np.random.seed(85)
allele_freqs = np.random.uniform(0.4, 0.5, 50)
adults = fp.make_parents(10, allele_freqs, family_name='adult')
progeny = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5)
mothers = adults.subset(progeny.parent_index('m', adults.names))
patlik = fp.paternity_array(progeny, mothers, adults, mu=0.0015)
sc = fp.sibship_clustering(patlik)
sc.accuracy(progeny, adults)
patlik.prob_array = patlik.adjust_prob_array(purge = 1, missing_parents=0.25)
sc = fp.sibship_clustering(patlik)
sc.accuracy(progeny, adults)
patlik.prob_array = patlik.adjust_prob_array(selfing_rate=0.5)
sc = fp.sibship_clustering(patlik)
sc.accuracy(progeny, adults)
# Common simulation parameters
r = 10 # number of replicates
nloci = [30,40,50] # number of loci
allele_freqs = [0.25, 0.5] # draw allele frequencies
nadults = [100,250,500] # size of the adults population
mu = 0.0015 #genotype error rates
sires = 4
offspring = 5
np.random.seed(614)
eventab = fp.make_power(r, nloci, allele_freqs, nadults, sires, offspring, 0, mu)
fp.make_power(r, nloci, allele_freqs, nadults, sires, offspring, 0,
mu_input= 0.003,
mu_real=0.0015,
unsampled_real=0.1,
unsampled_input = 0.05);
eventab, evenclusters = fp.make_power(r, nloci, allele_freqs, nadults, sires, offspring, 0, mu, return_clusters=True, verbose=False)
even_famsizes = np.array([evenclusters[i].family_size() for i in range(len(evenclusters))])
plt.plot(even_famsizes.mean(0))
plt.show()
# Common simulation parameters
nreps = 10 # number of replicates
nloci = [50] # number of loci
allele_freqs = [0.1, 0.2, 0.3, 0.4, 0.5] # draw allele frequencies
nadults = [10, 100, 250, 500, 750, 1000] # size of the adults population
mu_list = [0.0015] #genotype error rates
nsims = nreps * len(nloci) * len(allele_freqs) * len(nadults) * len(mu_list) # total number of simulations to run
dt = np.zeros([nsims, 7]) # empty array to store data
t0 = time()
counter = 0
print("Beginning simulations on {}.".format(asctime(localtime(time()) )))
for r in range(nreps):
for l in range(len(nloci)):
for a in range(len(allele_freqs)):
for n in range(len(nadults)):
for m in range(len(mu_list)):
af = np.repeat(allele_freqs[a], nloci[l])
adults = fp.make_parents(nadults[n], af)
progeny = fp.make_offspring(adults, 100)
mi = progeny.parent_index('m', adults.names) # maternal index
mothers = adults.subset(mi)
patlik = fp.paternity_array(progeny, mothers, adults, mu_list[m])
# Find the rank of the missing term within the array.
rank = [np.where(np.sort(patlik.prob_array[i]) == patlik.prob_array[i,-1])[0][0] for i in range(progeny.size)]
rank = np.array(rank).mean() / nadults[n]
                    # get the posterior probability for the missing term.
                    prob_missing = np.exp(patlik.prob_array[:, -1]).mean()
                    #export data
                    dt[counter] = np.array([r, nloci[l], allele_freqs[a], nadults[n], mu_list[m], rank, prob_missing])
# update counters
counter += 1
print("Completed in {} hours.".format(round((time() - t0)/3600,2)))
head = ['rep', 'nloci', 'allele_freqs', 'nadults', 'mu', 'rank', 'prob_missing']
dt = pd.DataFrame(dt, columns=head)
dt.groupby('allele_freqs').mean()
dt.groupby('nadults').mean()
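# A small follow-up sketch (assumes only the dt DataFrame built above and the
# matplotlib import already made): plot the mean posterior probability of a
# missing sire against allele frequency and population size, which makes the
# two trends summarised by the groupby tables easier to see.
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
dt.groupby('allele_freqs')['prob_missing'].mean().plot(ax=axes[0], marker='o')
axes[0].set_xlabel('allele frequency')
axes[0].set_ylabel('mean prob. of missing sire')
dt.groupby('nadults')['prob_missing'].mean().plot(ax=axes[1], marker='o')
axes[1].set_xlabel('number of adults')
plt.tight_layout()
plt.show()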
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are multiple ways to mate adults to generate offspring. If you supply a set of adults and an integer number of offspring, make_offspring mates adults at random.
Step2: You can also supply an explicit list of dams and sires, in which case the adults are paired in the order they appear in each list.
Step3: Usually we really want to simulate half sib arrays. This can be done using make_sibships, which mates a single mother to a set of males.
Step4: For uneven sibship sizes, give a list of sizes for each family of the same length as sires.
Step5: Adding errors
Step6: It is best to create the progeny before adding errors. Set the error rates and add errors at random.
Step7: mutations and dropouts make copies of the genotypeArray, so the original data remains unchanged. For example
Step8: Paternity and sibships
Step9: A very useful tool is the accuracy subfunction for sibshipCluster objects.
Step10: In this example, accuracy is high, but the probability of a missing sire is NaN because all the sires are present; this number is calculated only for offspring whose sire was absent.
Step11: In contrast, imagine we had an idea that selfing was strong. How would this affect things?
Step12: The results are identical to the unmodified case; FAPS has correctly identified the partition structure in spite of the (incorrect) strong prior for high selfing.
Step13: For convenience, make_power provides a summary of the input parameters.
Step14: If you want to perform downstream analysis, you can tell make_power to also export each paternity_Array and/or sibshipCluster object. This is done by setting return_paternities and return_clusters to True. For example, this code pulls out the distribution of family sizes from each sibshipArray, and plots it.
Step15: Custom simulations
Step16: This cell simulates genotype data and clusters the offspring into full sibships.
Step17: There is a strong dependency on minor allele frequency. As MAF goes from zero to 0.5, the effectiveness of identifying a missing sire using this likelihood estimator goes from 'basically useless' to 'useful'.
Step18: In contrast, there is no effect of the number of adults.
|
14,309 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'snu', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
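# Illustrative example only (not a real model description): multi-valued
# properties are filled in by calling DOC.set_value once per applicable choice,
# e.g.
# DOC.set_value("Potential temperature")
# DOC.set_value("Salinity")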
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
14,310 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NO NEED TO DO MAGIC INLINE COMMAND ON QUANTOPIAN!
mcdon = get_pricing('MCD',
start_date = '2017-01-01',
end_date = '2017-02-01',
frequency = 'minute')
mcdon.head()
mcdon.info()
# Can only go about 12 years back
# which is really all you need for algo trading,
# going back further probably is more noise than signal.
mcdon = get_pricing('MCD',
start_date = '2005-01-01',
end_date = '2017-01-01',
frequency = 'daily')
mcdon['close_price'].plot()
mcdon['close_price'].pct_change(1).hist(bins = 100,
figsize = (12, 8))
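# A small pandas-only follow-up (uses the mcdon daily pricing DataFrame fetched
# above): summarise the daily close-to-close returns shown in the histogram.
daily_returns = mcdon['close_price'].pct_change(1).dropna()
print('mean daily return:', daily_returns.mean())
print('daily return std: ', daily_returns.std())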
mcdon_eq_info = symbols('MCD')
type(mcdon_eq_info)
for key in mcdon_eq_info.to_dict():
print(key)
print(mcdon_eq_info.to_dict()[key])
print('\n')
# Have to do this first in the notebook:
fundamentals = init_fundamentals()
#fundamentals. # call tab here as in the video!
# Market Cap
my_query = query(fundamentals.valuation.market_cap)
my_funds = get_fundamentals(my_query,
'2017-01-01')
my_funds.info()
# Basically just returns the market cap of everything
# for 2017-01-01
my_funds.head()
# What you usually do is filter by other qualities after the query!
# Only get companies worth 500 billion or more (that's a lot of dough!)
big_companies = (query(fundamentals.valuation.market_cap).
filter(fundamentals.valuation.market_cap > 500000000000) )
my_big_funds = get_fundamentals(big_companies,
'2017-07-19')
# On
my_big_funds
7.82 * 10 ** 11
#get_fundamentals()
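# Hedged sketch: get_fundamentals() accepts a SQLAlchemy-style query, so
# multiple .filter() calls can be chained (an .order_by() could be added the
# same way). Only the market_cap column already used above is assumed here;
# the bounds are arbitrary illustration values.
big_but_not_huge_query = (query(fundamentals.valuation.market_cap)
                          .filter(fundamentals.valuation.market_cap > 100000000000)
                          .filter(fundamentals.valuation.market_cap < 500000000000))
get_fundamentals(big_but_not_huge_query, '2017-07-19')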
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Information
Step2: symbols()
Step3: get_fundamentals()
Step4: The get_fundamentals() function takes in a SQLAlchemy query which can be quite complicated and strange looking at first. Basically it allows you to filter by a variety of fundamentals (things like Market Cap, P/E Ratio, or even city of HQ). Check out the link above for all the things you can filter by!
|
14,311 | <ASSISTANT_TASK:>
Python Code:
!curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
!/root/miniconda/bin/conda info -e
!pip install --pre deepchem
import deepchem as dc
dc.__version__
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = datasets
print(test_dataset)
test_dataset.y
for X, y, w, id in test_dataset.itersamples():
print(y, id)
for X, y, w, ids in test_dataset.iterbatches(batch_size=50):
print(y.shape)
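# Sketch of the extra iterbatches() options mentioned in the accompanying text:
# iterate the full dataset for several epochs with samples in a random order.
for X, y, w, ids in test_dataset.iterbatches(batch_size=100, epochs=10, deterministic=False):
    print(y.shape)
    break  # one batch is enough to demonstrate the call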
test_dataset.to_dataframe()
import numpy as np
X = np.random.random((10, 5))
y = np.random.random((10, 2))
dataset = dc.data.NumpyDataset(X=X, y=y)
print(dataset)
dataset.to_dataframe()
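# Sketch: weights and IDs can also be passed explicitly when they matter
# (per-sample loss weights, meaningful string identifiers). The values below
# are made up purely for illustration.
w = np.ones((10, 2))
ids = np.array(['sample_%d' % i for i in range(10)])
dataset_with_ids = dc.data.NumpyDataset(X=X, y=y, w=w, ids=ids)
dataset_with_ids.to_dataframe()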
import tempfile
with tempfile.TemporaryDirectory() as data_dir:
disk_dataset = dc.data.DiskDataset.from_numpy(X=X, y=y, data_dir=data_dir)
print(disk_dataset)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now import the deepchem package to play with.
Step2: Anatomy of a Dataset
Step3: We now have three Dataset objects
Step4: There's a lot of information there, so let's start at the beginning. It begins with the label "DiskDataset". Dataset is an abstract class. It has a few subclasses that correspond to different ways of storing data.
Step5: This is a very easy way to access data, but you should be very careful about using it. This requires the data for all samples to be loaded into memory at once. That's fine for small datasets like this one, but for large datasets it could easily take more memory than you have.
Step6: Most deep learning models can process a batch of multiple samples all at once. You can use iterbatches() to iterate over batches of samples.
Step7: iterbatches() has other features that are useful when training models. For example, iterbatches(batch_size=100, epochs=10, deterministic=False) will iterate over the complete dataset ten times, each time with the samples in a different random order.
Step8: Creating Datasets
Step9: Notice that we did not specify weights or IDs. These are optional, as is y for that matter. Only X is required. Since we left them out, it automatically built w and ids arrays for us, setting all weights to 1 and setting the IDs to integer indices.
Step10: What about creating a DiskDataset? If you have the data in NumPy arrays, you can call DiskDataset.from_numpy() to save it to disk. Since this is just a tutorial, we will save it to a temporary directory.
|
14,312 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import time
class ArtificialDataset(tf.data.Dataset):
def _generator(num_samples):
# Opening the file
time.sleep(0.03)
for sample_idx in range(num_samples):
# Reading data (line, record) from the file
time.sleep(0.015)
yield (sample_idx,)
def __new__(cls, num_samples=3):
return tf.data.Dataset.from_generator(
cls._generator,
output_signature = tf.TensorSpec(shape = (1,), dtype = tf.int64),
args=(num_samples,)
)
def benchmark(dataset, num_epochs=2):
start_time = time.perf_counter()
for epoch_num in range(num_epochs):
for sample in dataset:
# Performing a training step
time.sleep(0.01)
print("Execution time:", time.perf_counter() - start_time)
benchmark(ArtificialDataset())
benchmark(
ArtificialDataset()
.prefetch(tf.data.AUTOTUNE)
)
benchmark(
tf.data.Dataset.range(2)
.interleave(lambda _: ArtificialDataset())
)
benchmark(
tf.data.Dataset.range(2)
.interleave(
lambda _: ArtificialDataset(),
num_parallel_calls=tf.data.AUTOTUNE
)
)
def mapped_function(s):
# Do some hard pre-processing
tf.py_function(lambda: time.sleep(0.03), [], ())
return s
benchmark(
ArtificialDataset()
.map(mapped_function)
)
benchmark(
ArtificialDataset()
.map(
mapped_function,
num_parallel_calls=tf.data.AUTOTUNE
)
)
benchmark(
ArtificialDataset()
.map( # Apply time consuming operations before cache
mapped_function
).cache(
),
5
)
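# Variation (sketch): cache() also accepts a filename, persisting the cached
# elements on disk so they can be reused across runs. The path below is only
# an example.
benchmark(
    ArtificialDataset()
    .map(mapped_function)
    .cache("/tmp/artificial_dataset.cache"),
    5
)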
fast_dataset = tf.data.Dataset.range(10000)
def fast_benchmark(dataset, num_epochs=2):
start_time = time.perf_counter()
for _ in tf.data.Dataset.range(num_epochs):
for _ in dataset:
pass
tf.print("Execution time:", time.perf_counter() - start_time)
def increment(x):
return x+1
fast_benchmark(
fast_dataset
# Apply function one item at a time
.map(increment)
# Batch
.batch(256)
)
fast_benchmark(
fast_dataset
.batch(256)
# Apply function on a batch of items
    # The tf.Tensor.__add__ method already handles batches
.map(increment)
)
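# Putting the pieces above together (sketch): parallel extraction (interleave),
# parallel transformation (map), caching, and prefetching combined in a single
# input pipeline, with tf.data.AUTOTUNE left to tune the parallelism.
benchmark(
    tf.data.Dataset.range(2)
    .interleave(lambda _: ArtificialDataset(), num_parallel_calls=tf.data.AUTOTUNE)
    .map(mapped_function, num_parallel_calls=tf.data.AUTOTUNE)
    .cache()
    .prefetch(tf.data.AUTOTUNE),
    5
)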
import itertools
from collections import defaultdict
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
class TimeMeasuredDataset(tf.data.Dataset):
# OUTPUT: (steps, timings, counters)
OUTPUT_TYPES = (tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32)
OUTPUT_SHAPES = ((2, 1), (2, 2), (2, 3))
_INSTANCES_COUNTER = itertools.count() # Number of datasets generated
_EPOCHS_COUNTER = defaultdict(itertools.count) # Number of epochs done for each dataset
def _generator(instance_idx, num_samples):
epoch_idx = next(TimeMeasuredDataset._EPOCHS_COUNTER[instance_idx])
# Opening the file
open_enter = time.perf_counter()
time.sleep(0.03)
open_elapsed = time.perf_counter() - open_enter
for sample_idx in range(num_samples):
# Reading data (line, record) from the file
read_enter = time.perf_counter()
time.sleep(0.015)
read_elapsed = time.perf_counter() - read_enter
yield (
[("Open",), ("Read",)],
[(open_enter, open_elapsed), (read_enter, read_elapsed)],
[(instance_idx, epoch_idx, -1), (instance_idx, epoch_idx, sample_idx)]
)
open_enter, open_elapsed = -1., -1. # Negative values will be filtered
def __new__(cls, num_samples=3):
return tf.data.Dataset.from_generator(
cls._generator,
output_types=cls.OUTPUT_TYPES,
output_shapes=cls.OUTPUT_SHAPES,
args=(next(cls._INSTANCES_COUNTER), num_samples)
)
def timelined_benchmark(dataset, num_epochs=2):
# Initialize accumulators
steps_acc = tf.zeros([0, 1], dtype=tf.dtypes.string)
times_acc = tf.zeros([0, 2], dtype=tf.dtypes.float32)
values_acc = tf.zeros([0, 3], dtype=tf.dtypes.int32)
start_time = time.perf_counter()
for epoch_num in range(num_epochs):
epoch_enter = time.perf_counter()
for (steps, times, values) in dataset:
            # Record dataset preparation information
steps_acc = tf.concat((steps_acc, steps), axis=0)
times_acc = tf.concat((times_acc, times), axis=0)
values_acc = tf.concat((values_acc, values), axis=0)
# Simulate training time
train_enter = time.perf_counter()
time.sleep(0.01)
train_elapsed = time.perf_counter() - train_enter
            # Record training information
steps_acc = tf.concat((steps_acc, [["Train"]]), axis=0)
times_acc = tf.concat((times_acc, [(train_enter, train_elapsed)]), axis=0)
values_acc = tf.concat((values_acc, [values[-1]]), axis=0)
epoch_elapsed = time.perf_counter() - epoch_enter
        # Record epoch information
steps_acc = tf.concat((steps_acc, [["Epoch"]]), axis=0)
times_acc = tf.concat((times_acc, [(epoch_enter, epoch_elapsed)]), axis=0)
values_acc = tf.concat((values_acc, [[-1, epoch_num, -1]]), axis=0)
time.sleep(0.001)
tf.print("Execution time:", time.perf_counter() - start_time)
return {"steps": steps_acc, "times": times_acc, "values": values_acc}
def draw_timeline(timeline, title, width=0.5, annotate=False, save=False):
# Remove invalid entries (negative times, or empty steps) from the timelines
invalid_mask = np.logical_and(timeline['times'] > 0, timeline['steps'] != b'')[:,0]
steps = timeline['steps'][invalid_mask].numpy()
times = timeline['times'][invalid_mask].numpy()
values = timeline['values'][invalid_mask].numpy()
# Get a set of different steps, ordered by the first time they are encountered
step_ids, indices = np.stack(np.unique(steps, return_index=True))
step_ids = step_ids[np.argsort(indices)]
# Shift the starting time to 0 and compute the maximal time value
min_time = times[:,0].min()
times[:,0] = (times[:,0] - min_time)
end = max(width, (times[:,0]+times[:,1]).max() + 0.01)
cmap = mpl.cm.get_cmap("plasma")
plt.close()
fig, axs = plt.subplots(len(step_ids), sharex=True, gridspec_kw={'hspace': 0})
fig.suptitle(title)
fig.set_size_inches(17.0, len(step_ids))
plt.xlim(-0.01, end)
for i, step in enumerate(step_ids):
step_name = step.decode()
ax = axs[i]
ax.set_ylabel(step_name)
ax.set_ylim(0, 1)
ax.set_yticks([])
ax.set_xlabel("time (s)")
ax.set_xticklabels([])
ax.grid(which="both", axis="x", color="k", linestyle=":")
# Get timings and annotation for the given step
entries_mask = np.squeeze(steps==step)
serie = np.unique(times[entries_mask], axis=0)
annotations = values[entries_mask]
ax.broken_barh(serie, (0, 1), color=cmap(i / len(step_ids)), linewidth=1, alpha=0.66)
if annotate:
for j, (start, width) in enumerate(serie):
annotation = "\n".join([f"{l}: {v}" for l,v in zip(("i", "e", "s"), annotations[j])])
ax.text(start + 0.001 + (0.001 * (j % 2)), 0.55 - (0.1 * (j % 2)), annotation,
horizontalalignment='left', verticalalignment='center')
if save:
plt.savefig(title.lower().translate(str.maketrans(" ", "_")) + ".svg")
def map_decorator(func):
def wrapper(steps, times, values):
# Use a tf.py_function to prevent auto-graph from compiling the method
return tf.py_function(
func,
inp=(steps, times, values),
Tout=(steps.dtype, times.dtype, values.dtype)
)
return wrapper
_batch_map_num_items = 50
def dataset_generator_fun(*args):
return TimeMeasuredDataset(num_samples=_batch_map_num_items)
@map_decorator
def naive_map(steps, times, values):
map_enter = time.perf_counter()
time.sleep(0.001) # Time consuming step
time.sleep(0.0001) # Memory consuming step
map_elapsed = time.perf_counter() - map_enter
return (
tf.concat((steps, [["Map"]]), axis=0),
tf.concat((times, [[map_enter, map_elapsed]]), axis=0),
tf.concat((values, [values[-1]]), axis=0)
)
naive_timeline = timelined_benchmark(
tf.data.Dataset.range(2)
.flat_map(dataset_generator_fun)
.map(naive_map)
.batch(_batch_map_num_items, drop_remainder=True)
.unbatch(),
5
)
@map_decorator
def time_consuming_map(steps, times, values):
map_enter = time.perf_counter()
time.sleep(0.001 * values.shape[0]) # Time consuming step
map_elapsed = time.perf_counter() - map_enter
return (
tf.concat((steps, tf.tile([[["1st map"]]], [steps.shape[0], 1, 1])), axis=1),
tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
)
@map_decorator
def memory_consuming_map(steps, times, values):
map_enter = time.perf_counter()
time.sleep(0.0001 * values.shape[0]) # Memory consuming step
map_elapsed = time.perf_counter() - map_enter
# Use tf.tile to handle batch dimension
return (
tf.concat((steps, tf.tile([[["2nd map"]]], [steps.shape[0], 1, 1])), axis=1),
tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
)
optimized_timeline = timelined_benchmark(
tf.data.Dataset.range(2)
.interleave( # Parallelize data reading
dataset_generator_fun,
num_parallel_calls=tf.data.AUTOTUNE
)
.batch( # Vectorize your mapped function
_batch_map_num_items,
drop_remainder=True)
.map( # Parallelize map transformation
time_consuming_map,
num_parallel_calls=tf.data.AUTOTUNE
)
.cache() # Cache data
.map( # Reduce memory usage
memory_consuming_map,
num_parallel_calls=tf.data.AUTOTUNE
)
    .prefetch( # Overlap producer and consumer work
tf.data.AUTOTUNE
)
.unbatch(),
5
)
draw_timeline(naive_timeline, "Naive", 15)
draw_timeline(optimized_timeline, "Optimized", 15)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Better performance with the tf.data API
Step2: Throughout this guide, you will iterate across a dataset and measure the performance.
Step3: This dataset is similar to the tf.data.Dataset.range one, adding a fixed delay at the beginning of and in-between each sample.
Step4: Optimize performance
Step5: Under the hood, this is how your execution time was spent
Step6: Now, as the data execution time plot shows, while the training step is running for sample 0, the input pipeline is reading the data for sample 1, and so on (a short prefetch sketch follows this list).
Step7: This data execution time plot shows the behavior of the interleave transformation, which fetches samples alternately from the two available datasets.
Step8: This time, as the data execution time plot shows, the reading of the two datasets is parallelized, reducing the global data processing time.
Step9: Sequential mapping
Step10: As in the naive approach, the plot shows that the times spent on the opening, reading, pre-processing (mapping) and training steps add up for a single iteration.
Step11: As the data plot demonstrates, the pre-processing steps overlap, reducing the overall time for a single iteration.
Step12: Here, the data execution time plot shows that when you cache a dataset, the transformations before the cache one (like the file opening and data reading) are executed only during the first epoch.
Step13: Scalar mapping
Step14: The plot above illustrates what is going on (with fewer samples) using the scalar mapping method.
Step15: This time, the mapped function is called once and applied to a batch of samples.
Step16: The dataset
Step17: This dataset provides samples of shape [[2, 1], [2, 2], [2, 3]] and of type [tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32].
Step18: The plotting method
Step19: Use wrappers for mapped function
Step20: Pipelines comparison
Step21: Naive
Step22: Optimized
|
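As a pointer for Step 6 above, a minimal standalone sketch of the prefetch transformation (assuming TensorFlow 2.x; the dataset here is a plain range rather than the artificial one used in the benchmarks):
import tensorflow as tf
ds = tf.data.Dataset.range(8).map(lambda x: x * 2).prefetch(tf.data.AUTOTUNE)
for element in ds:  # preprocessing of later elements overlaps with the work done in this loop body
    print(element.numpy())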
14,313 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
class Distiller(keras.Model):
def __init__(self, student, teacher):
super(Distiller, self).__init__()
self.teacher = teacher
self.student = student
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
alpha=0.1,
temperature=3,
):
        """Configure the distiller.

        Args:
            optimizer: Keras optimizer for the student weights
            metrics: Keras metrics for evaluation
            student_loss_fn: Loss function of difference between student
                predictions and ground-truth
            distillation_loss_fn: Loss function of difference between soft
                student predictions and soft teacher predictions
            alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn
            temperature: Temperature for softening probability distributions.
                Larger temperature gives softer distributions.
        """
super(Distiller, self).compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.alpha = alpha
self.temperature = temperature
def train_step(self, data):
# Unpack data
x, y = data
# Forward pass of teacher
teacher_predictions = self.teacher(x, training=False)
with tf.GradientTape() as tape:
# Forward pass of student
student_predictions = self.student(x, training=True)
# Compute losses
student_loss = self.student_loss_fn(y, student_predictions)
distillation_loss = self.distillation_loss_fn(
tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
tf.nn.softmax(student_predictions / self.temperature, axis=1),
)
loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
# Compute gradients
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics configured in `compile()`.
self.compiled_metrics.update_state(y, student_predictions)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update(
{"student_loss": student_loss, "distillation_loss": distillation_loss}
)
return results
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_prediction = self.student(x, training=False)
# Calculate the loss
student_loss = self.student_loss_fn(y, y_prediction)
# Update the metrics.
self.compiled_metrics.update_state(y, y_prediction)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update({"student_loss": student_loss})
return results
# Create the teacher
teacher = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="teacher",
)
# Create the student
student = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="student",
)
# Clone student for later comparison
student_scratch = keras.models.clone_model(student)
# Prepare the train and test dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize data
x_train = x_train.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = x_test.astype("float32") / 255.0
x_test = np.reshape(x_test, (-1, 28, 28, 1))
# Train teacher as usual
teacher.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate teacher on data.
teacher.fit(x_train, y_train, epochs=5)
teacher.evaluate(x_test, y_test)
# Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=keras.losses.KLDivergence(),
alpha=0.1,
temperature=10,
)
# Distill teacher to student
distiller.fit(x_train, y_train, epochs=3)
# Evaluate student on test dataset
distiller.evaluate(x_test, y_test)
# Train the student as usual (no distillation), for comparison
student_scratch.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate student trained from scratch.
student_scratch.fit(x_train, y_train, epochs=3)
student_scratch.evaluate(x_test, y_test)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Construct Distiller() class
Step3: Create student and teacher models
Step4: Prepare the dataset
Step5: Train the teacher
Step6: Distill teacher to student
Step7: Train student from scratch for comparison
|
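As an aside on the temperature argument documented in Distiller.compile above, a minimal sketch (assuming TensorFlow 2.x; the logits are made up) of how dividing by a larger temperature softens the distribution:
import tensorflow as tf
logits = tf.constant([[2.0, 1.0, 0.1]])
print(tf.nn.softmax(logits).numpy())         # sharper: most of the mass on the first class
print(tf.nn.softmax(logits / 10.0).numpy())  # softer: mass spread more evenly (temperature 10)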
14,314 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from qiskit_aqua.operator import Operator
from qiskit_aqua import get_initial_state_instance
num_qubits = 2
evo_time = 1
temp = np.random.random((2 ** num_qubits, 2 ** num_qubits))
h1 = temp + temp.T
qubitOp = Operator(matrix=h1)
state_in = get_initial_state_instance('CUSTOM')
state_in.init_args(num_qubits, state='random')
from scipy.linalg import expm
state_in_vec = state_in.construct_circuit('vector')
groundtruth = expm(-1.j * h1 * evo_time) @ state_in_vec
print('The directly computed groundtruth evolution result state is\n{}.'.format(groundtruth))
groundtruth_evolution = qubitOp.evolve(state_in_vec, evo_time, 'matrix', 0)
print('The groundtruth evolution result as computed by the Dynamics algorithm is\n{}.'.format(groundtruth_evolution))
np.testing.assert_allclose(groundtruth_evolution, groundtruth)
from qiskit import QuantumCircuit, QuantumRegister
quantum_registers = QuantumRegister(qubitOp.num_qubits)
circuit = state_in.construct_circuit('circuit', quantum_registers)
circuit += qubitOp.evolve(
None, evo_time, 'circuit', 1,
quantum_registers=quantum_registers,
expansion_mode='suzuki',
expansion_order=3
)
from qiskit.wrapper import execute as q_execute
from qiskit import Aer
backend = Aer.get_backend('statevector_simulator')
job = q_execute(circuit, backend)
circuit_execution_result = np.asarray(job.result().get_statevector(circuit))
print('The evolution result state from executing the Dynamics circuit is\n{}.'.format(circuit_execution_result))
from qiskit.tools.qi.qi import state_fidelity
print('Fidelity between the groundtruth and the circuit result states is {}.'.format(
state_fidelity(groundtruth, circuit_execution_result)
))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With the operator and the initial state, we can easily compute the groundtruth evolution result as follows.
Step2: The evolve method of the Operator class can compute the same ground-truth evolution via matrix-vector multiplication, so we can recompute the evolution's result state and easily verify that it is the same as the ground truth we just computed.
Step3: Next, let's actually build the quantum circuit, which involves the circuit that puts the system in the specified initial state and the evolution circuit corresponding to the operator we generated; for the latter, let's use, for example, the 3rd-order Suzuki expansion.
Step4: With the circuit built, we can now execute the circuit to get the evolution result. We use the statevector_simulator backend for the purpose of this demonstration.
Step5: We can then check the fidelity between the groundtruth and the circuit_execution_result.
|
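For intuition about the fidelity check in Step 5, a small numpy sketch of pure-state fidelity |<psi|phi>|^2 on two illustrative statevectors (not the ones produced above):
import numpy as np
psi = np.array([1.0, 0.0], dtype=complex)
phi = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)
print(np.abs(np.vdot(psi, phi)) ** 2)  # 0.5 for these two states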
14,315 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas
import statsmodels
import statsmodels.formula.api
import statsmodels.stats.api
import statsmodels.stats
import statsmodels.stats.outliers_influence
import statsmodels.graphics.regressionplots
import sklearn.preprocessing
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline
np.random.seed(seed=1234)
bidenFname = 'data/biden.csv'
df = pandas.read_csv(bidenFname).dropna()
model1 = statsmodels.formula.api.ols('biden ~ female + age + educ', data=df).fit()
print(model1.summary())
outliersDf = statsmodels.stats.outliers_influence.OLSInfluence(model1).summary_frame()
outliersDf.max()
outliersDf.min()
fig, ax = plt.subplots(figsize = (20, 7))
fig = statsmodels.graphics.regressionplots.plot_leverage_resid2(model1, ax = ax)
plt.show()
fig, axes = plt.subplots(ncols=2, figsize = (20, 7))
outliersDf[['dfb_Intercept', 'dfb_female', 'dfb_age', 'dfb_educ', 'cooks_d']].boxplot(ax = axes[0])
axes[0].set_title('$DFBETA$ and Cook\'s D boxplots')
outliersDf[['cooks_d']].plot(ax = axes[1])
axes[1].set_title('Cook\'s D per point')
plt.show()
names = ['$\chi^2_2$', 'p-value', 'Skew', 'Kurtosis']
test = statsmodels.stats.api.jarque_bera(model1.resid)
nonNormDF = pandas.DataFrame({n : [test[i]] for i, n in enumerate(names)})
nonNormDF
names = ['Lagrange multiplier statistic', 'p-value', 'f-value', 'f p-value']
test = statsmodels.stats.api.het_breushpagan(model1.resid, model1.model.exog)
heteroTestDF = pandas.DataFrame({n : [test[i]] for i, n in enumerate(names)})
heteroTestDF
names = ['Intercept','female','age','educ']
multicollinearityDF = pandas.DataFrame({n : [model1.eigenvals[i]] for i, n in enumerate(names)})
multicollinearityDF
model2 = statsmodels.formula.api.ols('biden ~ age + educ + age * educ', data=df).fit()
print(model2.summary())
print(model2.wald_test('age + age:educ').summary())
print(model2.wald_test('educ + age:educ').summary())
fullDF = pandas.read_csv(bidenFname)
imputer = sklearn.preprocessing.Imputer(strategy='mean')
imputedMeanDF = pandas.DataFrame(imputer.fit_transform(fullDF), columns = ['biden','female','age','educ','dem','rep'])
modelMean = statsmodels.formula.api.ols('biden ~ female + age + educ', data=imputedMeanDF).fit()
print(modelMean.summary())
imputer = sklearn.preprocessing.Imputer(strategy='median')
imputedMedianDF = pandas.DataFrame(imputer.fit_transform(fullDF), columns = ['biden','female','age','educ','dem','rep'])
modelMedian = statsmodels.formula.api.ols('biden ~ female + age + educ', data=imputedMedianDF).fit()
print(modelMedian.summary())
betasDF = pandas.DataFrame([model1.params, modelMean.params,modelMedian.params], index =['Base', 'Mean', 'Median'])
betasDF
errsDF = pandas.DataFrame([model1.bse, modelMean.bse,modelMedian.bse], index =['Base', 'Mean', 'Median'])
errsDF
errsDF.plot(logy=1)
plt.title('Errors per model')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Regression diagnostics
Step2: Part 1
Step3: We can see from the tables above that the $DFBETA$ values for some of the data points are quite significant and at least one point has a Cook's D much greater than $4/n$.
Step4: Plotting them shows that there are a fair number of influential points (the point labels are original indices of the points). We would first need to determine the criteria for classifying them as outliers. Cook's D above a certain value would be a good starting point. Once we identify them we could drop them. We could also check if they are only influential in one dimension and normalize that dimension to the mean or some other less significant values, while keeping the other values.
Step5: As shown by the large $\chi^2_2$ value of the Jarque-Bera test, the p-value is much too low for the errors to be normally distributed. The fix depends on the distribution of the errors: there may be a simple transform that makes them normal, in which case we can apply it; if not, we may need to rethink our regression.
Step6: As shown in the table, the Breusch–Pagan test indicates (p-value $< .05$) that there is some heteroskedasticity in the data. This could greatly affect our inference, since some regions have lower error than others and as such our accuracy is dependent on the input.
Step7: Looking at the eigenvalues of the correlation matrix, there is likely no multicollinearity: none of the eigenvalues is close to zero, so the regressors are not close to being linearly dependent.
Step8: Part 1
Step9: We can see from the Wald test for this marginal effect that the null hypothesis of no effect is soundly rejected at $p < .05$
Step10: We can see from the Wald test for this marginal effect that the null hypothesis of no effect is rejected, though only narrowly, at $p < .05$
Step11: Above is the summary table for the model trained with imputed data, where missing values are replaced with their mean.
Step12: Above is the summary table for the model trained with imputed data, where missing values are replaced with their median.
Step13: Table of parameters for the base (row wise dropping) and imputed models
|
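A side note on the imputation cells above: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22; a minimal sketch of the same mean imputation with the current API (the toy frame is illustrative):
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
toy = pd.DataFrame({'age': [21.0, np.nan, 45.0], 'educ': [12.0, 16.0, np.nan]})
print(pd.DataFrame(SimpleImputer(strategy='mean').fit_transform(toy), columns=toy.columns))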
14,316 | <ASSISTANT_TASK:>
Python Code:
class A(object):
pass
a_object = A()
print(type(a_object))
class B(object):
value = 1
b_object = B()
print(b_object.value)
class B(object):
value = 1
def show_value(self, another_arg):
print('self.value is {}'.format(self.value))
b1 = B()
b1.show_value(12)
b1.value = 999
b1.show_value(132)
b2 = B()
class C(object):
def __init__(self, value):
self.var = value
c1 = C("Python!")
c2 = C("Hello")
print(c1.var)
print(c2.var)
class Counter(object):
def __init__(self, start=0):
self.value = start
def increment(self):
self.value += 1
counter1 = Counter()
print(counter1.value)
counter1.increment()
print(counter1.value)
counter2 = Counter(start=10)
counter2.increment()
counter2.increment()
print(counter2.value)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note the reference to object; this means that our new class inherits from object. We won't be going into too much detail about inheritance, but for now you should always inherit from object when defining a class.
Step2: A class can store some data (after all, an empty class isn't very interesting!)
Step3: We can access variables stored in a class by writing the name of the instance followed by a dot and then the name of the variable
Step4: Classes can also contain functions. Functions attached to classes are called methods
Step5: The first argument to every method automatically refers to the object we're calling the method on; by convention we call that argument self (see the short sketch after this list).
Step6: Notice we don't have to pass the self argument, Python's object system does this for you.
Step7: The __init__ method is called when we create an instance of a class. Now when we call the class name we can pass the arguments required by __init__
Step8: Methods on an object have access to the variables defined on the object
|
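To make the point in Step 5 concrete, a short sketch reusing the Counter class defined above; calling the method through the class makes the otherwise implicit self argument explicit:
c = Counter(start=5)
Counter.increment(c)  # equivalent to c.increment(); c is passed explicitly as self
print(c.value)        # 6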
14,317 | <ASSISTANT_TASK:>
Python Code:
# create an 8 x 8 matrix with 0/1 values in a checkerboard pattern
import numpy as np
print (matriz)
print ()
print (matriz2 )
# normalize a matrix
matriz = np.array( [[3.,2.,1.,5.,1.],[2.,1.,4.,5.,1.],[5.,2.,1.,5.,3.] ] )
print (matriz)
# convert an N x 2 matrix of Cartesian coordinates to polar coordinates
Z = np.array([[10.,2.],[5.,3.5],[1.2,4.1],[3.2,3.14]])
Z2 = np.zeros_like(Z)
Z3 = np.zeros_like(Z)
print (Z2)
print ()
print (Z3)
# function that replaces the largest value with 0 and returns that largest value
Z = np.array([1.,2.,3.,4.,5.,6.])
print (Z.max())
# compute the Euclidean distance between X and a matrix of points Y
X = np.array([ 0.17, 0.59, 0.75, 0.78,0.47, 0.10, 0.06,0.37,0.66,0.10])
Y = np.array( [[ 0.82, 0.96, 0.53, 0.61, 0.76, 0.3, 0.95, 0.81, 0.53, 0.84],
[ 0.7, 0.49, 0.07, 0.12, 0.87, 0.73, 0.73, 0.39, 0.46, 0.37],
[ 0.54, 0.79, 0.66, 0.27, 0.79, 0.33, 0.36, 0.83, 0.22, 0.17],
[ 0.96, 0.63, 0.5 , 0.88, 0.45, 0.9, 0.12, 0.92, 0.7 , 0.79],
[ 0.6 , 0.02, 0.39, 0.29, 0.33, 0.07, 0.35, 0.31, 0.38, 0.84]])
print (D)
# find the closest point from the previous exercise
# before attempting this exercise, run at the command prompt: conda install scikit-image and restart the notebook
%matplotlib inline
import matplotlib.pyplot as plt
from skimage import data, io, filters
mario = io.imread('mario.gif')
plt.figure(figsize=(4, 4))
plt.imshow(mario)
plt.axis('on')
plt.grid(True)
plt.show()
plt.figure(figsize=(4, 4))
plt.imshow(mario)
plt.axis('off')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 02
Step2: Exercise 03
Step3: Exercise 04
Step4: Exercise 05
Step5: Exercise 06
Step6: Exercise 07
Step7: It is represented by a three-dimensional matrix of dimensions $133 \times 100 \times 3$ for the height, width and colors. The color dimension holds the intensity of the Red, Green and Blue channels, respectively, with values from 0 to 255.
|
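For reference, possible solutions for the two cells above that print names the code never defines (matriz/matriz2 in the checkerboard exercise and D in the distance exercise); these are illustrative sketches assuming plain numpy and the X and Y arrays defined above:
matriz = np.zeros((8, 8), dtype=int)        # one way to build the 8 x 8 checkerboard
matriz[1::2, ::2] = 1
matriz[::2, 1::2] = 1
matriz2 = np.tile(np.array([[0, 1], [1, 0]]), (4, 4))  # an alternative construction
D = np.sqrt(((Y - X) ** 2).sum(axis=1))     # Euclidean distance from X to each row of Y
print(D)
print(Y[D.argmin()])                        # the closest point, as asked in the follow-up exercise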
14,318 | <ASSISTANT_TASK:>
Python Code:
import math
import torch
import gpytorch
import pyro
import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
intensity_max = 50
true_intensity_function = lambda times: torch.cos(times * 2 * math.pi).add(1).mul(intensity_max / 2.)
max_time = 2
times = torch.linspace(0, max_time, 128)
num_samples = int(pyro.distributions.Poisson(true_intensity_function(times).mean() * max_time).sample().item())
print(f"Number of sampled arrivals: {num_samples}")
def log_prob_accept(val):
intensities = true_intensity_function(val)
res = torch.log(intensities / (true_intensity_function(times).mean() * max_time))
return res
arrival_times = pyro.distributions.Rejector(
propose=pyro.distributions.Uniform(times.min(), times.max()),
log_prob_accept=log_prob_accept,
log_scale=0.
)(torch.Size([num_samples]))
fig, ax = plt.subplots(1, 1)
ax.plot(times, true_intensity_function(times), "--", label=r"True $\lambda$")
ax.set_xlabel("Time")
ax.set_ylabel("Intensity ($\lambda$)")
ax.scatter(arrival_times, torch.zeros_like(arrival_times), label=r"Observed Arrivals")
ax.legend(loc="best")
None
class GPModel(gpytorch.models.ApproximateGP):
def __init__(self, num_arrivals, max_time, num_inducing=32, name_prefix="cox_gp_model"):
self.name_prefix = name_prefix
self.max_time = max_time
self.mean_intensity = (num_arrivals / max_time)
# Define the variational distribution and strategy of the GP
# We will initialize the inducing points to lie on a grid from 0 to T
inducing_points = torch.linspace(0, max_time, num_inducing).unsqueeze(-1)
variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(num_inducing_points=num_inducing)
variational_strategy = gpytorch.variational.VariationalStrategy(self, inducing_points, variational_distribution)
# Define model
super().__init__(variational_strategy=variational_strategy)
# Define mean and kernel
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def forward(self, times):
mean = self.mean_module(times)
covar = self.covar_module(times)
return gpytorch.distributions.MultivariateNormal(mean, covar)
def guide(self, arrival_times, quadrature_times):
function_distribution = self.pyro_guide(torch.cat([arrival_times, quadrature_times], -1))
# Draw samples from q(f) at arrival_times
# Also draw samples from q(f) at evenly-spaced points (quadrature_times)
with pyro.plate(self.name_prefix + ".times_plate", dim=-1):
pyro.sample(
self.name_prefix + ".function_samples",
function_distribution
)
def model(self, arrival_times, quadrature_times):
pyro.module(self.name_prefix + ".gp", self)
function_distribution = self.pyro_model(torch.cat([arrival_times, quadrature_times], -1))
# Draw samples from p(f) at arrival times
# Also draw samples from p(f) at evenly-spaced points (quadrature_times)
with pyro.plate(self.name_prefix + ".times_plate", dim=-1):
function_samples = pyro.sample(
self.name_prefix + ".function_samples",
function_distribution
)
####
# Convert function samples into intensity samples, using the function above
####
intensity_samples = function_samples.exp() * self.mean_intensity
# Divide the intensity samples into arrival_intensity_samples and quadrature_intensity_samples
arrival_intensity_samples, quadrature_intensity_samples = intensity_samples.split([
arrival_times.size(-1), quadrature_times.size(-1)
], dim=-1)
####
# Compute the log_likelihood, using the method described above
####
arrival_log_intensities = arrival_intensity_samples.log().sum(dim=-1)
est_num_arrivals = quadrature_intensity_samples.mean(dim=-1).mul(self.max_time)
log_likelihood = arrival_log_intensities - est_num_arrivals
pyro.factor(self.name_prefix + ".log_likelihood", log_likelihood)
model = GPModel(arrival_times.numel(), max_time)
quadrature_times = torch.linspace(0, max_time, 64)
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
num_iter = 2 if smoke_test else 200
num_particles = 1 if smoke_test else 32
def train(lr=0.01):
optimizer = pyro.optim.Adam({"lr": lr})
loss = pyro.infer.Trace_ELBO(num_particles=num_particles, vectorize_particles=True, retain_graph=True)
infer = pyro.infer.SVI(model.model, model.guide, optimizer, loss=loss)
model.train()
loader = tqdm.notebook.tqdm(range(num_iter))
for i in loader:
loss = infer.step(arrival_times, quadrature_times)
loader.set_postfix(loss=loss)
train()
# Here's a quick helper function for getting smoothed percentile values from samples
def percentiles_from_samples(samples, percentiles=[0.05, 0.5, 0.95]):
num_samples = samples.size(0)
samples = samples.sort(dim=0)[0]
# Get samples corresponding to percentile
percentile_samples = [samples[int(num_samples * percentile)] for percentile in percentiles]
# Smooth the samples
kernel = torch.full((1, 1, 5), fill_value=0.2)
percentiles_samples = [
torch.nn.functional.conv1d(percentile_sample.view(1, 1, -1), kernel, padding=2).view(-1)
for percentile_sample in percentile_samples
]
return percentile_samples
# Get the average predicted intensity function, and the intensity confidence region
model.eval()
with torch.no_grad():
function_dist = model(quadrature_times)
intensity_samples = function_dist(torch.Size([1000])).exp() * model.mean_intensity
lower, mean, upper = percentiles_from_samples(intensity_samples)
# Plot the predicted intensity function
fig, ax = plt.subplots(1, 1)
line, = ax.plot(quadrature_times, mean, label=r"Pred. $\lambda$")
ax.fill_between(quadrature_times, lower, upper, color=line.get_color(), alpha=0.5)
ax.plot(quadrature_times, true_intensity_function(quadrature_times), "--", color="k", label=r"True $\lambda$")
ax.legend(loc="best")
ax.set_xlabel("Time")
ax.set_ylabel("Intensity ($\lambda$)")
ax.scatter(arrival_times, torch.zeros_like(arrival_times), label=r"Observed Arrivals")
None
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create sample training set
Step2: Determine how many arrivals there are
Step3: Determine when the arrivals occur
Step4: The result
Step5: Parameterizing the intensity function using a GP
Step6: Performing inference
Step7: The Pyro SVI inference loop
Step8: Evaluate the inferred intensity function
|
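A standalone numpy sketch of the thinning (rejection) idea used to draw the arrival times above: propose candidates from a homogeneous process at rate lambda_max and keep each with probability lambda(t)/lambda_max (rates and seed are illustrative):
import numpy as np
rng = np.random.default_rng(0)
lam_max, T = 50.0, 2.0
lam = lambda t: 0.5 * lam_max * (1.0 + np.cos(2.0 * np.pi * t))
candidates = rng.uniform(0.0, T, rng.poisson(lam_max * T))
arrivals = candidates[rng.uniform(0.0, lam_max, candidates.size) < lam(candidates)]
print(arrivals.size, "accepted arrivals")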
14,319 | <ASSISTANT_TASK:>
Python Code:
import statsmodels
import statsmodels.formula.api as smf
import pandas as pd
# Load data
data_url = "https://raw.githubusercontent.com/nguyen-toan/ISLR/07fd968ea484b5f6febc7b392a28eb64329a4945/dataset/Advertising.csv"
df = pd.read_csv(data_url).drop('Unnamed: 0', axis=1)
df.head()
# Fitting linear model
res = smf.ols(formula= "Sales ~ TV + Radio + Newspaper", data=df).fit()
res.summary()
# base code
import numpy as np
import seaborn as sns
from statsmodels.tools.tools import maybe_unwrap_results
from statsmodels.graphics.gofplots import ProbPlot
from statsmodels.stats.outliers_influence import variance_inflation_factor
import matplotlib.pyplot as plt
from typing import Type
style_talk = 'seaborn-talk' #refer to plt.style.available
class Linear_Reg_Diagnostic():
    """
    Diagnostic plots to identify potential problems in a linear regression fit.
    Mainly,
        a. non-linearity of data
        b. Correlation of error terms
        c. non-constant variance
        d. outliers
        e. high-leverage points
        f. collinearity

    Author:
        Prajwal Kafle (p33ajkafle@gmail.com, where 3 = r)
        Does not come with any sort of warranty.
        Please test the code on your end before using.
    """
def __init__(self,
results: Type[statsmodels.regression.linear_model.RegressionResultsWrapper]) -> None:
        """
        For a linear regression model, generates following diagnostic plots:
a. residual
b. qq
c. scale location and
d. leverage
and a table
e. vif
Args:
results (Type[statsmodels.regression.linear_model.RegressionResultsWrapper]):
must be instance of statsmodels.regression.linear_model object
Raises:
TypeError: if instance does not belong to above object
Example:
>>> import numpy as np
>>> import pandas as pd
>>> import statsmodels.formula.api as smf
>>> x = np.linspace(-np.pi, np.pi, 100)
>>> y = 3*x + 8 + np.random.normal(0,1, 100)
>>> df = pd.DataFrame({'x':x, 'y':y})
>>> res = smf.ols(formula= "y ~ x", data=df).fit()
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls(plot_context="seaborn-paper")
In case you do not need all plots you can also independently make an individual plot/table
in following ways
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls.residual_plot()
>>> cls.qq_plot()
>>> cls.scale_location_plot()
>>> cls.leverage_plot()
        >>> cls.vif_table()
        """
if isinstance(results, statsmodels.regression.linear_model.RegressionResultsWrapper) is False:
raise TypeError("result must be instance of statsmodels.regression.linear_model.RegressionResultsWrapper object")
self.results = maybe_unwrap_results(results)
self.y_true = self.results.model.endog
self.y_predict = self.results.fittedvalues
self.xvar = self.results.model.exog
self.xvar_names = self.results.model.exog_names
self.residual = np.array(self.results.resid)
influence = self.results.get_influence()
self.residual_norm = influence.resid_studentized_internal
self.leverage = influence.hat_matrix_diag
self.cooks_distance = influence.cooks_distance[0]
self.nparams = len(self.results.params)
def __call__(self, plot_context='seaborn-paper'):
# print(plt.style.available)
with plt.style.context(plot_context):
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10,10))
self.residual_plot(ax=ax[0,0])
self.qq_plot(ax=ax[0,1])
self.scale_location_plot(ax=ax[1,0])
self.leverage_plot(ax=ax[1,1])
plt.show()
self.vif_table()
return fig, ax
def residual_plot(self, ax=None):
        """
        Residual vs Fitted Plot

        Graphical tool to identify non-linearity.
        A (roughly) horizontal red line suggests the linearity assumption holds (no pattern in the residuals).
        """
if ax is None:
fig, ax = plt.subplots()
sns.residplot(
x=self.y_predict,
y=self.residual,
lowess=True,
scatter_kws={'alpha': 0.5},
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8},
ax=ax)
# annotations
residual_abs = np.abs(self.residual)
abs_resid = np.flip(np.sort(residual_abs))
abs_resid_top_3 = abs_resid[:3]
for i, _ in enumerate(abs_resid_top_3):
ax.annotate(
i,
xy=(self.y_predict[i], self.residual[i]),
color='C3')
ax.set_title('Residuals vs Fitted', fontweight="bold")
ax.set_xlabel('Fitted values')
ax.set_ylabel('Residuals')
return ax
def qq_plot(self, ax=None):
        """
        Standardized Residual vs Theoretical Quantile plot

        Used to visually check if residuals are normally distributed.
        Points spread along the diagonal line will suggest so.
        """
if ax is None:
fig, ax = plt.subplots()
QQ = ProbPlot(self.residual_norm)
QQ.qqplot(line='45', alpha=0.5, lw=1, ax=ax)
# annotations
abs_norm_resid = np.flip(np.argsort(np.abs(self.residual_norm)), 0)
abs_norm_resid_top_3 = abs_norm_resid[:3]
for r, i in enumerate(abs_norm_resid_top_3):
ax.annotate(
i,
xy=(np.flip(QQ.theoretical_quantiles, 0)[r], self.residual_norm[i]),
ha='right', color='C3')
ax.set_title('Normal Q-Q', fontweight="bold")
ax.set_xlabel('Theoretical Quantiles')
ax.set_ylabel('Standardized Residuals')
return ax
def scale_location_plot(self, ax=None):
        """
        Sqrt(Standardized Residual) vs Fitted values plot

        Used to check homoscedasticity of the residuals.
        A horizontal line will suggest so.
        """
if ax is None:
fig, ax = plt.subplots()
residual_norm_abs_sqrt = np.sqrt(np.abs(self.residual_norm))
ax.scatter(self.y_predict, residual_norm_abs_sqrt, alpha=0.5);
sns.regplot(
x=self.y_predict,
y=residual_norm_abs_sqrt,
scatter=False, ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8},
ax=ax)
# annotations
abs_sq_norm_resid = np.flip(np.argsort(residual_norm_abs_sqrt), 0)
abs_sq_norm_resid_top_3 = abs_sq_norm_resid[:3]
for i in abs_sq_norm_resid_top_3:
ax.annotate(
i,
xy=(self.y_predict[i], residual_norm_abs_sqrt[i]),
color='C3')
ax.set_title('Scale-Location', fontweight="bold")
ax.set_xlabel('Fitted values')
ax.set_ylabel(r'$\sqrt{|\mathrm{Standardized\ Residuals}|}$');
return ax
def leverage_plot(self, ax=None):
        """
        Residual vs Leverage plot

        Points falling outside Cook's distance curves are considered observations that can sway the fit,
        i.e. they are influential.
        Good to have none outside the curves.
        """
if ax is None:
fig, ax = plt.subplots()
ax.scatter(
self.leverage,
self.residual_norm,
alpha=0.5);
sns.regplot(
x=self.leverage,
y=self.residual_norm,
scatter=False,
ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8},
ax=ax)
# annotations
leverage_top_3 = np.flip(np.argsort(self.cooks_distance), 0)[:3]
for i in leverage_top_3:
ax.annotate(
i,
xy=(self.leverage[i], self.residual_norm[i]),
color = 'C3')
xtemp, ytemp = self.__cooks_dist_line(0.5) # 0.5 line
ax.plot(xtemp, ytemp, label="Cook's distance", lw=1, ls='--', color='red')
xtemp, ytemp = self.__cooks_dist_line(1) # 1 line
ax.plot(xtemp, ytemp, lw=1, ls='--', color='red')
ax.set_xlim(0, max(self.leverage)+0.01)
ax.set_title('Residuals vs Leverage', fontweight="bold")
ax.set_xlabel('Leverage')
ax.set_ylabel('Standardized Residuals')
ax.legend(loc='upper right')
return ax
def vif_table(self):
        """
        VIF table

        VIF, the variance inflation factor, is a measure of multicollinearity.
        VIF > 5 for a variable indicates that it is highly collinear with the
        other input variables.
        """
vif_df = pd.DataFrame()
vif_df["Features"] = self.xvar_names
vif_df["VIF Factor"] = [variance_inflation_factor(self.xvar, i) for i in range(self.xvar.shape[1])]
print(vif_df
.sort_values("VIF Factor")
.round(2))
def __cooks_dist_line(self, factor):
        """Helper function for plotting Cook's distance curves."""
p = self.nparams
formula = lambda x: np.sqrt((factor * p * (1 - x)) / x)
x = np.linspace(0.001, max(self.leverage), 50)
y = formula(x)
return x, y
cls = Linear_Reg_Diagnostic(res)
cls.residual_plot();
cls.qq_plot();
cls.scale_location_plot();
cls.leverage_plot();
cls.vif_table()
# Alternatively, all diagnostics can be generated in one go as follows.
# Fig and ax can be used to modify axes or plot properties after the fact.
cls = Linear_Reg_Diagnostic(res)
fig, ax = cls()
#fig.savefig('../../docs/source/_static/images/linear_regression_diagnostics_plots.png')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple multiple linear regression
Step10: Diagnostic Figures/Table
Step11: Making use of the
Step12: A. Residual vs Fitted values
Step13: B. Standarized Residual vs Theoretical Quantile
Step14: C. Sqrt(Standarized Residual) vs Fitted values
Step15: D. Residual vs Leverage
Step16: E. VIF
|
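As a small follow-up to the leverage plot, a sketch of the common 4/n Cook's distance screening rule, using the influence measures statsmodels already exposes for the fitted res above (the 4/n cutoff is a rule of thumb, not part of the class):
influence = res.get_influence()
cooks_d = influence.cooks_distance[0]
print((cooks_d > 4 / len(cooks_d)).sum(), "observations exceed the 4/n rule of thumb")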
14,320 | <ASSISTANT_TASK:>
Python Code:
a=1
b=3.14
# Assigning value 1 to variable a and 3.14 to variable b
a = 20
b = 10
c = 15
d = 5
e = 0
e = (a + b) * c / d #( 30 * 15 ) / 5
print ("Value of (a + b) * c / d is ", e)
e = ((a + b) * c) / d # (30 * 15 ) / 5
print ("Value of ((a + b) * c) / d is ", e)
e = (a + b) * (c / d); # (30) * (15/5)
print ("Value of (a + b) * (c / d) is ", e)
e = a + (b * c) / d; # 20 + (150/5)
print ("Value of a + (b * c) / d is ", e)
course_name = "Introduction to Programming"
question = "Having a good time ? ;)"
print(course_name)
print(question)
string_1 = "Hello World!"
n = len(string_1) # "len" gives us the number of characters in the string
print(string_1 + " has", n , "characters")
print(string_1[0])
print(string_1[1])
print(string_1[-2])
print(string_1[0:2])
print(string_1[5 : len(string_1)])
print(string_1[0:4]+string_1[4:len(string_1)])
primes = [2,3,5,8,11]
print(primes)
print(primes[0])
print(len(primes))
classroom = ['L','T', 1]
print(classroom)
print((classroom[2]+ 4))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mathematical Operations on Variables
Step2: In case you are using Python 2 and want floating point division (e.g
Step3: Operations on Strings
Step4: Indexing
Step5: Negative Indexing
Step6: Lists
|
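Two more slicing forms that follow naturally from the indexing examples above (string_1 as defined in the code):
print(string_1[::2])   # every second character
print(string_1[::-1])  # the string reversed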
14,321 | <ASSISTANT_TASK:>
Python Code:
def doit(a,b):
return a+b
x = 4
y = 3
z = doit(x,x)
print(z)
x = int(input())
z = 0
for i in range(x):
z = z + i
print(z)
def x(a,b):
return b
w = int(input())
y = 2
for i in range(w):
z = x(i,y)
t = x(y,i)
print(t)
x = "Mike"
y = x.upper().replace("I","K")
y
text = "This is mike"
for word in text.split("i"):
print(word[1], end='')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A. 8
Step2: A. 3
Step3: A. 8
Step4: A. 'Miie'
|
14,322 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from figures import make_dataset
x, y = make_dataset()
X = x.reshape(-1, 1)
from sklearn.tree import DecisionTreeRegressor
reg = DecisionTreeRegressor(max_depth=5)
reg.fit(X, y)
X_fit = np.linspace(-3, 3, 1000).reshape((-1, 1))
y_fit_1 = reg.predict(X_fit)
plt.plot(X_fit.ravel(), y_fit_1, color='blue', label="prediction")
plt.plot(X.ravel(), y, '.k', label="training data")
plt.legend(loc="best")
from sklearn.datasets import make_blobs
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from figures import plot_2d_separator
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=100)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf = DecisionTreeClassifier(max_depth=5)
clf.fit(X_train, y_train)
plot_2d_separator(clf, X, fill=True)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=60, alpha=.7)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=60)
from figures import plot_tree_interactive
plot_tree_interactive()
from figures import plot_forest_interactive
plot_forest_interactive()
from sklearn import grid_search
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
digits = load_digits()
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
rf = RandomForestClassifier(n_estimators=200)
parameters = {'max_features':['sqrt', 'log2', 10],
'max_depth':[5, 7, 9]}
clf_grid = grid_search.GridSearchCV(rf, parameters, n_jobs=-1)
clf_grid.fit(X_train, y_train)
clf_grid.score(X_train, y_train)
clf_grid.score(X_test, y_test)
from sklearn.ensemble import GradientBoostingRegressor
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5, learning_rate=.2)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we'll explore a class of algorithms based on decision trees.
Step2: A single decision tree allows us to estimate the signal in a non-parametric way,
Step3: There are many parameters that control the complexity of a tree, but the one that might be easiest to understand is the maximum depth. This limits how finely the tree can partition the input space, or how many "if-else" questions can be asked before deciding which class a sample lies in.
Step4: Decision trees are fast to train, easy to understand, and often lead to interpretable models. However, single trees often tend to overfit the training data. Playing with the slider above you might notice that the model starts to overfit even before it has a good separation between the classes.
Step5: Selecting the Optimal Estimator via Cross-Validation
Step6: Another option
|
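A note on the imports above: sklearn.cross_validation and sklearn.grid_search were removed in later scikit-learn releases; a minimal sketch of the same grid search with the current model_selection API:
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
grid = GridSearchCV(RandomForestClassifier(n_estimators=200),
                    {'max_features': ['sqrt', 'log2', 10], 'max_depth': [5, 7, 9]}, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.score(X_test, y_test))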
14,323 | <ASSISTANT_TASK:>
Python Code:
def strip_id(s):
try:
index = s.index(':')
except ValueError:
index = len(s)
return s[:index]
columns = [
'agency_id',
'service_date_id', 'service_date_date',
'route_id', 'route_short_name', 'route_long_name',
'trip_id', 'trip_headsign', 'trip_short_name',
'stop_time_id', 'stop_time_arrival_time', 'stop_time_departure_time', 'stop_time_stop_sequence',
'stop_id', 'stop_stop_id', 'stop_name',
'capacity_path_id', 'capacity_path_path',
'capacity_capacity_id', 'capacity_capacity_capacity1st', 'capacity_capacity_capacity2nd'
]
in_dir = "in_data/"
out_dir = "out_data/"
dates = ['2017-01-30','2017-01-31','2017-02-01','2017-02-02','2017-02-03','2017-02-04','2017-02-05']
for date in dates:
file = in_dir + date + '.csv'
df = pd.read_csv(file)
df.columns = columns
df['stop_stop_id'] = df['stop_stop_id'].apply(lambda x: strip_id(x))
df.to_csv(out_dir + date + '_processed.csv')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We process the CSV to strip the stop_id values, since they are currently not in the official form: the geops dataset adds a suffix to each stop_id when it corresponds to a different route (a short example follows).
|
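A quick illustration of what strip_id does (the ids are made up):
print(strip_id('8507000:0:1'))  # -> '8507000'
print(strip_id('8507000'))      # -> '8507000' (unchanged when there is no ':')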
14,324 | <ASSISTANT_TASK:>
Python Code:
a = list(range(10))
b = list(range(100))
a.append('ich bin keine Zahl')
b.append('ich bin keine Zahl')
a.pop()
b.pop()
anew = []
for x in a:
x = str(x)
anew.append(x)
anew
bnew = []
for x in b:
x = str(x)
bnew.append(x)
bnew[:10]
b[-11:-1]
lst = [4,6,2328,926,323,21,4442,21,45,301,23,12, 1000]
lst.sort()
lst[-2]
for x in lst:
if x >= 1000:
continue
elif x > 100:
print(str(x))
else:
print(x*100)
stadt_dic = [{'Kanton': 'GE', 'Stadt': 'Genf', 'Bevölkerung': 194565},
{'Kanton': 'ZH', 'Stadt': 'Zürich', 'Bevölkerung': 396027},
{'Kanton': 'BS', 'Stadt': 'Basel', 'Bevölkerung': 175131},
{'Kanton': 'BE', 'Stadt': 'Bern', 'Bevölkerung': 140634},
{'Kanton': 'VD', 'Stadt': 'Lausanne', 'Bevölkerung': 135629}
]
stadt_dic[0]['Bevölkerung']
z = 0
for x in stadt_dic:
y = x['Bevölkerung']
z += y
print(z)
ch = 8372000
for x in stadt_dic:
y = x['Bevölkerung']
s = x['Stadt']
print(s , round(y/ch*100, 2), '%')
winti = {'Kanton': 'ZH', 'Stadt': 'Winterthur', 'Bevölkerung': 106778}
luzern = {'Kanton': 'LU', 'Stadt': 'Luzern', 'Bevölkerung': 81284}
stadt_dic.append(winti)
stadt_dic.append(luzern)
ch = 8372000
for x in stadt_dic:
y = x['Bevölkerung']
s = x['Stadt']
print(s , round(y/ch*100, 2), '%')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Do the same with a list of 100 elements and assign it to the variable b.
Step2: 3. Append the following string to both lists
Step3: 4. Remove this last entry from the lists again
Step4: 5. Convert every number in the lists a and b from int to str
Step5: 6. From the list b, show only the last ten numbers
Step6: 7. From the following list, show the second-largest value
Step7: 8. Multiply every number in this list that is smaller than 100 by 100; if a number is between 100 and 1000, convert it to a string, and if it is greater than or equal to 1000, delete it.
Step8: 9. Write a list of dictionaries with the five largest Swiss cities, their populations and the corresponding cantons.
Step9: 10. Show only the population of the city of Genf (Geneva)
Step10: 11. Print the total population of all the cities
Step11: 12. Compute each city's share of the total Swiss population and print the result next to the city name
Step12: 13. Add the cities Winterthur and St. Gallen as well
Step13: 14. Extend the city dictionary list with this information
Step14: 15. Repeat the loop from above to compute each city's share of the total population.
|
14,325 | <ASSISTANT_TASK:>
Python Code:
solow.Model.output?
# define model variables
A, K, L = sym.symbols('A, K, L')
# define production parameters
alpha, sigma = sym.symbols('alpha, sigma')
# define a production function
cobb_douglas_output = K**alpha * (A * L)**(1 - alpha)
rho = (sigma - 1) / sigma
ces_output = (alpha * K**rho + (1 - alpha) * (A * L)**rho)**(1 / rho)
solow.Model.params?
# these parameters look fishy...why?
default_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.0, 'n': -0.03, 's': 0.15,
'delta': 0.01, 'alpha': 0.33}
# ...raises an AttributeError
model = solowpy.Model(output=cobb_douglas_output, params=default_params)
cobb_douglas_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,
'delta': 0.05, 'alpha': 0.33}
cobb_douglas_model = solow.Model(output=cobb_douglas_output,
params=cobb_douglas_params)
ces_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.03, 's': 0.15,
'delta': 0.05, 'alpha': 0.33, 'sigma': 0.95}
ces_model = solowpy.Model(output=ces_output, params=ces_params)
solowpy.Model.intensive_output?
ces_model.intensive_output
ces_model.evaluate_intensive_output(np.linspace(1.0, 10.0, 25))
solowpy.Model.marginal_product_capital?
ces_model.marginal_product_capital
ces_model.evaluate_mpk(np.linspace(1.0, 10.0, 25))
solowpy.Model.k_dot?
ces_model.k_dot
ces_model.evaluate_k_dot(np.linspace(1.0, 10.0, 25))
solowpy.cobb_douglas?
cobb_douglas_model = solowpy.CobbDouglasModel(params=cobb_douglas_params)
solowpy.ces?
ces_model = solowpy.CESModel(params=ces_params)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: 1.2 Defining model parameters
Step3: In addition to the standard parameters $g, n, s, \delta$, one will also need to specify any required parameters for the production function. In order to make sure that parameter values are consistent with the model's assumptions, some basic validation of the solow.Model.params attribute is done whenever the attribute is set.
Step4: Examples
Step5: 1.3 Other attributes of the solow.Model class
Step6: One can numerically evaluate the intensive output for various values of capital stock (per unit effective labor) as follows...
Step7: The marginal product of capital
Step8: One can numerically evaluate the marginal product of capital for various values of capital stock (per unit effective labor) as follows...
Step9: Equation of motion for capital (per unit effective labor)
Step10: One can numerically evaluate the equation of motion for capital (per unit effective labor) for various values of capital stock (per unit effective labor) as follows...
Step11: 1.4 Sub-classing the solow.Model class
|
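A quick numerical sanity check (values are illustrative) that the CES form defined above approaches the Cobb-Douglas form as sigma approaches one, reusing the sympy symbols and expressions from the code:
point = {A: 1.0, K: 2.0, L: 1.0, alpha: 0.33}
print(cobb_douglas_output.subs(point))           # approx. 1.257
print(ces_output.subs({**point, sigma: 0.999}))  # close to the Cobb-Douglas value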
14,326 | <ASSISTANT_TASK:>
Python Code:
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
%%capture
#@title Setup Environment
# Install the latest Tensorflow version.
!pip install tensorflow_text
!pip install bokeh
!pip install simpleneighbors[annoy]
!pip install tqdm
#@title Setup common imports and functions
import bokeh
import bokeh.models
import bokeh.plotting
import numpy as np
import os
import pandas as pd
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer
import sklearn.metrics.pairwise
from simpleneighbors import SimpleNeighbors
from tqdm import tqdm
from tqdm import trange
def visualize_similarity(embeddings_1, embeddings_2, labels_1, labels_2,
plot_title,
plot_width=1200, plot_height=600,
xaxis_font_size='12pt', yaxis_font_size='12pt'):
assert len(embeddings_1) == len(labels_1)
assert len(embeddings_2) == len(labels_2)
# arccos based text similarity (Yang et al. 2019; Cer et al. 2019)
sim = 1 - np.arccos(
sklearn.metrics.pairwise.cosine_similarity(embeddings_1,
embeddings_2))/np.pi
embeddings_1_col, embeddings_2_col, sim_col = [], [], []
for i in range(len(embeddings_1)):
for j in range(len(embeddings_2)):
embeddings_1_col.append(labels_1[i])
embeddings_2_col.append(labels_2[j])
sim_col.append(sim[i][j])
df = pd.DataFrame(zip(embeddings_1_col, embeddings_2_col, sim_col),
columns=['embeddings_1', 'embeddings_2', 'sim'])
mapper = bokeh.models.LinearColorMapper(
palette=[*reversed(bokeh.palettes.YlOrRd[9])], low=df.sim.min(),
high=df.sim.max())
p = bokeh.plotting.figure(title=plot_title, x_range=labels_1,
x_axis_location="above",
y_range=[*reversed(labels_2)],
plot_width=plot_width, plot_height=plot_height,
tools="save",toolbar_location='below', tooltips=[
('pair', '@embeddings_1 ||| @embeddings_2'),
('sim', '@sim')])
p.rect(x="embeddings_1", y="embeddings_2", width=1, height=1, source=df,
fill_color={'field': 'sim', 'transform': mapper}, line_color=None)
p.title.text_font_size = '12pt'
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 16
p.xaxis.major_label_text_font_size = xaxis_font_size
p.xaxis.major_label_orientation = 0.25 * np.pi
p.yaxis.major_label_text_font_size = yaxis_font_size
p.min_border_right = 300
bokeh.io.output_notebook()
bokeh.io.show(p)
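# Illustrative aside (an assumption-free toy check, not taken from the notebook): the arccos-based
# score used in visualize_similarity maps cosine similarity into [0, 1]; two toy vectors 45 degrees
# apart score 0.75.
toy_u = np.array([[1.0, 0.0]])
toy_v = np.array([[1.0, 1.0]])
print(1 - np.arccos(sklearn.metrics.pairwise.cosine_similarity(toy_u, toy_v))[0, 0] / np.pi)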
# The 16-language multilingual module is the default but feel free
# to pick others from the list and compare the results.
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3' #@param ['https://tfhub.dev/google/universal-sentence-encoder-multilingual/3', 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3']
model = hub.load(module_url)
def embed_text(input):
return model(input)
# Some texts of different lengths in different languages.
arabic_sentences = ['كلب', 'الجراء لطيفة.', 'أستمتع بالمشي لمسافات طويلة على طول الشاطئ مع كلبي.']
chinese_sentences = ['狗', '小狗很好。', '我喜欢和我的狗一起沿着海滩散步。']
english_sentences = ['dog', 'Puppies are nice.', 'I enjoy taking long walks along the beach with my dog.']
french_sentences = ['chien', 'Les chiots sont gentils.', 'J\'aime faire de longues promenades sur la plage avec mon chien.']
german_sentences = ['Hund', 'Welpen sind nett.', 'Ich genieße lange Spaziergänge am Strand entlang mit meinem Hund.']
italian_sentences = ['cane', 'I cuccioli sono carini.', 'Mi piace fare lunghe passeggiate lungo la spiaggia con il mio cane.']
japanese_sentences = ['犬', '子犬はいいです', '私は犬と一緒にビーチを散歩するのが好きです']
korean_sentences = ['개', '강아지가 좋다.', '나는 나의 개와 해변을 따라 길게 산책하는 것을 즐긴다.']
russian_sentences = ['собака', 'Милые щенки.', 'Мне нравится подолгу гулять по пляжу со своей собакой.']
spanish_sentences = ['perro', 'Los cachorros son agradables.', 'Disfruto de dar largos paseos por la playa con mi perro.']
# Multilingual example
multilingual_example = ["Willkommen zu einfachen, aber", "verrassend krachtige", "multilingüe", "compréhension du langage naturel", "модели.", "大家是什么意思" , "보다 중요한", ".اللغة التي يتحدثونها"]
multilingual_example_in_en = ["Welcome to simple yet", "surprisingly powerful", "multilingual", "natural language understanding", "models.", "What people mean", "matters more than", "the language they speak."]
# Compute embeddings.
ar_result = embed_text(arabic_sentences)
en_result = embed_text(english_sentences)
es_result = embed_text(spanish_sentences)
de_result = embed_text(german_sentences)
fr_result = embed_text(french_sentences)
it_result = embed_text(italian_sentences)
ja_result = embed_text(japanese_sentences)
ko_result = embed_text(korean_sentences)
ru_result = embed_text(russian_sentences)
zh_result = embed_text(chinese_sentences)
multilingual_result = embed_text(multilingual_example)
multilingual_in_en_result = embed_text(multilingual_example_in_en)
visualize_similarity(multilingual_in_en_result, multilingual_result,
multilingual_example_in_en, multilingual_example, "Multilingual Universal Sentence Encoder for Semantic Retrieval (Yang et al., 2019)")
visualize_similarity(en_result, ar_result, english_sentences, arabic_sentences, 'English-Arabic Similarity')
visualize_similarity(en_result, ru_result, english_sentences, russian_sentences, 'English-Russian Similarity')
visualize_similarity(en_result, es_result, english_sentences, spanish_sentences, 'English-Spanish Similarity')
visualize_similarity(en_result, it_result, english_sentences, italian_sentences, 'English-Italian Similarity')
visualize_similarity(it_result, es_result, italian_sentences, spanish_sentences, 'Italian-Spanish Similarity')
visualize_similarity(en_result, zh_result, english_sentences, chinese_sentences, 'English-Chinese Similarity')
visualize_similarity(en_result, ko_result, english_sentences, korean_sentences, 'English-Korean Similarity')
visualize_similarity(zh_result, ko_result, chinese_sentences, korean_sentences, 'Chinese-Korean Similarity')
corpus_metadata = [
('ar', 'ar-en.txt.zip', 'News-Commentary.ar-en.ar', 'Arabic'),
('zh', 'en-zh.txt.zip', 'News-Commentary.en-zh.zh', 'Chinese'),
('en', 'en-es.txt.zip', 'News-Commentary.en-es.en', 'English'),
('ru', 'en-ru.txt.zip', 'News-Commentary.en-ru.ru', 'Russian'),
('es', 'en-es.txt.zip', 'News-Commentary.en-es.es', 'Spanish'),
]
language_to_sentences = {}
language_to_news_path = {}
for language_code, zip_file, news_file, language_name in corpus_metadata:
zip_path = tf.keras.utils.get_file(
fname=zip_file,
origin='http://opus.nlpl.eu/download.php?f=News-Commentary/v11/moses/' + zip_file,
extract=True)
news_path = os.path.join(os.path.dirname(zip_path), news_file)
language_to_sentences[language_code] = pd.read_csv(news_path, sep='\t', header=None)[0][:1000]
language_to_news_path[language_code] = news_path
print('{:,} {} sentences'.format(len(language_to_sentences[language_code]), language_name))
# Takes about 3 minutes
batch_size = 2048
language_to_embeddings = {}
for language_code, zip_file, news_file, language_name in corpus_metadata:
print('\nComputing {} embeddings'.format(language_name))
with tqdm(total=len(language_to_sentences[language_code])) as pbar:
for batch in pd.read_csv(language_to_news_path[language_code], sep='\t',header=None, chunksize=batch_size):
language_to_embeddings.setdefault(language_code, []).extend(embed_text(batch[0]))
pbar.update(len(batch))
%%time
# Takes about 8 minutes
num_index_trees = 40
language_name_to_index = {}
embedding_dimensions = len(list(language_to_embeddings.values())[0][0])
for language_code, zip_file, news_file, language_name in corpus_metadata:
print('\nAdding {} embeddings to index'.format(language_name))
index = SimpleNeighbors(embedding_dimensions, metric='dot')
for i in trange(len(language_to_sentences[language_code])):
index.add_one(language_to_sentences[language_code][i], language_to_embeddings[language_code][i])
print('Building {} index with {} trees...'.format(language_name, num_index_trees))
index.build(n=num_index_trees)
language_name_to_index[language_name] = index
%%time
# Takes about 13 minutes
num_index_trees = 60
print('Computing mixed-language index')
combined_index = SimpleNeighbors(embedding_dimensions, metric='dot')
for language_code, zip_file, news_file, language_name in corpus_metadata:
print('Adding {} embeddings to mixed-language index'.format(language_name))
for i in trange(len(language_to_sentences[language_code])):
annotated_sentence = '({}) {}'.format(language_name, language_to_sentences[language_code][i])
combined_index.add_one(annotated_sentence, language_to_embeddings[language_code][i])
print('Building mixed-language index with {} trees...'.format(num_index_trees))
combined_index.build(n=num_index_trees)
sample_query = 'The stock market fell four points.' #@param ["Global warming", "Researchers made a surprising new discovery last week.", "The stock market fell four points.", "Lawmakers will vote on the proposal tomorrow."] {allow-input: true}
index_language = 'English' #@param ["Arabic", "Chinese", "English", "French", "German", "Russian", "Spanish"]
num_results = 10 #@param {type:"slider", min:0, max:100, step:10}
query_embedding = embed_text(sample_query)[0]
search_results = language_name_to_index[index_language].nearest(query_embedding, n=num_results)
print('{} sentences similar to: "{}"\n'.format(index_language, sample_query))
search_results
sample_query = 'The stock market fell four points.' #@param ["Global warming", "Researchers made a surprising new discovery last week.", "The stock market fell four points.", "Lawmakers will vote on the proposal tomorrow."] {allow-input: true}
num_results = 40 #@param {type:"slider", min:0, max:100, step:10}
query_embedding = embed_text(sample_query)[0]
search_results = language_name_to_index[index_language].nearest(query_embedding, n=num_results)
print('{} sentences similar to: "{}"\n'.format(index_language, sample_query))
search_results
query = 'The stock market fell four points.' #@param {type:"string"}
num_results = 30 #@param {type:"slider", min:0, max:100, step:10}
query_embedding = embed_text(query)[0]
search_results = combined_index.nearest(query_embedding, n=num_results)
print('Mixed-language sentences similar to: "{}"\n'.format(query))
search_results
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use the Multilingual Universal Sentence Encoder to explore cross-lingual similarity and build a semantic search engine
Step2: Below is additional boilerplate code that imports the pre-trained ML model we will use to encode text throughout this notebook.
Step3: Visualize text similarity between languages
Step4: Visualize similarity
Step5: English-Arabic similarity
Step6: English-Russian similarity
Step7: English-Spanish similarity
Step8: English-Italian similarity
Step9: Italian-Spanish similarity
Step10: English-Chinese similarity
Step11: English-Korean similarity
Step12: Chinese-Korean similarity
Step13: And more...
Step14: Use the pre-trained model to convert sentences into vectors
Step15: Build an index of semantic vectors
Step16: Verify that the semantic similarity search engine works
Step17: Mixed-corpus capability
Step18: Try your own queries:
|
14,327 | <ASSISTANT_TASK:>
Python Code:
# Set things up
%matplotlib inline
# Importing CartoPy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
# Works with matplotlib's built-in transform support.
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())
# Sets the extent to cover the whole globe
ax.set_global()
# Adds standard background map
ax.stock_img()
# Set up a globe with a specific radius
globe = ccrs.Globe(semimajor_axis=6371000.)
# Set up a Lambert Conformal projection
proj = ccrs.LambertConformal(standard_parallels=[25.0], globe=globe)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Sets the extent using a lon/lat box
ax.set_extent([-130, -60, 20, 55])
ax.stock_img()
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.stock_img()
ax.add_feature(cfeature.COASTLINE)
ax.set_extent([-130, -60, 20, 55])
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
# Add variety of features
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
# Can also supply matplotlib kwargs
ax.add_feature(cfeature.BORDERS, linestyle=':')
ax.add_feature(cfeature.STATES, linestyle=':')
ax.add_feature(cfeature.LAKES, alpha=0.5)
ax.add_feature(cfeature.RIVERS, edgecolor='tab:green')
ax.set_extent([-130, -60, 20, 55])
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
# Add variety of features
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
# Can also supply matplotlib kwargs
ax.add_feature(cfeature.BORDERS.with_scale('50m'), linestyle=':')
ax.add_feature(cfeature.STATES.with_scale('50m'), linestyle=':')
ax.add_feature(cfeature.LAKES.with_scale('50m'), alpha=0.5)
ax.add_feature(cfeature.RIVERS.with_scale('50m'), edgecolor='tab:green')
ax.set_extent([-130, -60, 20, 55])
from metpy.plots import USCOUNTIES
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linewidth=2)
ax.add_feature(cfeature.STATES, linestyle='--', edgecolor='black')
ax.plot(-105, 40, marker='o', color='tab:red')
ax.set_extent([-130, -60, 20, 55])
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linewidth=2)
ax.add_feature(cfeature.STATES, linestyle='--', edgecolor='black')
data_projection = ccrs.PlateCarree()
ax.plot(-105, 40, marker='o', color='tab:red', transform=data_projection)
ax.set_extent([-130, -60, 20, 55])
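# Extra sketch (an assumption, not taken from the notebook above): the transform argument
# also controls how lines are interpreted. PlateCarree draws a straight segment in lon/lat
# space, while Geodetic draws the great-circle path between the same two points.
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.STATES, linestyle='--', edgecolor='black')
ax.plot([-105, -74], [40, 40.7], color='tab:blue', transform=ccrs.PlateCarree())
ax.plot([-105, -74], [40, 40.7], color='tab:red', transform=ccrs.Geodetic())
ax.set_extent([-130, -60, 20, 55])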
# Create some synthetic gridded wind data
import numpy as np
from metpy.calc import wind_speed
from metpy.units import units
# Note that all of these winds have u = 0 -> south wind
v = np.full((5, 5), 10, dtype=np.float64) + 10 * np.arange(5) * units.knots
u = np.zeros_like(v) * units.knots
speed = wind_speed(u, v)
# Create arrays of longitude and latitude
x = np.linspace(-120, -60, 5)
y = np.linspace(30, 55, 5)
# Plot as normal
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS)
# Plot wind barbs--CartoPy handles reprojecting the vectors properly for the
# coordinate system
ax.barbs(x, y, u.m, v.m, transform=ccrs.PlateCarree(), color='tab:blue')
ax.set_extent([-130, -60, 20, 55])
# YOUR CODE GOES HERE
# %load solutions/map.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The simplest plot we can make sets a projection with no parameters. The one below uses the Robinson projection
Step2: We also have fine-tuned control over the globe used in the projection as well as lots of standard parameters, which depend on individual projections
Step3: <a href="#top">Top</a>
Step4: Cartopy also has a lot of built-in support for a variety of map features
Step5: The map features are available at several different scales depending on how large the area you are covering is. The scales can be accessed using the with_scale method. Natural Earth features are available at 110m, 50m and 10m.
Step6: You can also grab other features from the Natural Earth project
Step7: <a href="#top">Top</a>
Step8: So that did not succeed at putting a marker at -105 longitude, 40 latitude (Boulder, CO). Instead, what actually happened is that it put the marker at (-105, 40) in the map projection coordinate system; in this case that's a Lambert Conformal projection, and x,y are assumed in meters relative to the origin of that coordinate system. To get CartoPy to treat it as longitude/latitude, we need to tell it that's what we're doing. We do this through the use of the transform argument to all of the plotting functions.
Step9: This approach by CartoPy separates the data coordinate system from the coordinate system of the plot. It allows you to take data in any coordinate system (lon/lat, Lambert Conformal) and display it in any map you want. It also allows you to combine data from various coordinate systems seamlessly. This extends to all plot types, not just plot
Step10: Exercise
Step11: Solution
|
14,328 | <ASSISTANT_TASK:>
Python Code:
# Find the city in a weather related query
train_x = [
"What is the weather like in Paris ?",
"What kind of weather will it do in London ?",
"Give me the weather forecast in Berlin please .",
"Tell me the forecast in New York !",
"Give me the weather in San Francisco ...",
"I want the forecast in Dublin ."
]
train_y = [
('Paris',),
('London',),
('Berlin',),
('New', 'York'),
('San', 'Francisco'),
('Dublin',)
]
tokenizer = lambda x: x.split(' ')
vocabulary = sorted(set(tokenizer(' '.join(train_x) + ' <unknown>')))
import torch
from torch.autograd import Variable
from torch import nn, optim
import torch.nn.functional as F
import numpy as np
class SCatNetwork(nn.Module):
def __init__(self, vocabulary_size, embedding_dimension, hidden_size):
super(SCatNetwork, self).__init__()
self.embeddings = nn.Embedding(vocabulary_size, embedding_dimension)
self.encoder = nn.LSTM( # a LSTM layer to encode features
embedding_dimension,
hidden_size,
batch_first=True,
)
        self.decoder = nn.Linear(hidden_size, 2)
        self.hidden_size = hidden_size
    def forward(self, inputs):
        # Initial hidden and cell states sized to match the LSTM's hidden dimension
        hc = (Variable(torch.ones(1, 1, self.hidden_size)), Variable(torch.ones(1, 1, self.hidden_size)))
outputs = self.embeddings(inputs)
outputs, _ = self.encoder(outputs, hc)
outputs = self.decoder(outputs)
return outputs
def vectorizer(sentence):
tokens = tokenizer(sentence)
vector = [vocabulary.index(token) if token in vocabulary else vocabulary.index('<unknown>') for token in tokens]
return torch.LongTensor(vector)
print(train_x[0])
test_vec = Variable(vectorizer(train_x[0]).view(1, len(tokenizer(train_x[0]))))
test_net = SCatNetwork(len(vocabulary), 20, 10)
print(test_vec)
print(str(test_net))
print()
test_net(test_vec)
n_epoch = 801
learning_rate = 0.01
mean_ratio = 0.75
min_tolerance = 1e-04
n_range = (1, 10+1)
model = SCatNetwork(len(vocabulary), 20, 10)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
for epoch in range(n_epoch):
epoch_losses = []
for sentence, goal in zip(train_x, train_y):
        sentence_length = len(tokenizer(sentence))
        goal = [1 if word in goal else 0 for word in tokenizer(sentence)]
        x = Variable(vectorizer(sentence).view(1, sentence_length))
y = Variable(torch.LongTensor(goal))
model.zero_grad()
preds = model(x)[0]
loss = criterion(preds, y)
epoch_losses.append(float(loss))
loss.backward()
optimizer.step()
if epoch % 80 == 0:
mean_loss = torch.FloatTensor(epoch_losses).mean()
print("Epoch {} - Loss : {}".format(epoch, float(mean_loss)))
x = "Give me the latest weather forecast in Los Angeles"
tokens = tokenizer(x)
x_vec = Variable(vectorizer(x).view(1, len(tokens)))
pred = F.softmax(model(x_vec), dim=2)[0, :,1].data
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure()
plt.bar(range(len(pred)), pred.tolist())
# Apply the special transformation
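# Shift each score down by a small tolerance plus 75% of the mean, then divide by the
# standard deviation, so only words scoring well above average keep a positive score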
pred = (pred - min_tolerance - mean_ratio * pred.mean()) / pred.std()
plt.figure()
plt.bar(range(len(pred)), pred.tolist())
word_with_scores = list(zip(tokens, pred.tolist()))
word_with_scores
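# Build every contiguous n-gram (n = 1..10) of (word, score) pairs, add an empty gram with
# score 0 as a fallback, then pick the gram whose summed score is highest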
grams_with_scores = sum([list(zip(*[word_with_scores[i:] for i in range(n)])) for n in range(*n_range)], [])
grams_with_scores.append([('', 0)])
summed_gram_scores = [sum(list(zip(*gram))[1]) for gram in grams_with_scores]
best_gram = list(grams_with_scores[summed_gram_scores.index(max(summed_gram_scores))])
best_gram
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now import all the modules we will need.
Step2: It's now time to write our network as described before.
Step3: We will also need a function to transform a sentence into a list (LongTensor) of index.
Step4: Before training, let's test the whole network.
Step5: Now let's train the network with those hyperparameters
Step6: Now let's test the model with this sentence
Step7: Visualize the output of the network and apply a special transformation to it.
Step8: Finally, find the most scored n-gram in the sentence.
|
14,329 | <ASSISTANT_TASK:>
Python Code:
# %load Source/fastICA_0.py
import numpy as np
from sklearn import preprocessing
def sym_decorrelation(W):
    """Symmetric decorrelation"""
K = np.dot(W, W.T)
s, u = np.linalg.eigh(K)
W = (u @ np.diag(1.0/np.sqrt(s)) @ u.T) @ W
return W
def g_logcosh(wx,alpha):
    """derivatives of logcosh"""
return np.tanh(alpha * wx)
def gprime_logcosh(wx,alpha):
    """second derivatives of logcosh"""
return alpha * (1-np.square(np.tanh(alpha*wx)))
# exp
def g_exp(wx,alpha):
    """derivatives of exp"""
return wx * np.exp(-np.square(wx)/2)
def gprime_exp(wx,alpha):
    """second derivatives of exp"""
return (1-np.square(wx)) * np.exp(-np.square(wx)/2)
def fastICA_0(X, f,alpha=None, n_comp=None,maxit=200, tol=1e-04):
    """FastICA algorithm for several units"""
n,p = X.shape
#check if n_comp is valid
if n_comp is None:
n_comp = min(n,p)
elif n_comp > min(n,p):
print("n_comp is too large")
n_comp = min(n,p)
#centering
#by subtracting the mean of each column of X (array).
X = preprocessing.scale(X,axis = 0,with_std=False)
X = X.T
#whitening
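    # Whitening: SVD of the sample covariance X X^T / n gives the eigenvectors and variances;
    # projecting with D^(-1/2) U^T (keeping the first n_comp rows) decorrelates the data and
    # scales each component to unit variance before running ICA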
svd = np.linalg.svd(X @ (X.T) / n)
k = np.diag(1/np.sqrt(svd[1])) @ (svd[0].T)
k = k[:n_comp,:]
X1 = k @ X
# initial random weght vector
w_init = np.random.normal(size=(n_comp, n_comp))
W = sym_decorrelation(w_init)
lim = 1
it = 0
# The FastICA algorithm
if f == "logcosh":
while lim > tol and it < maxit :
wx = W @ X1
gwx = g_logcosh(wx,alpha)
g_wx = gprime_logcosh(wx,alpha)
W1 = np.dot(gwx,X1.T)/X1.shape[1] - np.dot(np.diag(g_wx.mean(axis=1)),W)
W1 = sym_decorrelation(W1)
it = it +1
lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T)) - 1.0))
W = W1
S = W @ X1
A = np.linalg.inv(W @ k)
X_re = A @ S
return{'X':X1.T,'X_re':X_re.T,'A':A.T,'S':S.T}
elif f == "exp":
while lim > tol and it < maxit :
wx = W @ X1
gwx = g_exp(wx,alpha)
g_wx = gprime_exp(wx,alpha)
W1 = np.dot(gwx,X1.T)/X1.shape[1] - np.dot(np.diag(g_wx.mean(axis=1)),W)
W1 = sym_decorrelation(W1)
it = it +1
lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T)) - 1.0))
W = W1
S = W @ X1
A = np.linalg.inv(W @ k)
X_re = A @ S
return{'X':X1.T,'X_re':X_re.T,'A':A.T,'S':S.T}
else:
print("doesn't support this approximation negentropy function")
!python -m line_profiler fastICA_00.py.lprof
import numpy as np
import numexpr as ne
# "big" array
TT = np.random.normal(size=(10**5,10**3))
%%time
res = np.tanh(TT)
from concurrent.futures import ThreadPoolExecutor
import multiprocessing as mp
%%time
with ThreadPoolExecutor(max_workers=4) as pool:
res = pool.map(np.tanh, [i for i in TT])
%%time
with mp.Pool(processes=4) as pool:
res = pool.map(np.tanh, [i for i in TT])
ne.use_vml=False
%%time
res = ne.evaluate('tanh(TT)')
ne.use_vml=True
%%time
res = ne.evaluate('tanh(TT)')
ne.set_num_threads(1)
%%time
res = ne.evaluate('tanh(TT)')
ne.detect_number_of_cores()
ne.set_num_threads(ne.detect_number_of_cores())
%%time
res = ne.evaluate('tanh(TT)')
%run Test/fMRI.py
%run Source/fastICA_0.py
%run Source/fastICA_1.py
%run Source/fastICA_3.py
%run Source/fastICA_scipy.py
%run Source/fastICA_jit.py
%run Source/fastICA_ne.py
%timeit -r2 -n4 fastICA_0(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%timeit -r2 -n4 fastICA_1(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%timeit -r2 -n4 fastICA_scipy(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%timeit -r2 -n4 fastICA_3(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%timeit -r2 -n4 fastICA_jit(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%timeit -r2 -n4 fastICA_ne(fMRI,f = "logcosh",n_comp =20, alpha = 1,maxit = 200, tol = 0.0001)
%run Test/test.py
test(S_test)
test(fastICA_ne(X_test,f = "logcosh",n_comp =2, alpha = 1,maxit = 200, tol = 0.0001)['S'])
fMRI.shape
fig = plt.figure(figsize=(10,6))
for i in range(20):
ax = plt.subplot(5,4,i+1)
plt.plot(np.arange(30000)+1, fMRI[:,i])
plt.xticks([])
plt.yticks([])
fig.suptitle('Visualization of the Original Data', fontsize=15)
pass
ica_fMRI = fastICA_ne(fMRI,f = "logcosh", alpha = 1,maxit = 200, tol = 0.0001)
S_fMRI = ica_fMRI['S']
fig = plt.figure(figsize=(10,6))
for i in range(20):
ax = plt.subplot(5,4,i+1)
plt.plot(np.arange(30000)+1, S_fMRI[:,i])
plt.xticks([])
plt.yticks([])
fig.suptitle('Visualization of the Independent Components by ICA', fontsize=15)
pass
%run Test/finance.py
X_finance.shape
np.random.seed(663)
ica_finance = fastICA_ne(np.array(X_finance),f = "logcosh",n_comp=6, alpha = 1,maxit = 200, tol = 0.0001)
%run Test/plot_finance.py
plot_finance(X_finance,ica_finance)
%run Test/comparedata.py
S_ica = fastICA_ne(X_compare,f = "logcosh", alpha = 1,maxit = 200, tol = 0.0001)['S']
%run Test/plot_compare.py
plot_compare(X_compare,S_compare,S_ica)
!pip install .
from fastica_lz import fastica_lz as lz
test(lz.fastica_s(X_test,f = "logcosh",n_comp =2, alpha = 1,maxit = 200, tol = 0.0001)['S'])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: Implementation and Optimization for Independent Component Analysis
Step7: 3. Code Profiling and Optimization
Step8: 3.2 Bottlenecks
Step9: The results above suggested no significant improvement. Although multiprocessing sped up CPU time, the large amount of system time offset its advantage. Therefore, we shifted to Numexpr, a fast numerical expression evaluator for NumPy. It avoids allocating memory for intermediate results, which gives better cache utilization and reduces memory access in general. Since the ICA algorithm always deals with signal data (large arrays), we thought it would be a good fit. Specifically, we noticed that Numexpr supports Intel's VML to accelerate the evaluation of transcendental functions on Intel CPUs. Here we'd like to try it both with and without VML
Step10: Numexpr worked surprisingly well. As you can see, Numexpr using MKL can be up to 5x faster than numpy, successfully solving our "tanh" bottleneck. For most running times, VML did accelerate computations, and more threads made computations faster (although the results aren't stable).
Step11: According to the results, changing functions (i.e. from numpy to scipy) didn't give us a significant improvement. However, after changing the order of the while and for loops (a better algorithm), the running time dropped sharply (from fastICA_0 to fastICA_3). JIT (just-in-time compilation) didn't show any improvement; the reason may be that some functions are not supported in nopython mode. Cython didn't do a good job either (see our Test.ipynb). Numexpr further enhanced efficiency, as it successfully solved our tanh bottleneck.
Step12: 4.1.1 True Source Components
Step13: 4.1.2 ICs by FastICA
Step14: 4.2 Real World Data
Step15: Before applying the fastICA, a visualization of the original data
Step16: After appplying FastICA algorithm, we visualize each independent component in the following plot
Step17: 4.2.2 Finance Data
Step18: 5. Comparative Analysis with Competing Algorihtms
Step19: 6. Package
|
14,330 | <ASSISTANT_TASK:>
Python Code:
pd.read_csv("../datasets/google_trends_datascience.csv", index_col=0).plot();
import pandas as pd
log = pd.read_csv("../datasets/git_log_intellij.csv.gz")
log.head()
log.info()
log['timestamp'] = pd.to_datetime(log['timestamp'])
log.head()
# use log['timestamp'].max() instead of pd.Timestamp('today') to avoid outdated data in the future
recent = log[log['timestamp'] > log['timestamp'].max() - pd.Timedelta('30 days')]
recent.head()
java = recent[recent['filename'].str.endswith(".java")].copy()
java.head()
changes = java.groupby('filename')[['sha']].count()
changes.head()
loc = pd.read_csv("../datasets/cloc_intellij.csv.gz", index_col=1)
loc.head()
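# Join the change counts with the lines-of-code measurements; files from the git log that
# have no cloc entry (e.g. files that no longer exist) are dropped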
hotspots = changes.join(loc[['code']]).dropna(subset=['code'])
hotspots.head()
top10 = hotspots.sort_values(by="sha", ascending=False).head(10)
top10
ax = top10.plot.scatter('sha', 'code');
for k, v in top10.iterrows():
ax.annotate(k.split("/")[-1], v)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: "100" == max. popularity!
Step2: We explore some basic key elements of the dataset
Step3: <b>1</b> DataFrame (~ programmable Excel worksheet), <b>6</b> Series (= columns), <b>1128819</b> rows (= entries)
Step4: We filter out older changes.
Step5: We keep just code written in Java.
Step6: III. Formal Modeling
Step7: We add additional information about the number of lines of all currently existing files...
Step8: ...and join this data with the existing dataset.
Step9: VI. Interpretation
Step10: V. Communication
|
14,331 | <ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TensorFlow is an open source machine learning library
import tensorflow as tf
# Numpy is a math library
import numpy as np
# Matplotlib is a graphing library
import matplotlib.pyplot as plt
# math is Python's math library
import math
# We'll generate this many sample datapoints
SAMPLES = 1000
# Set a "seed" value, so we get the same random numbers each time we run this
# notebook
np.random.seed(1337)
# Generate a uniformly distributed set of random numbers in the range from
# 0 to 2π, which covers a complete sine wave oscillation
x_values = np.random.uniform(low=0, high=2*math.pi, size=SAMPLES)
# Shuffle the values to guarantee they're not in order
np.random.shuffle(x_values)
# Calculate the corresponding sine values
y_values = np.sin(x_values)
# Plot our data. The 'b.' argument tells the library to print blue dots.
plt.plot(x_values, y_values, 'b.')
plt.show()
# Add a small random number to each y value
y_values += 0.1 * np.random.randn(*y_values.shape)
# Plot our data
plt.plot(x_values, y_values, 'b.')
plt.show()
# We'll use 60% of our data for training and 20% for testing. The remaining 20%
# will be used for validation. Calculate the indices of each section.
TRAIN_SPLIT = int(0.6 * SAMPLES)
TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT)
# Use np.split to chop our data into three parts.
# The second argument to np.split is an array of indices where the data will be
# split. We provide two indices, so the data will be divided into three chunks.
x_train, x_test, x_validate = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT])
y_train, y_test, y_validate = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT])
# Double check that our splits add up correctly
assert (x_train.size + x_validate.size + x_test.size) == SAMPLES
# Plot the data in each partition in different colors:
plt.plot(x_train, y_train, 'b.', label="Train")
plt.plot(x_test, y_test, 'r.', label="Test")
plt.plot(x_validate, y_validate, 'y.', label="Validate")
plt.legend()
plt.show()
# We'll use Keras to create a simple model architecture
from tensorflow.keras import layers
model_1 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_1.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# Final layer is a single neuron, since we want to output a single value
model_1.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_1.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
# Train the model on our training data while validating on our validation set
history_1 = model_1.fit(x_train, y_train, epochs=1000, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_1.history['loss']
val_loss = history_1.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 50
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_1.history['mae']
val_mae = history_1.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Use the model to make predictions from our training data
predictions = model_1.predict(x_train)
# Plot the predictions along with the actual values
plt.clf()
plt.title('Training data predicted vs actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_train, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
model_2 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_2.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# The new second layer may help the network learn more complex representations
model_2.add(layers.Dense(16, activation='relu'))
# Final layer is a single neuron, since we want to output a single value
model_2.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_2.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
history_2 = model_2.fit(x_train, y_train, epochs=600, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_2.history['loss']
val_loss = history_2.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 100
plt.clf()
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_2.history['mae']
val_mae = history_2.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Calculate and print the loss on our test dataset
loss = model_2.evaluate(x_test, y_test)
# Make predictions based on our test dataset
predictions = model_2.predict(x_test)
# Graph the predictions against the actual values
plt.clf()
plt.title('Comparison of predictions and actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_test, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
tflite_model = converter.convert()
# Save the model to disk
open("sine_model.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
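# OPTIMIZE_FOR_SIZE should apply post-training quantization of the weights (stored as 8-bit
# integers), trading a little accuracy for a smaller model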
tflite_model = converter.convert()
# Save the model to disk
open("sine_model_quantized.tflite", "wb").write(tflite_model)
# Instantiate an interpreter for each model
sine_model = tf.lite.Interpreter('sine_model.tflite')
sine_model_quantized = tf.lite.Interpreter('sine_model_quantized.tflite')
# Allocate memory for each model
sine_model.allocate_tensors()
sine_model_quantized.allocate_tensors()
# Get the input and output tensors so we can feed in values and get the results
sine_model_input = sine_model.tensor(sine_model.get_input_details()[0]["index"])
sine_model_output = sine_model.tensor(sine_model.get_output_details()[0]["index"])
sine_model_quantized_input = sine_model_quantized.tensor(sine_model_quantized.get_input_details()[0]["index"])
sine_model_quantized_output = sine_model_quantized.tensor(sine_model_quantized.get_output_details()[0]["index"])
# Create arrays to store the results
sine_model_predictions = np.empty(x_test.size)
sine_model_quantized_predictions = np.empty(x_test.size)
# Run each model's interpreter for each value and store the results in arrays
for i in range(x_test.size):
sine_model_input().fill(x_test[i])
sine_model.invoke()
sine_model_predictions[i] = sine_model_output()[0]
sine_model_quantized_input().fill(x_test[i])
sine_model_quantized.invoke()
sine_model_quantized_predictions[i] = sine_model_quantized_output()[0]
# See how they line up with the data
plt.clf()
plt.title('Comparison of various models against actual values')
plt.plot(x_test, y_test, 'bo', label='Actual')
plt.plot(x_test, predictions, 'ro', label='Original predictions')
plt.plot(x_test, sine_model_predictions, 'bx', label='Lite predictions')
plt.plot(x_test, sine_model_quantized_predictions, 'gx', label='Lite quantized predictions')
plt.legend()
plt.show()
import os
basic_model_size = os.path.getsize("sine_model.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("sine_model_quantized.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
# Install xxd if it is not available
!apt-get -qq install xxd
# Save the file as a C source file
!xxd -i sine_model_quantized.tflite > sine_model_quantized.cc
# Print the source file
!cat sine_model_quantized.cc
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create and convert a TensorFlow model
Step2: Generate data
Step3: Add some noise
Step4: Split our data
Step5: Design a model
Step6: Train the model
Step7: Check the training metrics
Step8: Look closer at the data
Step9: Further metrics
Step10: This graph of mean absolute error tells another story. We can see that training data shows consistently lower error than validation data, which means that the network may have overfit, or learned the training data so rigidly that it can't make effective predictions about new data.
Step11: Oh dear! The graph makes it clear that our network has learned to approximate the sine function in a very limited way. From 0 <= x <= 1.1 the line mostly fits, but for the rest of our x values it is a rough approximation at best.
Step12: We'll now train the new model. To save time, we'll train for only 600 epochs
Step13: Evaluate our new model
Step14: Great results! From these graphs, we can see several exciting things
Step15: Much better! The evaluation metrics we printed show that the model has a low loss and MAE on the test data, and the predictions line up visually with our data fairly well.
Step16: Test the converted models
Step17: We can see from the graph that the predictions for the original model, the converted model, and the quantized model are all close enough to be indistinguishable. This means that our quantized model is ready to use!
Step18: Our quantized model is only 16 bytes smaller than the original version, which only a tiny reduction in size! At around 2.6 kilobytes, this model is already so small that the weights make up only a small fraction of the overall size, meaning quantization has little effect.
|
14,332 | <ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """returns relative error"""
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
[ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
# Test the affine_backward function
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
[ 0., 0., 0.04545455, 0.13636364,],
[ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print 'Testing affine_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-2
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)
print 'Testing initialization ... '
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print 'Testing test-time forward pass ... '
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print 'Testing training loss (no regularization)'
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
for reg in [0.0, 0.7]:
print 'Running numeric gradient check with reg = ', reg
model.reg = reg
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
model = TwoLayerNet()
solver = None
##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 50% accuracy on the validation set. #
##############################################################################
data1 = {
'X_train': data['X_train'],# training data
'y_train': data['y_train'],# training labels
'X_val': data['X_val'],# validation data
'y_val': data['y_val'] # validation labels
}
solver = Solver(model, data1,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(14, 12)
plt.show(block=True)
plt.show()
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
# TODO: Use a three-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
learning_rate = 1e-3
weight_scale = 1e-1
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
# TODO: Use a five-layer Net to overfit 50 training examples.
import random
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
max_count = 1 #before=30
# find the best learning rate and weight scale by changing the uniform :)
for count in xrange(max_count):
#learning_rate = 10**random.uniform(-4,0)
#weight_scale = 10**random.uniform(-4,0)
# best values found for 100% accuracy on training set
learning_rate=0.000894251014039
weight_scale=0.0736354068714
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
num_epochs=20, batch_size=25,
update_rule='sgd', verbose=True,
optim_config={
'learning_rate': learning_rate,
}
)
#solver = Solver(model, small_data,
# print_every=10, num_epochs=20, batch_size=25,
# update_rule='sgd',
# optim_config={
# 'learning_rate': learning_rate,
# }
# )
solver.train()
#print "lr=" + str(learning_rate) + ",ws=" + str(weight_scale) \
# + "loss=" + str(solver.loss_history[-1])
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
from cs231n.optim import sgd_momentum
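# For reference, the classic momentum update these expected values correspond to
# (with the default momentum of 0.9) is:
#   v = momentum * v - learning_rate * dw
#   next_w = w + v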
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
[ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
[ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
[ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
[ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])
expected_velocity = np.asarray([
[ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
[ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
[ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
[ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])
print 'next_w error: ', rel_error(next_w, expected_next_w)
print 'velocity error: ', rel_error(expected_velocity, config['velocity'])
num_train = 4000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': 1e-2,
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(12, 9)
plt.show()
#plt.show()
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop
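# For reference, the RMSProp update these expected values correspond to
# (decay_rate = 0.99, epsilon = 1e-8 by default) is:
#   cache = decay_rate * cache + (1 - decay_rate) * dw**2
#   next_w = w - learning_rate * dw / (np.sqrt(cache) + epsilon)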
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
[-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
[-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
[ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
[ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
[ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
[ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
[ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
[ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'cache error: ', rel_error(expected_cache, config['cache'])
# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam
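# For reference, the Adam update these expected values correspond to
# (beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8) increments t before the bias correction:
#   t += 1
#   m = beta1 * m + (1 - beta1) * dw
#   v = beta2 * v + (1 - beta2) * dw**2
#   next_w = w - learning_rate * (m / (1 - beta1**t)) / (np.sqrt(v / (1 - beta2**t)) + epsilon)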
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
[-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
[-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
[ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
[ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
[ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],
[ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],
[ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
[ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
[ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
[ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
[ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
[ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'v error: ', rel_error(expected_v, config['v'])
print 'm error: ', rel_error(expected_m, config['m'])
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': learning_rates[update_rule]
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
#plt.gcf().set_size_inches(15,15)
plt.show(block=True)
#fig.savefig('vs.png')
best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might #
# batch normalization and dropout useful. Store your best model in the #
# best_model variable. #
################################################################################
max_count = 1 #before=30
# find the best learning rate and weight scale by changing the uniform :)
for count in xrange(max_count):
#lr = 10**random.uniform(-5,0)
#ws = 10**random.uniform(-5,0)
#print "lr=" + str(lr) + ",ws=" + str(ws)
lr=1e-4
ws=5e-2
best_model = FullyConnectedNet([100, 100, 100, 100], weight_scale=ws,use_batchnorm=True)
solver = Solver(best_model, data,
num_epochs=20, batch_size=500,
update_rule='adam',
optim_config={
'learning_rate': lr,
},
verbose=True)
solver.train()
y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
print 'Val_acc: ', (y_val_pred == data['y_val']).mean()
print 'Test_acc: ', (y_test_pred == data['y_test']).mean()
print '------------'
################################################################################
# END OF YOUR CODE #
################################################################################
y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
print 'Validation set accuracy: ', (y_val_pred == data['y_val']).mean()
print 'Test set accuracy: ', (y_test_pred == data['y_test']).mean()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fully-Connected Neural Nets
Step4: Affine layer
Step5: Affine layer
Step6: ReLU layer
Step7: ReLU layer
Step8: "Sandwich" layers
Step9: Loss layers
Step10: Two-layer network
Step11: Solver
Step12: Multilayer network
Step13: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.
Step14: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.
Step15: Inline question
Step16: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
Step17: RMSProp and Adam
Step18: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules
Step19: Train a good model!
Step20: Test you model
|
14,333 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation, metrics
from sklearn import preprocessing
import matplotlib.pyplot as plt
cols = ['Area', 'Perimeter','Compactness','Kernel_Length','Kernel_Width','Assymetry_Coefficient','Kernel_Groove_Length', 'Class']
# read .csv from provided dataset
csv_filename="seeds_dataset.txt"
# df=pd.read_csv(csv_filename,index_col=0)
df=pd.read_csv(csv_filename,delim_whitespace=True,names=cols)
df.head()
features = df.columns[:-1]
features
X = df[features]
y = df['Class']
X.head()
# split dataset to 60% training and 40% testing
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=0)
X_train.shape, y_train.shape
y.unique()
len(features)
# Apply PCA with the same number of dimensions as variables in the dataset
from sklearn.decomposition import PCA
pca = PCA(n_components=7) #7 components for 7 variables
pca.fit(X)
# Print the components and the amount of variance in the data contained in each dimension
print(pca.components_)
print(pca.explained_variance_ratio_)
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(list(pca.explained_variance_ratio_),'-o')
plt.title('Explained variance ratio as function of PCA components')
plt.ylabel('Explained variance ratio')
plt.xlabel('Component')
plt.show()
features
X = df[features].values
y= df['Class'].values
pca = PCA(n_components=2)
reduced_X = pca.fit_transform(X)
red_x, red_y = [], []
blue_x, blue_y = [], []
green_x, green_y = [], []
for i in range(len(reduced_X)):
if y[i] == 1:
red_x.append(reduced_X[i][0])
red_y.append(reduced_X[i][1])
elif y[i] == 2:
blue_x.append(reduced_X[i][0])
blue_y.append(reduced_X[i][1])
else:
green_x.append(reduced_X[i][0])
green_y.append(reduced_X[i][1])
plt.scatter(red_x, red_y, c='r', marker='x')
plt.scatter(blue_x, blue_y, c='b', marker='D')
plt.scatter(green_x, green_y, c='g', marker='.')
plt.show()
# Import clustering modules
from sklearn.cluster import KMeans
from sklearn.mixture import GMM
# First we reduce the data to two dimensions using PCA to capture variation
pca = PCA(n_components=2)
reduced_data = pca.fit_transform(X)
print(reduced_data[:10]) # print upto 10 elements
kmeans = KMeans(n_clusters=3)
clusters = kmeans.fit(reduced_data)
print(clusters)
# Plot the decision boundary by building a mesh grid to populate a graph.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
hx = (x_max-x_min)/1000.
hy = (y_max-y_min)/1000.
xx, yy = np.meshgrid(np.arange(x_min, x_max, hx), np.arange(y_min, y_max, hy))
# Obtain labels for each point in mesh. Use last trained model.
Z = clusters.predict(np.c_[xx.ravel(), yy.ravel()])
# Find the centroids for KMeans or the cluster means for GMM
centroids = kmeans.cluster_centers_
print('*** K MEANS CENTROIDS ***')
print(centroids)
# TRANSFORM DATA BACK TO ORIGINAL SPACE FOR ANSWERING 7
print('*** CENTROIDS TRANSFERED TO ORIGINAL SPACE ***')
print(pca.inverse_transform(centroids))
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('Clustering on the seeds dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
max_iter=300,
random_state=0)
km.fit(X)
distortions .append(km.inertia_)
plt.plot(range(1,11), distortions , marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
#plt.savefig('./figures/elbow.png', dpi=300)
plt.show()
import numpy as np
from matplotlib import cm
from sklearn.metrics import silhouette_samples
km = KMeans(n_clusters=3,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km == c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
from sklearn.cross_validation import train_test_split
X = df[features]
y = df['Class']
X_train, X_test, y_train, y_test = train_test_split(X, y ,test_size=0.25, random_state=42)
from sklearn import cluster
clf = cluster.KMeans(init='k-means++', n_clusters=3, random_state=5)
clf.fit(X_train)
print( clf.labels_.shape)
print (clf.labels_)
# Predict clusters on testing data
y_pred = clf.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
# Affinity propagation
aff = cluster.AffinityPropagation()
aff.fit(X_train)
print (aff.cluster_centers_indices_.shape)
y_pred = aff.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
ms = cluster.MeanShift()
ms.fit(X_train)
print( ms.cluster_centers_)
y_pred = ms.predict(X_test)
from sklearn import metrics
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
from sklearn import mixture
# Define a heldout dataset to estimate covariance type
X_train_heldout, X_test_heldout, y_train_heldout, y_test_heldout = train_test_split(
X_train, y_train,test_size=0.25, random_state=42)
for covariance_type in ['spherical','tied','diag','full']:
gm=mixture.GMM(n_components=3, covariance_type=covariance_type, random_state=42, n_init=5)
gm.fit(X_train_heldout)
y_pred=gm.predict(X_test_heldout)
print ("Adjusted rand score for covariance={}:{:.2}".format(covariance_type,
metrics.adjusted_rand_score(y_test_heldout, y_pred)))
gm = mixture.GMM(n_components=3, covariance_type='tied', random_state=42)
gm.fit(X_train)
# Print train clustering and confusion matrix
y_pred = gm.predict(X_test)
print ("Addjusted rand score:{:.2}".format(metrics.adjusted_rand_score(y_test, y_pred)))
print ("Homogeneity score:{:.2} ".format(metrics.homogeneity_score(y_test, y_pred)) )
print ("Completeness score: {:.2} ".format(metrics.completeness_score(y_test, y_pred)))
print ("Confusion matrix")
print (metrics.confusion_matrix(y_test, y_pred))
pl=plt
from sklearn import decomposition
# In this case the seeding of the centers is deterministic,
# hence we run the kmeans algorithm only once with n_init=1
pca = decomposition.PCA(n_components=2).fit(X_train)
reduced_X_train = pca.transform(X_train)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .01 # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will asign a color to each
x_min, x_max = reduced_X_train[:, 0].min() - 1, reduced_X_train[:, 0].max() + 1  # pad the mesh so it covers all points
y_min, y_max = reduced_X_train[:, 1].min() - 1, reduced_X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
gm.fit(reduced_X_train)
#print np.c_[xx.ravel(),yy.ravel()]
Z = gm.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
#print reduced_X_train.shape
pl.plot(reduced_X_train[:, 0], reduced_X_train[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = gm.means_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='.', s=169, linewidths=3,
color='w', zorder=10)
pl.title('Mixture of gaussian models on the seeds dataset (PCA-reduced data)\n'
'Means are marked with white dots')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr>
Step2: The number of components is set equal to the number of feature variables, i.e. 7.
Step3: The explained variance is high for the first two dimensions but drops sharply from the third dimension onward. Thus, the first two components already explain about 86.5% of the variation in the data.
Step4: The first dimension seems to basically represent only the 'Area'-feature, as this feature has a strong negative projection on the first dimension. The other features have rather weak (mostly negative) projections on the first dimension. That is, the first dimension basically tells us whether the 'area'-feature value is high or low, mixed with a little bit of information from the other features.
Step5: Clustering
Step6: <hr>
Step7: As we can see in the following plot, the elbow is located at k = 3, which provides evidence that k = 3 is indeed a good choice for this dataset.
Step8: Our clustering with 3 centroids is good.
Step9:
Step10: <hr>
Step11: <hr>
Step12: <hr>
Step13: <hr>
|
14,334 | <ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
documents = dataset.data
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
no_features = 1000
# NMF is able to use tf-idf
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(documents)
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
# LDA can only use raw term counts for LDA because it is a probabilistic graphical model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words='english')
tf = tf_vectorizer.fit_transform(documents)
tf_feature_names = tf_vectorizer.get_feature_names()
from sklearn.decomposition import NMF, LatentDirichletAllocation
no_topics = 20
# Run NMF
nmf = NMF(n_components=no_topics, random_state=1, alpha=.1, l1_ratio=.5, init='nndsvd').fit(tfidf)
# Run LDA
lda = LatentDirichletAllocation(n_topics=no_topics, max_iter=5, learning_method='online', learning_offset=50.,random_state=0).fit(tf)
def display_topics(model, feature_names, no_top_words):
for topic_idx, topic in enumerate(model.components_):
print ("Topic %d:" % (topic_idx))
print (" ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]]))
no_top_words = 10
display_topics(nmf, tfidf_feature_names, no_top_words)
display_topics(lda, tf_feature_names, no_top_words)
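# Beyond the top words per topic, both fitted models can also give per-document topic
# weights (an illustrative extra check; variable names here are our own):
doc_topics_nmf = nmf.transform(tfidf)  # shape: (n_documents, no_topics)
doc_topics_lda = lda.transform(tf)     # each row is a topic distribution summing to ~1
print(doc_topics_nmf.shape, doc_topics_lda.shape)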
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the bag-of-words matrix is straightforward in scikit-learn — the heavy lifting is done by its text feature-extraction utilities. The TfidfVectorizer applies a tf-idf transform to the bag-of-words matrix that NMF will factorize, while LDA, being a probabilistic graphical model (i.e. dealing with probabilities), requires raw term counts, so a CountVectorizer is used for it. Stop words are removed and the number of terms included in the matrix is restricted to the top 1000.
Step2: NMF and LDA with Scikit Learn
Step3: Displaying and Evaluating Topics
Step4: The topics shown first were produced with NMF; the second call displays the LDA topics.
|
14,335 | <ASSISTANT_TASK:>
Python Code:
# imports used throughout this example
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error

np.random.seed(0)
x = 10 * np.random.rand(100)
def model(x, sigma=0.3):
fast_oscillation = np.sin(5 * x)
slow_oscillation = np.sin(0.5 * x)
noise = sigma * np.random.rand(len(x))
return slow_oscillation + fast_oscillation + noise
plt.figure(figsize = (12,10))
y = model(x)
plt.errorbar(x, y, 0.3, fmt='o')
xfit = np.linspace(0, 10, 1000)
# fit the model and get the estimation for each data points
yfit = RandomForestRegressor(100, random_state=42).fit(x[:, None], y).predict(xfit[:, None])
ytrue = model(xfit, 0)
plt.figure(figsize = (12,10))
plt.errorbar(x, y, 0.3, fmt='o')
plt.plot(xfit, yfit, '-r', label = 'predicted', zorder = 10)
plt.plot(xfit, ytrue, '-k', alpha=0.5, label = 'true model', zorder = 10)
plt.legend()
mse = mean_squared_error(ytrue, yfit)
print(mse)
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(hidden_layer_sizes=(200,200,200), max_iter = 4000, solver='lbfgs', \
alpha=0.01, activation = 'tanh', random_state = 8)
yfit = mlp.fit(x[:, None], y).predict(xfit[:, None])
plt.figure(figsize = (12,10))
plt.errorbar(x, y, 0.3, fmt='o')
plt.plot(xfit, yfit, '-r', label = 'predicted', zorder = 10)
plt.plot(xfit, ytrue, '-k', alpha=0.5, label = 'true model', zorder = 10)
plt.legend()
mse = mean_squared_error(ytrue, yfit)
print(mse)
from sklearn.svm import SVR
# define your model
svr = SVR(kernel='rbf', C=100, gamma=1.0)  # illustrative hyperparameters; the provided solution file may differ
# get the estimation from the model
yfit = svr.fit(x[:, None], y).predict(xfit[:, None])
# plot the results as above
plt.figure(figsize = (12,10))
plt.errorbar(x, y, 0.3, fmt='o')
plt.plot(xfit, yfit, '-r', label = 'predicted', zorder = 10)
plt.plot(xfit, ytrue, '-k', alpha=0.5, label = 'true model', zorder = 10)
plt.legend()
%load ../solutions/solution_03.py
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fit a Random Forest Model
Step2: Print out the misfit using the mean squared error.
Step3: Using ANN
Step4: Exercise
|
14,336 | <ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
df_all = pd.read_csv('All content.csv')
df_erik = pd.read_csv('Erik content.csv')
df_all = df_all[(df_all.Published > df_erik.at[6,'Published']) &
                (df_all['Url'].str.contains('/articles/')) & (df_all.Published < df_erik.at[16,'Published']) &
                (~df_all.Title.isin(df_erik.Title))]  # keep only articles not authored by Erik
df_erik = df_erik[(df_erik.Published < df_erik.at[16,'Published'])]
print 'All PVs median'
print df_all['Page Views'].median()
print 'Erik PVs median'
print df_erik['Page Views'].median()
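# The median gap discussed in the write-up, computed directly (illustrative check):
print(df_all['Page Views'].median() - df_erik['Page Views'].median())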
df_all['Page Views'].describe()
df_erik['Page Views'].describe()
d = {'erik':df_erik['Page Views'],'all':df_all['Page Views']}
df = pd.DataFrame(data=d)
df.plot(kind='box',showfliers=False, title = "Page Views Distribution")
shares = {'erik shares':df_erik['Facebook Shares'],'all_shares':df_all['Facebook Shares']}
df_shares = pd.DataFrame(data=shares)
df_shares.plot(kind='box',showfliers=False, title = "Facebook Shares Distribution")
d = {'erik':df_erik['Page Views'],'all':df_all['Page Views']}
df = pd.DataFrame(data=d)
df.plot(kind='box',showfliers=True, title = "Page Views Distribution")
shares = {'erik shares':df_erik['Facebook Shares'],'all_shares':df_all['Facebook Shares']}
df_shares = pd.DataFrame(data=shares)
df_shares.plot(kind='box',showfliers=True, title = "Facebook Shares Distribution")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we eliminate any article published more recently than last Friday or any article published before Erik joined the company. This is done in order to allow a fair, apples-to-apples comparison. We also remove any article authored by Erik from the "All" dataset.
Step2: Above we see that, at first glance, the median Erik article receives about 800 fewer Page Views than the rest of our data set.
Step3: Here we plot the Page Views received per article while eliminating outliers (both high and low).
Step4: And here we plot the Facebook Shares per article, also eliminating outliers.
Step5: Finally, we plot both the Page Views and Facebook Shares with outliers included (indicated by the "+" symbols above the respective box plots).
|
14,337 | <ASSISTANT_TASK:>
Python Code:
cursor.execute(SELECT *
FROM dot_311
LIMIT 1)
cursor.fetchone()
cursor.execute(SELECT column_name FROM information_schema.columns WHERE table_name='dot_311')
cursor.fetchall()
cursor.execute(SELECT created_date, closed_date
FROM dot_311
LIMIT 1)
cursor.fetchone()
cursor.execute(SELECT created_date, closed_date
FROM dot_311)
results = cursor.fetchall()
results[0][0]
results[0][1]
results[0][1] - results[0][0]
dif = results[0][1] - results[0][0]
divmod(dif.days * 86400 + dif.seconds, 60)
# datetime.datetime.strftime(datetime.timedelta(0, 3780), "%H:%M")
dif = results[0][1] - results[0][0]
divmod(dif.seconds, 60)
dif = datetime.timedelta(2, 3780)
dif.days * 1440 + dif.seconds/60
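# Sanity check of the minutes conversion above (illustrative): 2 days + 3780 s = 2*1440 + 63 = 2943 minutes
print(datetime.timedelta(days=2, seconds=3780).days * 1440 + datetime.timedelta(days=2, seconds=3780).seconds / 60)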
results[:3]
timelen = []
for created, closed in results[:3]:
print('created', created)
print('closed', closed)
print('')
timelen = []
for created, closed in results:
if created and closed:
dif = closed - created
timelen.append(dif.days * 1440 + dif.seconds/60)
len(results) - len(timelen)
201 / len(results)  # fraction of records missing a created or closed time
num = 0
for time in timelen:
if time <= 0:
num += 1
print(num)
sorted(timelen)[:20]
df = pd.DataFrame({'timelen': timelen})
df.hist(bins = 28, range = (-120, 1560))
df.mean()
df[df['timelen'] >= 0].mean()
df.median()
df[df['timelen'] >= 0].median()
df.mode()
df[df['timelen'] >= 0].mode()
max(timelen) - min(timelen)
df[df['timelen'] >= 0].max() - df[df['timelen'] >= 0].min()
df.std()
df[df['timelen'] >= 0].std()
df.mean() + df.std() * 3
df.mean() - df.std() * 3
std_outliers = df[(df['timelen'] < -53111.819884) | (df['timelen'] > 72202.006197)]
len(std_outliers)
std_outliers.head()
df[df['timelen'] >= 0].mean() + df[df['timelen'] >= 0].std() * 3
df[df['timelen'] >= 0].mean() - df[df['timelen'] >= 0].std() * 3
std_outliers = df[df['timelen'] > 75447.537602]
len(std_outliers)
std_outliers.head()
IQR = df.quantile(q=0.75) - df.quantile(q=0.25)
IQR
IQR2 = df[df['timelen'] >= 0].quantile(q=0.75) - df[df['timelen'] >= 0].quantile(q=0.25)
IQR2
df.quantile(q=0.25) - 1.5 * IQR
df.quantile(q=0.75) + 1.5 * IQR
IQR_outliers = df[(df['timelen'] < -8501.2125) | (df['timelen'] > 14366.020833)]
len(IQR_outliers)
IQR_outliers.head()
df[df['timelen'] >= 0].quantile(q=0.25) - 1.5 * IQR2
df[df['timelen'] >= 0].quantile(q=0.75) + 1.5 * IQR2
IQR_outliers2 = df[(df['timelen'] > 14366.020833)]
len(IQR_outliers2)
IQR_outliers2.head()
plt.boxplot(df['timelen'])
plt.boxplot(df[df['timelen'] >= 0]['timelen'])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What does this data look like?
Step3: What are the column names?
Step5: What does a created and closed date look like?
Step7: Okay, let's just save the created and closed dates.
Step8: Exploring how to convert them into the length of time the complaints are open.
Step9: Okay, let's make a list of how many minutes each one of these is open!
Step10: 201 or 1.7% of the items didn't have both a closed and created time
Step11: And at least 1103 of the results are probably not accurate.
Step12: Let's turn this into a Pandas DataFrame.
Step13: Okay, let's make a histogram showing how many incidents happen.
Step14: Looks like those 1103 0-minute cases are all outliers that are going to mess things up.
Step15: Median
Step16: Mode
Step17: Measures of Spread
Step18: Standard Deviation
Step19: If you think outliers are numbers three standard deviations away from the mean, there are 68 outliers, but it doesn't catch all of the negative values.
Step20: Interquartile Range
Step21: If you think outliers are 1.5 times the interquartile range above the 3rd quartile or below the 1st quartile, there are 1067 outliers, more than with the standard deviation outliers, but still not getting rid of the negative values.
Step22: Box and Whiskers
|
14,338 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow import keras
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
import numpy as np
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# Just use `fit` as usual
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
loss_tracker = keras.metrics.Mean(name="loss")
mae_metric = keras.metrics.MeanAbsoluteError(name="mae")
class CustomModel(keras.Model):
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute our own loss
loss = keras.losses.mean_squared_error(y, y_pred)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss)
mae_metric.update_state(y, y_pred)
return {"loss": loss_tracker.result(), "mae": mae_metric.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker, mae_metric]
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
# We don't pass a loss or metrics here.
model.compile(optimizer="adam")
# Just use `fit` as usual -- you can use callbacks, etc.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=5)
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
if len(data) == 3:
x, y, sample_weight = data
else:
sample_weight = None
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value.
# The loss function is configured in `compile()`.
loss = self.compiled_loss(
y,
y_pred,
sample_weight=sample_weight,
regularization_losses=self.losses,
)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics.
# Metrics are configured in `compile()`.
self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {m.name: m.result() for m in self.metrics}
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# You can now use sample_weight argument
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
sw = np.random.random((1000, 1))
model.fit(x, y, sample_weight=sw, epochs=3)
class CustomModel(keras.Model):
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_pred = self(x, training=False)
# Updates the metrics tracking the loss
self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Update the metrics.
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {m.name: m.result() for m in self.metrics}
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])
# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)
from tensorflow.keras import layers
# Create the discriminator
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super(GAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
def compile(self, d_optimizer, g_optimizer, loss_fn):
super(GAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, real_images):
if isinstance(real_images, tuple):
real_images = real_images[0]
# Sample random points in the latent space
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(self.generator(random_latent_vectors))
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
return {"d_loss": d_loss, "g_loss": g_loss}
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
# To limit the execution time, we only train on 100 batches. You can train on
# the entire dataset. You will need about 20 epochs to get nice results.
gan.fit(dataset.take(100), epochs=1)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A first simple example
Step2: Let's try this out
Step3: Going lower-level
Step4: Supporting sample_weight & class_weight
Step5: Providing your own evaluation step
Step6: Wrapping up
Step7: Here's a feature-complete GAN class, overriding compile() to use its own signature,
Step8: Let's test-drive it
|
14,339 | <ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
# TODO: Implement Function
# Dictionary to go from the words to an id, we'll call vocab_to_int
vocab_to_int = {word: i for i,word in enumerate(set(text))}
# Dictionary to go from the id to word, we'll call int_to_vocab
int_to_vocab = {i:word for i, word in enumerate(set(text))}
return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
# TODO: Implement Function
return {'.':'||Period||',
',':'||Comma||',
'"':'||Quotation_Mark||',
';':'||Semicolon||',
'?':'||Question_mark||',
'!':'||Exclamation_mark||',
'(':'||Left_Parentheses||',
')':'||Right_Parentheses||',
'--':'||Dash||',
'\n':'||Return||'}
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
# TODO: Implement Function
# Input text placeholder named "input" using the TF Placeholder name parameter.
Input = tf.placeholder(tf.int32,shape=[None, None],name="input")
# Targets placeholder
Targets = tf.placeholder(tf.int32,shape=[None, None],name="targets")
# Learning Rate placeholder
LearningRate = tf.placeholder(tf.float32,name="learning_rate")
return Input, Targets, LearningRate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
#drop = tf.contrib.rnn.DropoutWrapper(lstm)
Cell = tf.contrib.rnn.MultiRNNCell([lstm]*1)
InitialState = Cell.zero_state(batch_size,tf.float32)
InitialState = tf.identity(InitialState, name = "initial_state")
return Cell, InitialState
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim]))
embed = tf.nn.embedding_lookup(embedding,input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
# TODO: Implement Function
# Build the RNN using the tf.nn.dynamic_rnn()
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype = tf.float32)
#Apply the name "final_state" to the final state using tf.identity()
final_state = tf.identity(final_state, name="final_state")
return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
# TODO: Implement Function
# Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
embed = get_embed(input_data, vocab_size, embed_dim)
# Build RNN using cell and your build_rnn(cell, inputs) function.
rnn, FinalState = build_rnn(cell, embed)
# Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Logits = tf.contrib.layers.fully_connected(
inputs = rnn,
num_outputs = vocab_size,
activation_fn = None)
return Logits, FinalState
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
# TODO: Implement Function
# Compute nb of batchs
num_batches = int(len(int_text) / batch_size / seq_length)
#print(len(int_text), batch_size, seq_length,num_batches)
# Extract input_data and target_data
input_vector = np.array(int_text[:num_batches * batch_size * seq_length])
target_vector = np.array(int_text[1:(num_batches * batch_size * seq_length)+1])
#print(len(input_vector))
# Notice that the last target value in the last batch is the first input value of the first batch.
target_vector[-1] = input_vector[0]
# reshape to batch size
inputs = input_vector.reshape(batch_size, -1)
targets = target_vector.reshape(batch_size, -1)
#print(inputs.shape)
# split secquences
batch_inputs = np.array(np.split(inputs, num_batches, 1))
batch_targets = np.array(np.split(targets, num_batches, 1))
#print(batch_inputs[0].shape)
# concatenate inputs and targets batches
Batches = np.array(list(zip(batch_inputs, batch_targets)))
return Batches
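# Illustrative shape check: 20 tokens with batch_size=2 and seq_length=3 give 3 batches,
# each holding an input block and a target block of shape (2, 3).
print(get_batches(list(range(1, 21)), 2, 3).shape)  # (3, 2, 2, 3)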
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 5
# Learning Rate
learning_rate = 0.005
# Show stats for every n number of batches
show_every_n_batches = 100
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
# TODO: Implement Function
InputTensor = loaded_graph.get_tensor_by_name(name = "input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name(name = "initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name(name = "final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name(name = "probs:0")
return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
# First "bad" idea
#return int_to_vocab[np.argmax(probabilities)]
# Use of slight bit of randomness is helpful when predicting the next word. Otherwise, the predictions might fall into a loop of the same words.
word_idx = np.random.choice(len(probabilities), size=1, p=probabilities)[0]
pred_word = int_to_vocab[word_idx]
return pred_word
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
14,340 | <ASSISTANT_TASK:>
Python Code:
# WELL
# all valves closed
st = 'A01'
x1,y1,z1 = locs[st]
Z.move(42)
XY.move_xy(x1,y1)
Z.move(z1)
log.append([time.ctime(time.time()), 'AT '+st])
# ACQUIRE 120 frames 11000 ms
# OPEN Hep_1 + W_1 (tree in + out)
log.append([time.ctime(time.time()), 'OPEN tree in + out'])
# flow 20 min (fill tube + tree)
# ACQUIRE 120 frames 5000 ms
# CLOSE W_1 (tree out); OPEN in_1 + out_1 (chip in + out)
log.append([time.ctime(time.time()), 'CLOSE tree out; OPEN chip in + out'])
# flow 10 min (fill chip)
# CLOSE out_1 + in_1 (chip out + in); CLOSE Hep_1 (tree in)
log.append([time.ctime(time.time()), 'CLOSE chip out + in; CLOSE tree in'])
# incubate... WASH in meantime
# WASH
Z.move(42)
XY.move_xy(-8.9473,1.0592)
Z.move(62)
log.append([time.ctime(time.time()), 'AT wash'])
import time
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# config director must have "__init__.py" file
# from the 'config' directory, import the following classes:
from config import Motor, ASI_Controller, Autosipper
from config import utils as ut
autosipper = Autosipper(Motor('config/motor.yaml'), ASI_Controller('config/asi_controller.yaml'))
autosipper.coord_frames
# add/determine deck info
autosipper.coord_frames.deck.position_table = ut.read_delim_pd('config/position_tables/deck')
# check deck alignment
# CLEAR DECK OF OBSTRUCTIONS!!
autosipper.go_to('deck', ['name'],'align')
# add plate
from config import Manifold
manifold = Manifold('192.168.1.3', 'config/valvemaps/valvemap.csv', 512)
manifold.valvemap[manifold.valvemap.name>0]
# !!!! Also must have MM folder on system PATH
# mm_version = 'C:\Micro-Manager-1.4'
# cfg = 'C:\Micro-Manager-1.4\SetupNumber2_05102016.cfg'
mm_version = 'C:\Program Files\Micro-Manager-2.0beta'
cfg = 'C:\Program Files\Micro-Manager-2.0beta\Setup2_20170413.cfg'
import sys
sys.path.insert(0, mm_version) # make it so python can find MMCorePy
import MMCorePy
from PIL import Image
core = MMCorePy.CMMCore()
core.loadSystemConfiguration(cfg)
core.setProperty("Spectra", "White_Enable", "1")
core.waitForDevice("Spectra")
core.setProperty("Cam Andor_Zyla4.2", "Sensitivity/DynamicRange", "16-bit (low noise & high well capacity)") # NEED TO SET CAMERA TO 16 BIT (ceiling 12 BIT = 4096)
# core.initializeCircularBuffer()
# core.setCircularBufferMemoryFootprint(4096) # MiB
core.setConfig('Channel','1_PBP')
core.setProperty(core.getCameraDevice(), "Exposure", 300)
core.snapImage()
img = core.getImage()
plt.imshow(img,cmap='gray')
image = Image.fromarray(img)
image.save('TESTIMAGE.tif')
for i in range(5):
x = core.getXPosition()
y = core.getYPosition()
core.setXYPosition(x-1500,y)
core.waitForDevice(core.getXYStageDevice())
core.snapImage()
img = core.getImage()
image = Image.fromarray(img)
image.save('images/images_{}.tif'.format(i))
core.getFocusDevice()
core.getCameraDevice()
core.XYStageDevice()
core.getDevicePropertyNames(core.getCameraDevice())
# cv2.startWindowThread()
cv2.namedWindow('Video')
cv2.imshow('Video',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
core.stopSequenceAcquisition()
import cv2
cv2.namedWindow('Video')
core.startContinuousSequenceAcquisition(1)
while True:
img = core.getLastImage()
if core.getRemainingImageCount() > 0:
# img = core.popNextImage()
img = core.getLastImage()
cv2.imshow('Video', img)
cv2.waitKey(0)
else:
print('No frame')
if cv2.waitKey(20) >= 0:
break
cv2.destroyAllWindows()
core.stopSequenceAcquisition()
# core.reset()
autosipper.exit()
manifold.exit()
core.unloadAllDevices()
core.reset()
print 'closed'
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SETUP
Step2: Autosipper
Step3: Manifold
Step4: Micromanager
Step5: Preset
Step6: MM Get info
Step7: Video
Step8: EXIT
|
14,341 | <ASSISTANT_TASK:>
Python Code:
# Imports
from __future__ import print_function
from convert_frcmod import *
import openeye.oechem as oechem
import openeye.oeiupac as oeiupac
import openeye.oeomega as oeomega
import openeye.oedepict as oedepict
from IPython.display import display
from openff.toolkit.typing.engines.smirnoff.forcefield import *
from openff.toolkit.typing.engines.smirnoff.forcefield_utils import get_molecule_parameterIDs
from openff.toolkit.utils import *
%matplotlib inline
import matplotlib
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
import time
import IPython
import pickle
import glob
def depictAtomByIdx(mol_copy, atomIdxList, supH = True, width=900, height=500):
mol = oechem.OEMol(mol_copy)
OEGenerate2DCoordinates(mol)
atomBondSet = oechem.OEAtomBondSet()
for atom in mol.GetAtoms():
if atom.GetIdx() in atomIdxList:
atomBondSet.AddAtom( atom)
for bond in atom.GetBonds():
nbrAtom = bond.GetNbr(atom)
nbrIdx = nbrAtom.GetIdx()
if (nbrIdx in atomIdxList) and nbrIdx>atom.GetIdx():
atomBondSet.AddBond( bond)
from IPython.display import Image
dopt = oedepict.OEPrepareDepictionOptions()
dopt.SetDepictOrientation( oedepict.OEDepictOrientation_Horizontal)
dopt.SetSuppressHydrogens(supH)
oedepict.OEPrepareDepiction(mol, dopt)
opts = oedepict.OE2DMolDisplayOptions(width, height, oedepict.OEScale_AutoScale)
disp = oedepict.OE2DMolDisplay(mol, opts)
aroStyle = oedepict.OEHighlightStyle_Color
aroColor = oechem.OEColor(oechem.OEGrey)
oedepict.OEAddHighlighting(disp, aroColor, aroStyle,
oechem.OEIsAromaticAtom(), oechem.OEIsAromaticBond() )
hstyle = oedepict.OEHighlightStyle_BallAndStick
hcolor = oechem.OEColor(oechem.OELightGreen)
oedepict.OEAddHighlighting(disp, hcolor, hstyle, atomBondSet)
#ofs = oechem.oeosstream()
img = oedepict.OEImage(width, height)
oedepict.OERenderMolecule(img, disp)
#oedepict.OERenderMolecule(ofs, 'png', disp)
#ofs.flush()
#return Image(data = "".join(ofs.str()))
return Image(oedepict.OEWriteImageToString("png",img))
def getMolParamIDToAtomIndex( oemol, ff):
Take an OEMol and a SMIRNOFF force field object and return a dictionary,
keyed by parameter ID, where each entry is a tuple of
( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS
corresponding to that parameter ID and a list of the atom groups in that
molecule that parameter is applied to.
Parameters
----------
oemol : OEMol
OpenEye OEMol with the molecule to investigate.
ff : ForceField
SMIRNOFF ForceField object (obtained from an ffxml via ForceField(ffxml)) containing FF of interest.
Returns
-------
param_usage : dictionary
Dictionary, keyed by parameter ID, where each entry is a tuple of
( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS
corresponding to that parameter ID and a list of the atom groups in
that molecule that parameter is applied to.
labels = ff.labelMolecules([oemol])
param_usage = {}
for mol_entry in range(len(labels)):
for force in labels[mol_entry].keys():
for (atom_indices, pid, smirks) in labels[mol_entry][force]:
if not pid in param_usage:
param_usage[pid] = (smirks, [atom_indices])
else:
param_usage[pid][1].append( atom_indices )
return param_usage
def labels_to_pidDict(labels):
This method takes a set of SMIRNOFF force field labels and returns
a dictionary with information for each molecule at each force type
in the form:
{ force_type: {mol_index: {(indice tuple): pid, ...}, ... } }
force_type_dict = dict()
for idx, mol_dict in enumerate(labels):
for force_type, label_set in mol_dict.items():
if not force_type in force_type_dict:
force_type_dict[force_type] = dict()
force_type_dict[force_type][idx] = dict()
for (indices, pid, smirks) in label_set:
force_type_dict[force_type][idx][tuple(indices)] = {'pid': pid, 'smirks':smirks}
return force_type_dict
# Input and output info
#infile = 'example.frcmod' # smirnoffish frcmod file to convert
infile = 'smirnoffishFrcmod.parm99Frosst.txt' # smirnoffish frcmod file to convert
ffxmlFile = 'smirnoff99FrosstFrcmod.offxml'
template = 'template.offxml' # Template FFXML file without parameters (but with remainder of contents)
# Convert
# Already converted
convert_frcmod_to_ffxml( infile, template, ffxmlFile)
# Load SMIRNOFF FFXML
test_ff = ForceField(ffxmlFile) # We will use this below to access details of parameters
ref_ff = ForceField('test_forcefields/smirnoff99Frosst.offxml')
molecule_file = "DrugBank_tripos.mol2"
molecules = utils.read_molecules(molecule_file)
init = time.time()
test_labels = test_ff.labelMolecules(molecules)
ref_labels = ref_ff.labelMolecules(molecules)
t = (time.time() - init) / 60.0
print("Typed %i molecules with test and reference force fields in %.2f minutes" % (len(molecules), t))
# Make dictionary by molecule and tuple indices
init = time.time()
test_dict = labels_to_pidDict(test_labels)
ref_dict = labels_to_pidDict(ref_labels)
t = (time.time() - init) / 60.0
print("created indices tuple to pid dictionaries in %.2f minutes" % t)
# Make a dictionary to store mismatches:
mismatch = dict()
# This will have embedded dictionaries with this form:
# force_type: {mol_idx:{(index tuple): {test_pid, test_smirks, ref_pid, ref_smirks}}}
mismatch_count = dict()
# loop through force types
for force_type, test_mol_dict in test_dict.items():
if force_type not in mismatch:
mismatch[force_type] = dict()
if force_type not in mismatch_count:
mismatch_count[force_type] = 0
# loop through molecules in each force type
for mol_idx, test_tuple_dict in test_mol_dict.items():
if not mol_idx in mismatch[force_type]:
mismatch[force_type][mol_idx] = dict()
# loop through all atom indice tuples in this molecule
for indice_tuple, test_info in test_tuple_dict.items():
# compare pid assignment
test_pid = test_info['pid']
ref_pid = ref_dict[force_type][mol_idx][indice_tuple]['pid']
# if they don't match store info in mismatch dictionary and update count
if test_pid != ref_pid:
test_smirks = test_info['smirks']
ref_smirks = ref_dict[force_type][mol_idx][indice_tuple]['smirks']
mismatch[force_type][mol_idx][indice_tuple] = {'test_pid': test_pid, 'test_smirks': test_smirks,
'ref_pid': ref_pid, 'ref_smirks': ref_smirks}
mismatch_count[force_type] +=1
print("%-35s %s" % ("Force Type", "Number mismatches"))
print("-"*55)
for force_type, count in mismatch_count.items():
print("%-35s %i" % (force_type, count))
ForceType = "PeriodicTorsionGenerator"
for mol_idx, tuple_dict in mismatch[ForceType].items():
# only visualize molecules with mismatch indices
keys = [k for k in tuple_dict.keys()]
if len(keys) == 0:
continue
mol = OEMol(molecules[mol_idx])
print("Looking at molecule %i" % mol_idx)
for indice_tuple, pid_info in tuple_dict.items():
test_pid = pid_info['test_pid']
test_smirks = pid_info['test_smirks']
ref_pid = pid_info['ref_pid']
ref_smirks = pid_info['ref_smirks']
print("%-10s %-40s %-40s" % ('', 'test force field', 'reference force field'))
print("%-10s %-40s %-40s" % ('pid'))
print("%-10s %-30s %-10s %-30s" % (test_pid, test_smirks, ref_pid, ref_smirks))
display(depictAtomByIdx(mol, indice_tuple, supH = False))
print("\n")
print("\n")
print("-"*100)
print("\n")
# loop through force types
for force_type, test_mol_dict in test_dict.items():
# loop through molecules in each force type
for mol_idx, test_tuple_dict in test_mol_dict.items():
# loop through all atom indice tuples in this molecule
for indice_tuple, test_info in test_tuple_dict.items():
# compare pid assignment
test_pid = test_info['pid']
test_smirks = test_info['smirks']
# Check for 'R'
if 'R' in test_smirks:
print("Found 'R' in %s (%s)" % )
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Relevant methods
Step4: 1. Convert specified SMIRKS frcmod file to SMIRNOFF FFXML
Step5: 2. Load smirnoff99Frosst from current release
Step6: 3. Generate or take in a set of molecules in OpenEye OEMol format
Step7: 4. Identify any molecules not assigned the same parameters by both force fields
Step8: 5. Visualize mismatches by force type
Step9: Extra check for R
|
14,342 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn import svm
from sklearn import preprocessing
# Define a useful helper function to read in our PCL files and store the gene names,
# matrix of values, and sample names
# We'll use this function later, but we don't need to dig into how it works here.
def read_dataset(filename):
data_fh = open(filename)
samples = data_fh.readline().strip().split('\t') # sample ids tab delimited
gids = [] # gene ids will be stored here
genes_samples = [] # genes x samples -- gene major
for line in data_fh:
toks = line.strip().split('\t')
gids.append(toks[0]) # add gene id
vals = [float(x) for x in toks[1:]]
zarray = preprocessing.scale(vals) # make each gene's expression values comparable
genes_samples.append(zarray)
data_fh.close()
#because we want samples x genes instead of genes x samples, we need to transpose
samples_genes = np.transpose(np.array(genes_samples))
return {'genes': gids, 'matrix': samples_genes, 'samples': samples}
# Use the function that we defined to read in our dataset
bric = read_dataset('../29_Data_ML-II/METABRIC_dataset.pcl')
# Now we need to figure out which samples in metabric are tumors and which are normal.
# We will store this in status_list (matching each example in the dataset), so that we
# can provide this to scikit learn's SVM implementation.
status = {} # hold tumor/normal status encoded as 1 (tumor)/2 (normal)
label_fh = open('tumor_normal_label.txt')
for line in label_fh:
toks = line.strip().split()
if toks[1] == 'Tumor':
status[toks[0]] = 1
elif toks[1] == 'Normal':
status[toks[0]] = 2
status_list = []
for sample in bric['samples']:
status_list.append(status[sample])
# Now we're going to construct a classifier. First we need to set up our parameters
svm_classifier = svm.SVC(C=0.000001, kernel='linear')
# Once our parameters are set, we can fit the classifier to our data
svm_classifier.fit(bric['matrix'], status_list)
# Once we have our classifier, we can apply it back to the examples and get our score
# Since this is binary classification. We get an accuracy.
score = svm_classifier.score(bric['matrix'], status_list)
print("Training Accuracy: " + str(score))
## Load necessary Python packages
import numpy as np # numpy makes it convenient to load/modify matrices of data
import sklearn.linear_model as lm # this scikit learn module has code to fit a line
import matplotlib.pyplot as plt # this lets us plot our results
from sklearn.metrics import mean_squared_error # we use this to see how well our model fits data
%matplotlib inline
# This code will make our data by adding random noise to a linear relationship
# Simulate two variables x and y
# y=x+e, e is some noise
x = np.linspace(0., 2, 10)
y = x + 0.5*np.random.randn(len(x))
# This uses matplotlib to show points. You've seen a little bit of this before in the kmeans code
# We're using it for examples but you don't have to understand how this works.
# If you one day want to plot your results using python, you might want to keep this code
# as a reference.
plt.figure(figsize=(8,6))
plt.scatter(x[:100], y[:100])
plt.xlabel("x")
plt.ylabel("y")
#plt.plot(x, y)
# You don't need to know how this code works. We're not going to focus on regression
# during this course. You may want to have it to refer to in the future.
### simple regression
lr = lm.LinearRegression()
lr.fit(x[:,np.newaxis], y);
y_lr = lr.predict(x[:, np.newaxis])
### multiple regression
lrp = lm.LinearRegression()
lrp.fit(np.vander(x, N=10, increasing=True), y)
y_lrp = lrp.predict(np.vander(x, N=10, increasing=True))
x_plot = np.linspace(0., 2, 1000)
y_plot = lrp.predict(np.vander(x_plot, N=10, increasing=True))
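# For intuition (illustrative): np.vander with increasing=True builds the polynomial feature
# matrix, so each row is [1, x, x**2, x**3, ...]; the "multiple regression" above is therefore
# a degree-9 polynomial fit.
print(np.vander(x[:3], N=4, increasing=True))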
plt.figure(figsize=(8,6))
plt.scatter(x, y)
plt.plot(x, y_lr, 'g',label='Simple regression')
plt.title("Linear regression")
plt.plot(x_plot, y_plot,label='Multiple regression')
plt.legend(loc=2)
mean_squared_error(y, y_lr)
mean_squared_error(y, y_lrp)
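# mean_squared_error is just the average squared residual; the same value computed by hand
# for the simple regression fit (illustrative check):
print(np.mean((y - y_lr) ** 2))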
x_new = np.linspace(0., 2, 10)
y_new = x + 0.5*np.random.randn(len(x_new))
y_lr_new = lr.predict(x_new[:, np.newaxis])
y_lrp_new = lrp.predict(np.vander(x_new, N=10, increasing=True))
plt.figure(figsize=(8,6))
plt.scatter(x_new, y_new)
plt.plot(x, y_lr, 'g',label='Simple regression')
plt.title("Linear regression")
plt.plot(x_plot, y_plot,label='Multiple regression')
plt.legend(loc=2)
mean_squared_error(y_new, y_lr_new)
mean_squared_error(y_new, y_lrp_new)
# Let's read in the dataset and mark examples as tumor or normal depending on
# how they are annotated in the sample description file (BRCA.547.PAM50.SigClust.Subtypes.txt)
tcga = read_dataset('../29_Data_ML-II/TCGA_dataset.pcl')
tcga_status = {} # hold tumor/normal status encoded as 1 (tumor)/2 (normal)
label_fh = open('BRCA.547.PAM50.SigClust.Subtypes.txt')
for line in label_fh:
toks = line.strip().split()
if toks[1] == 'tumor-adjacent normal':
tcga_status[toks[0]] = 2
else:
tcga_status[toks[0]] = 1
tcga_status_list = []
for sample in tcga['samples']:
tcga_status_list.append(tcga_status[sample])
# The first lines here are just the code from above copied down for convenience.
# Now we're going to construct a classifier. First we need to set up our parameters
svm_classifier = svm.SVC(C=0.000000001, kernel='linear')
# Once our parameters are set, we can fit the classifier to our data
svm_classifier.fit(bric['matrix'], status_list)
# Once we have our classifier, we can apply it back to the examples and get our score
# Since this is binary classification. We get an accuracy.
score = svm_classifier.score(bric['matrix'], status_list)
print("Training Accuracy: " + str(score))
# Ok - now let's apply our classifier from before to these data:
tcga_score = svm_classifier.score(tcga['matrix'], tcga_status_list)
print("Testing Accuracy: " + str(tcga_score))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Congratulations! You've built your first SVM, and on training data it separates tumor data from normal data with over 90% accuracy! Now that we've done this with some biomedical data, let's take a step back and talk about things we should consider as we build a model.
Step2: Let's plot the data. The code in the box below will do this. As we can see, the relation between x and y is linear but with some random noise.
Step3: Next, we want to train linear regression models on x and use the models to predict y. The models we are going to use are a simple linear regression of y on x, and a multiple regression of y on powers of x up to degree 9 (built with np.vander).
Step4: Let's plot the fitting results.
Step5: Let's calculate the MSE for simple regression model
Step6: Let's calculate the MSE for multiple regression model
Step7: The multiple regression model fits the data perfectly (MSE is almost 0). The predicted values are exactly the same as the observed values, since the prediction curve goes through every point. The simple regression model, in contrast, captures the linear relation between x and y but does not reproduce the observed values exactly. So, should we choose the multiple regression model over the simple regression model just because the former fits the training data much better than the latter?
Step8: Let's plot the old models applied to the new data.
Step9: MSE for simple regression on new data
Step10: MSE for multiple regression on new data
Step11: The multiple regression model will almost certainly perform worse than the simple regression model on the new data (we don't know for sure in your case, because new data are simulated each time - check with your neighbors to see what they get as well, or feel free to clear and re-run the code to see another example). This is because the multiple regression model overfits the training data: it captures not only the true linear relation between x and y but also the random noise, whereas the simple regression model captures only the linear relation.
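To see the overfitting argument numerically, training MSE and new-data MSE can be put side by side. This is only an illustrative sketch, not part of the original exercise, and it assumes the lr, lrp, x, y, x_new and y_new objects defined in the code above.
# Sketch: compare training MSE with new-data MSE for both models
for label, model, design in [('simple', lr, lambda v: v[:, np.newaxis]),
                             ('multiple', lrp, lambda v: np.vander(v, N=10, increasing=True))]:
    mse_train = mean_squared_error(y, model.predict(design(x)))
    mse_new = mean_squared_error(y_new, model.predict(design(x_new)))
    print('%s regression: train MSE = %.3f, new-data MSE = %.3f' % (label, mse_train, mse_new))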
|
14,343 | <ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=None)
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, is_training):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None)
    conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Add placeholder to indicate whether or not we are training the model
is_training = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, is_training)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, is_training)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
bias = tf.Variable(tf.zeros(out_channels))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
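For the tf.nn TODO steps above, one possible shape of the lower-level batch normalization is sketched below. This is an assumption about the intended solution, not the official answer; it relies on is_training being a tf.bool placeholder as in the earlier cells and uses only tf.nn/tf core ops.
# Sketch: batch normalization built from tf.nn primitives (illustrative only)
def batch_norm_from_scratch(inputs, is_training, decay=0.99, epsilon=1e-3):
    depth = inputs.get_shape().as_list()[-1]
    gamma = tf.Variable(tf.ones([depth]))                       # learned scale
    beta = tf.Variable(tf.zeros([depth]))                       # learned shift
    pop_mean = tf.Variable(tf.zeros([depth]), trainable=False)  # running mean used at inference
    pop_var = tf.Variable(tf.ones([depth]), trainable=False)    # running variance used at inference
    def batch_norm_training():
        # normalize over the batch (and spatial dims for conv inputs)
        axes = list(range(inputs.get_shape().ndims - 1))
        batch_mean, batch_var = tf.nn.moments(inputs, axes)
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, epsilon)
    def batch_norm_inference():
        return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, epsilon)
    return tf.cond(is_training, batch_norm_training, batch_norm_inference)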
|
14,344 | <ASSISTANT_TASK:>
Python Code:
from azure.identity import AzureCliCredential
from azure.digitaltwins.core import DigitalTwinsClient
# using yaml instead of
import yaml
import uuid
# using altair instead of matplotlib for visuals
import numpy as np
import pandas as pd
# you will get this from the ADT resource at portal.azure.com
your_digital_twin_url = "home-test-twin.api.wcus.digitaltwins.azure.net"
azure_cli = AzureCliCredential()
service_client = DigitalTwinsClient(
your_digital_twin_url, azure_cli)
service_client
def query_ADT(query):
query_result = service_client.query_twins(query)
values = [i for i in query_result]
return values
def query_to_df(query):
query_result = query_ADT(query)
values = pd.DataFrame(query_result)
return values
query_expression = "SELECT * FROM digitaltwins"
query_to_df(query_expression)
query_expression = "SELECT * FROM digitaltwins where IS_OF_MODEL('dtmi:mymodels:patron;1')"
customers = query_to_df(query_expression)
customers.satisfaction.describe()
query_expression =
SELECT T, CT
FROM DIGITALTWINS T
JOIN CT RELATED T.locatedIn
WHERE CT.$dtId = 'line-2'
customers_in_area_2 = query_to_df(query_expression)
customers_in_area_2
customers_in_area_2.loc[0]
customers_in_area_2.loc[0,'CT']
customers_in_area_2.loc[0,'T']
l2_cust = pd.DataFrame(customers_in_area_2['T'].tolist())
l2_cust
l2_cust.satisfaction.describe()
customers.satisfaction.describe()
query =
SELECT COUNT()
FROM DIGITALTWINS T
JOIN CT RELATED T.locatedIn
WHERE CT.$dtId IN ['line-0','line-1','line-2', 'line-3']
customers_in_lines = query_to_df(query)
customers_in_lines
query =
SELECT COUNT()
FROM DIGITALTWINS T
JOIN CT RELATED T.locatedIn
WHERE CT.$dtId IN ['line-2']
customers_in_lines = query_to_df(query)
customers_in_lines
query =
SELECT line, customer
FROM DIGITALTWINS customer
JOIN line RELATED customer.locatedIn
WHERE line.$dtId IN ['line-0','line-1','line-2', 'line-3']
AND IS_OF_MODEL(customer, 'dtmi:mymodels:patron;1')
customers_in_lines = query_to_df(query)
customers_in_lines
c_in_line = pd.concat(
[pd.DataFrame(customers_in_lines['line'].tolist()),
pd.DataFrame(customers_in_lines['customer'].tolist())],
axis=1
)
cols = c_in_line.columns.tolist()
cols[0] = 'line'
cols[4] = 'customer'
c_in_line.columns = cols
c_in_line
c_in_line.groupby('line').count()['customer']
c_in_line.groupby('line').mean()['satisfaction']
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I'm going to set up a generic function that runs queries and gets the data. This will keep me from doing it over and over.
Step3: Note that the larger query will give you back all of the values, so you can pop it into a dataframe and filter on the $metadata to get the values you want
Step4: Ok let's unpack that
Step5: So let's look at the customers in line-2
Step8: Customers in line 2 have higher satisfaction than customers in general.
Step10: The rough part is that you can only get one count back, not a count per line, like you could with proper SQL. You also have to hard-code all of your $dtId values because they require literal values. Lame.
Step11: Easy enough to munge it into a dataframe
Step12: How many people are in each line
Step13: Which group of people has the highest satisfaction?
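Both of the last two summaries can also be combined into a single aggregation; the snippet below is just a convenience sketch (named aggregation needs a reasonably recent pandas) built on the c_in_line frame above.
# Sketch: customer count and mean satisfaction per line in one groupby
summary = c_in_line.groupby('line').agg(
    customers=('customer', 'count'),
    avg_satisfaction=('satisfaction', 'mean'))
summary.sort_values('avg_satisfaction', ascending=False)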
|
14,345 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.covariance import LedoitWolf
import seaborn as sns
import statsmodels.api as sm
tickers = ['WFC', 'JPM', 'USB', 'XOM', 'BHI', 'SLB'] # The securities we want to go long on
historical_prices = get_pricing(tickers, start_date='2015-01-01',end_date='2017-02-22') # Obtain prices
rets = historical_prices['close_price'].pct_change().fillna(0) # Calculate returns
lw_cov = LedoitWolf().fit(rets).covariance_ # Calculate Ledoit-Wolf estimator
def extract_corr_from_cov(cov_matrix):
# Linear algebra result:
# https://math.stackexchange.com/questions/186959/correlation-matrix-from-covariance-matrix
d = np.linalg.inv(np.diag(np.sqrt(np.diag(cov_matrix))))
corr = d.dot(cov_matrix).dot(d)
return corr
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.tight_layout()
corr = extract_corr_from_cov(lw_cov)
# Plot prices
left = historical_prices['close_price'].plot(ax=ax1)
# Plot covariance as a heat map
right = sns.heatmap(corr, ax=ax2, fmt='d', vmin=-1, vmax=1, xticklabels=tickers, yticklabels=tickers)
average_corr = np.mean(corr[np.triu_indices_from(corr, k=1)])
print 'Average pairwise correlation: %.4f' % average_corr
tickers = ['WFC', 'JPM', 'USB', 'SPY', 'XOM', 'BHI', 'SLB' ] # The securities we want to go long on plus SPY
historical_prices = get_pricing(tickers, start_date='2015-01-01',end_date='2017-02-22') # Obtain prices
rets = historical_prices['close_price'].pct_change().fillna(0) # Calculate returns
market = rets[symbols(['SPY'])]
stock_rets = rets.drop(symbols(['SPY']), axis=1)
residuals = stock_rets.copy()*0
for stock in stock_rets.columns:
model = sm.OLS(stock_rets[stock], market.values)
results = model.fit()
residuals[stock] = results.resid
lw_cov = LedoitWolf().fit(residuals).covariance_ # Calculate Ledoit-Wolf Estimator
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.tight_layout()
corr = extract_corr_from_cov(lw_cov)
left = (1+residuals).cumprod().plot(ax=ax1)
right = sns.heatmap(corr, ax=ax2, fmt='d', vmin=-1, vmax=1, xticklabels=tickers, yticklabels=tickers)
average_corr = np.mean(corr[np.triu_indices_from(corr, k=1)])
print 'Average pairwise correlation: %.4f' % average_corr
tickers = ['WFC', 'JPM', 'USB', 'XLF', 'SPY', 'XOM', 'BHI', 'SLB', 'XLE']
historical_prices = get_pricing(tickers, start_date='2015-01-01',end_date='2017-02-22')
rets = historical_prices['close_price'].pct_change().fillna(0)
# Get market hedge ticker
mkt = symbols(['SPY'])
# Get sector hedge tickers
sector_1_hedge = symbols(['XLF'])
sector_2_hedge = symbols(['XLE'])
# Identify securities for each sector
sector_1_stocks = symbols(['WFC', 'JPM', 'USB'])
sector_2_stocks = symbols(['XOM', 'BHI', 'SLB'])
market_rets = rets[mkt]
sector_1_rets = rets[sector_1_hedge]
sector_2_rets = rets[sector_2_hedge]
stock_rets = rets.drop(symbols(['XLF', 'SPY', 'XLE']), axis=1)
residuals_market = stock_rets.copy()*0
residuals = stock_rets.copy()*0
# Calculate market beta of sector 1 benchmark
model = sm.OLS(sector_1_rets.values, market_rets.values)
results = model.fit()
sector_1_excess = results.resid
# Calculate market beta of sector 2 benchmark
model = sm.OLS(sector_2_rets.values, market_rets.values)
results = model.fit()
sector_2_excess = results.resid
for stock in sector_1_stocks:
# Calculate market betas for sector 1 stocks
    model = sm.OLS(stock_rets[stock], market_rets.values)
results = model.fit()
# Calculate residual of security + market hedge
residuals_market[stock] = results.resid
# Calculate sector beta for previous residuals
model = sm.OLS(residuals_market[stock], sector_1_excess)
results = model.fit()
# Get final residual
residuals[stock] = results.resid
for stock in sector_2_stocks:
# Calculate market betas for sector 2 stocks
    model = sm.OLS(stock_rets[stock], market_rets.values)
results = model.fit()
# Calculate residual of security + market hedge
residuals_market[stock] = results.resid
# Calculate sector beta for previous residuals
model = sm.OLS(residuals_market[stock], sector_2_excess)
results = model.fit()
# Get final residual
residuals[stock] = results.resid
# Get covariance of residuals
lw_cov = LedoitWolf().fit(residuals).covariance_
fig, (ax1, ax2) = plt.subplots(ncols=2)
fig.tight_layout()
corr = extract_corr_from_cov(lw_cov)
left = (1+residuals).cumprod().plot(ax=ax1)
right = sns.heatmap(corr, ax=ax2, fmt='d', vmin=-1, vmax=1, xticklabels=tickers, yticklabels=tickers)
average_corr = np.mean(corr[np.triu_indices_from(corr, k=1)])
print 'Average pairwise correlation: %.4f' % average_corr
def buckle_BR_const(N, rho):
return N/(1 + rho*(N - 1))
corr = np.linspace(start=0, stop=1.0, num=500)
plt.plot(corr, buckle_BR_const(6, corr))
plt.title('Effective Breadth as a function of Forecast Correlation (6 Stocks)')
plt.ylabel('Effective Breadth (Number of Bets)')
plt.xlabel('Forecast Correlation');
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Fundamental Law of Asset Management
Step2: The result here is that we have six bets and they are all very highly correlated.
Step3: The beta hedge has brought down the average correlation significantly. Theoretically, this should improve our breadth. It is obvious that we are left with two highly correlated clusters, however. Let's see what happens when we hedge the sector risk.
Step4: There we go! The sector hedge brought down the correlation between our bets to close to zero.
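Plugging a few correlation levels into Buckle's formula makes the gain in effective breadth concrete; the correlations below are illustrative placeholders (your measured values will differ from run to run) and the sketch reuses the buckle_BR_const function defined above.
# Sketch: effective breadth of 6 bets at example forecast correlations
for rho in [0.6, 0.2, 0.0]:  # roughly: unhedged, beta-hedged, sector-hedged (made-up illustrative values)
    print('rho = %.2f -> effective breadth = %.2f bets' % (rho, buckle_BR_const(6, rho)))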
|
14,346 | <ASSISTANT_TASK:>
Python Code:
import processing.procore as pcore
import features.socios as s
tbls = pcore.loadTables()
print("Stored Data Tables\n")
for k in sorted(list(tbls.keys())):
print(k)
tbls['questionaires'][tbls['questionaires'].QuestionaireID.isin([3, 4, 6, 7, 1000000, 1000001, 1000002])]
searchterm = ['earn per month', 'watersource', 'GeyserNumber', 'GeyserBroken', 'roof', 'wall', 'main switch', 'floor area']
questionaire_id = 3
s.searchQuestions(searchterm, questionaire_id)
searchterm = ['earn per month', 'watersource', 'GeyserNumber', 'GeyserBroken', 'roof', 'wall', 'main switch', 'floor area']
questionaire_id = 3
answers = s.searchAnswers(searchterm, questionaire_id)
print(answers[1])
answers[0].head()
s.recorderLocations(year = 2011)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List of Questionaires
Step2: Search Questions
Step3: Search Answers
Step4: List of Site Locations and Corresponding RecorderIDs by Year
|
14,347 | <ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import urllib2
import scipy.stats as stats
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")[1:] # Remove lable row
# Clip data based on thresholds on x and y coordinates. Found from Bijan visual
x_bounds = (409, 3529)
y_bounds = (1564, 3124)
def check_in_bounds(row, x_bounds, y_bounds):
if row[0] < x_bounds[0] or row[0] > x_bounds[1]: #check x inbounds
return False
if row[1] < y_bounds[0] or row[1] > y_bounds[1]: #check y inbounds
return False
if row[3] == 0: # remove zeros of unmasked values
return False
return True
indices_in_bound = np.where(np.apply_along_axis(check_in_bounds, 1, csv, x_bounds, y_bounds))
data_clipped = csv[indices_in_bound]
density = np.divide(data_clipped[:, 4],data_clipped[:, 3])*(64**3)
data_density = np.vstack((data_clipped[:,0],data_clipped[:,1],data_clipped[:,2],density))
data_density = data_density.T
print 'End removing zero unmasked and removing image edges'
#edges of k-means, k = 3 clusters
cluster3x1bounds = (409,1500)
cluster3x2bounds = (1501,2500)
cluster3x3bounds = (2501,3529)
#edges of k-means, k = 4 clusters
cluster4x1bounds = (409,1100)
cluster4x4bounds = (2750,3529)
def check_in_cluster(row, x_bounds):
if row[0] < x_bounds[0] or row[0] > x_bounds[1]: #check x inbounds
return False
return True
indices_in_bound = np.where(np.apply_along_axis(check_in_cluster, 1, data_density, cluster3x1bounds))
data_k3_1 = data_density[indices_in_bound]
indices_in_bound = np.where(np.apply_along_axis(check_in_cluster, 1, data_density, cluster3x2bounds))
data_k3_2 = data_density[indices_in_bound]
indices_in_bound = np.where(np.apply_along_axis(check_in_cluster, 1, data_density, cluster3x3bounds))
data_k3_3 = data_density[indices_in_bound]
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVR
from sklearn.neighbors import KNeighborsRegressor as KNN
from sklearn.ensemble import RandomForestRegressor as RF
from sklearn.preprocessing import PolynomialFeatures as PF
from sklearn.pipeline import Pipeline
from sklearn import cross_validation
names = ['Linear Regression','SVR','KNN Regression','Random Forest Regression','Polynomial Regression']
regressions = [LinearRegression(),
LinearSVR(C=1.0),
KNN(n_neighbors=10, algorithm='auto'),
RF(max_depth=5, max_features=1),
Pipeline([('poly', PF(degree=2)),('linear', LinearRegression(fit_intercept=False))])]
k_fold = 10
print('Regression on X=(x,y,z), Y=syn/unmasked')
X = data_k3_1[:, (0, 1, 2)] # x,y,z
Y = data_k3_1[:, 3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# x
print
print('Regressions on x and density')
X = data_k3_1[:,[0]] # x,y,z
Y = data_k3_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# y
print
print('Regression on y and density')
X = data_k3_1[:,[1]] # x,y,z
Y = data_k3_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# z -> syn/unmasked
print
print('Regression on z and density')
X = data_k3_1[:,[2]] # x,y,z
Y = data_k3_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
print('Regression on X=(x,y,z), Y=syn/unmasked')
X = data_k3_2[:, (0, 1, 2)] # x,y,z
Y = data_k3_2[:, 3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# x
print
print('Regressions on x and density')
X = data_k3_2[:,[0]] # x,y,z
Y = data_k3_2[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# y
print
print('Regression on y and density')
X = data_k3_2[:,[1]] # x,y,z
Y = data_k3_2[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# z -> syn/unmasked
print
print('Regression on z and density')
X = data_k3_2[:,[2]] # x,y,z
Y = data_k3_2[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
print('Regression on X=(x,y,z), Y=syn/unmasked')
X = data_k3_3[:, (0, 1, 2)] # x,y,z
Y = data_k3_3[:, 3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# x
print
print('Regressions on x and density')
X = data_k3_3[:,[0]] # x,y,z
Y = data_k3_3[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# y
print
print('Regression on y and density')
X = data_k3_3[:,[1]] # x,y,z
Y = data_k3_3[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# z -> syn/unmasked
print
print('Regression on z and density')
X = data_k3_3[:,[2]] # x,y,z
Y = data_k3_3[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
#edges of k-means, k = 4 clusters, Not sure how to get full clusters
cluster4x1bounds = (409,1100)
cluster4x4bounds = (2750,3529)
def check_in_cluster(row, x_bounds):
if row[0] < x_bounds[0] or row[0] > x_bounds[1]: #check x inbounds
return False
return True
indices_in_bound = np.where(np.apply_along_axis(check_in_cluster, 1, data_density, cluster4x1bounds))
data_k4_1 = data_density[indices_in_bound]
indices_in_bound = np.where(np.apply_along_axis(check_in_cluster, 1, data_density, cluster4x4bounds))
data_k4_4 = data_density[indices_in_bound]
print('Regression on X=(x,y,z), Y=syn/unmasked')
X = data_k4_1[:, (0, 1, 2)] # x,y,z
Y = data_k4_1[:, 3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# x
print
print('Regressions on x and density')
X = data_k4_1[:,[0]] # x,y,z
Y = data_k4_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# y
print
print('Regression on y and density')
X = data_k4_1[:,[1]] # x,y,z
Y = data_k4_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# z -> syn/unmasked
print
print('Regression on z and density')
X = data_k4_1[:,[2]] # x,y,z
Y = data_k4_1[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
print('Regression on X=(x,y,z), Y=syn/unmasked')
X = data_k4_4[:, (0, 1, 2)] # x,y,z
Y = data_k4_4[:, 3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# x
print
print('Regressions on x and density')
X = data_k4_4[:,[0]] # x,y,z
Y = data_k4_4[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# y
print
print('Regression on y and density')
X = data_k4_4[:,[1]] # x,y,z
Y = data_k4_4[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
# z -> syn/unmasked
print
print('Regression on z and density')
X = data_k4_4[:,[2]] # x,y,z
Y = data_k4_4[:,3] # syn/unmasked
for idx2, reg in enumerate(regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
import sklearn.mixture as mixture
n_clusters = 4
###########################################
gmm = mixture.GMM(n_components=n_clusters, n_iter=1000, covariance_type='diag', random_state=1)
clusters = [[] for i in xrange(n_clusters)]
centroidmatrix = [0]*4
print centroidmatrix
predicted = gmm.fit_predict(data_density)
for label, row in zip(predicted, data_density[:,]):
clusters[label].append(row)
for i in xrange(n_clusters):
clusters[i] = np.array(clusters[i])
print "# of samples in cluster %d: %d" % (i+1, len(clusters[i]))
print "centroid: ", np.average(clusters[i], axis=0)
centroidmatrix = np.vstack((centroidmatrix,np.average(clusters[i], axis=0)))
# print "cluster covariance: "
covar = np.cov(clusters[i].T)
# print covar
print "determinant of covariance matrix: ", np.linalg.det(covar)
print
centroidmatrix = np.delete(centroidmatrix, (0), axis = 0)
print centroidmatrix
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
ax.view_init()
ax.dist = 10 # distance
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_title('Scatter Plot of Centroids, size weighted by density')
ax.set_xticks(np.arange(500, 3500, 500))
ax.set_yticks(np.arange(1200,3200, 500))
ax.set_zticks(np.arange(0,1200, 150))
ax.scatter(
centroidmatrix[:, 0], centroidmatrix[:, 1], centroidmatrix[:, 2], # data
c='blue', # marker colour
marker='o', # marker shape
s=centroidmatrix[:,3]*10 # marker size
)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
# Random Sample
samples = 20000
perm = np.random.permutation(xrange(1, len(data_density[:])))
data_density_sample = data_density[perm[:samples]]
data_uniques, UIndex, UCounts = np.unique(data_density_sample[:,2], return_index = True, return_counts = True)
print 'uniques'
print 'index: ' + str(UIndex)
print 'counts: ' + str(UCounts)
print 'values: ' + str(data_uniques)
xmin = data_density_sample[:,0].min()
xmax = data_density_sample[:,0].max()
ymin = data_density_sample[:,1].min()
ymax = data_density_sample[:,1].max()
def check_z(row):
if row[2] == 55:
return True
return False
index_true = np.where(np.apply_along_axis(check_z, 1, data_density_sample))
dds55 = data_density_sample[index_true]
data_uniques, UIndex, UCounts = np.unique(dds55[:,2], return_index = True, return_counts = True)
print 'uniques check'
print 'index: ' + str(UIndex)
print 'counts: ' + str(UCounts)
print 'values: ' + str(data_uniques)
#plt.subplots_adjust(hspace=1)
#plt.subplot(121)
plt.hexbin(dds55[:,0], dds55[:,1], cmap=plt.cm.YlOrRd_r,mincnt=1)
plt.axis([xmin, xmax, ymin, ymax])
plt.title("Hexagon binning")
plt.xlabel('x coordinates')
plt.ylabel('y coordinates')
cb = plt.colorbar()
cb.set_label('density')
plt.show()
def check_z(row):
if row[2] == 166:
return True
return False
index_true = np.where(np.apply_along_axis(check_z, 1, data_density_sample))
ddsZ = data_density_sample[index_true]
plt.hexbin(ddsZ[:,0], ddsZ[:,1], cmap=plt.cm.YlOrRd_r,mincnt=1)
plt.axis([xmin, xmax, ymin, ymax])
plt.title("Hexagon binning")
plt.xlabel('x coordinates')
plt.ylabel('y coordinates')
cb = plt.colorbar()
cb.set_label('density')
plt.show()
def check_z(row):
if row[2] == 277:
return True
return False
index_true = np.where(np.apply_along_axis(check_z, 1, data_density_sample))
ddsZ = data_density_sample[index_true]
plt.hexbin(ddsZ[:,0], ddsZ[:,1], cmap=plt.cm.YlOrRd_r,mincnt=1)
plt.axis([xmin, xmax, ymin, ymax])
plt.title("Hexagon binning")
plt.xlabel('x coordinates')
plt.ylabel('y coordinates')
cb = plt.colorbar()
cb.set_label('density')
plt.show()
def check_z(row):
if row[2] == 388:
return True
return False
index_true = np.where(np.apply_along_axis(check_z, 1, data_density_sample))
ddsZ = data_density_sample[index_true]
plt.hexbin(ddsZ[:,0], ddsZ[:,1], cmap=plt.cm.YlGnBu,mincnt=1)
plt.axis([xmin, xmax, ymin, ymax])
plt.title("Hexagon binning")
plt.xlabel('x coordinates')
plt.ylabel('y coordinates')
cb = plt.colorbar()
cb.set_label('density')
plt.show()
def check_spike(row):
if row[3] > 0.0013 and row[3] < 0.00135 :
return True
return False
index_true = np.where(np.apply_along_axis(check_spike, 1, data_density))
spike = data_density[index_true]
print "Spike"
print "length: ", len(spike)
print "Mean: ", np.mean(spike)
print "Median: ", np.mean(spike)
print "STD: ", np.std(spike)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up different sections of data based on clusters of 3 and regressions
Step2: Start regressions in cluster
Step3: 1.2) 2nd cluster
Step4: 1.3) 3rd cluster
Step5: 2) Set up 4 clusters
Step6: 2.1) 4 cluster, section 1
Step7: 2.2) 4 cluster, 4th section
Step8: 3) Cluster Explore
Step9: 4) Hex plot
Step10: 5) Stats of spike shown to Jovo
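The per-cluster regression cells above repeat the same cross-validation loop; a small helper like the sketch below (assuming the names, regressions and cross_validation objects defined earlier in this notebook) would run the whole battery for any cluster array.
# Sketch: run the R^2 cross-validation battery for one cluster of (x, y, z, density) rows
def run_regressions(cluster_data, k_fold=10):
    X = cluster_data[:, (0, 1, 2)]
    Y = cluster_data[:, 3]
    for name, reg in zip(names, regressions):
        scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
        print("R^2 of %s: \t %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
run_regressions(data_k3_1)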
|
14,348 | <ASSISTANT_TASK:>
Python Code:
def lenet(num_classes):
import mxnet as mx
data = mx.symbol.Variable('data')
# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max", kernel=(2,2), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=num_classes)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
train = h2o.import_file("../../bigdata/laptop/mnist/train.csv.gz")
test = h2o.import_file("../../bigdata/laptop/mnist/test.csv.gz")
predictors = list(range(0,784))
resp = 784
train[resp] = train[resp].asfactor()
test[resp] = test[resp].asfactor()
nclasses = train[resp].nlevels()[0]
model = lenet(nclasses)
model_path = "/tmp/symbol_lenet-py.json"
model.save(model_path)
#!head "/tmp/symbol_lenet-py.json"
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3,
mini_batch_size=64,
network='user',
network_definition_file=model_path,
image_shape=[28,28], channels=1)
model.train(x=predictors,y=resp, training_frame=train, validation_frame=test)
model.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DeepWater for MXNET
Step2: Let's create the lenet model architecture from scratch using the MXNet Python API
Step3: To import the model inside the DeepWater training engine we need to save the model to a file
Step4: The model is just the structure of the network expressed as a json dict
Step5: Importing the LeNET model architecture for training in H2O
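To sanity-check what was written to disk, the saved symbol file can be opened as ordinary JSON; the snippet below is only a sketch and assumes the usual MXNet symbol layout, which stores the graph under a 'nodes' key.
# Sketch: list the layer ops recorded in the exported MXNet symbol JSON
import json
with open(model_path) as fh:
    sym_json = json.load(fh)
print([node["op"] for node in sym_json["nodes"] if node["op"] != "null"])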
|
14,349 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
print(dir(np.random))
%pylab inline
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'font.size': 20})
rdata = np.random.randn(1000)
fig = plt.figure(figsize=(6, 4))
plt.hist(rdata)
print(np.mean(rdata), np.median(rdata), np.std(rdata))
np.std(np.random.randn(1000) + np.random.randn(1000))
randexp = np.random.exponential(2., size=(1000))
hist(randexp, np.linspace(0,10,50));
randps = np.random.poisson(10, size=(10000,))
hist(randps, np.arange(20));
M = 4.
m = 15.
merr = 0.1
rand_m = np.random.randn(1000)*0.1+m
hist(rand_m);
rand_d = 10.**(1+0.2*(rand_m-M))
hist(rand_d, np.linspace(1300, 1900, 30));
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'font.size':20})
# fig = plt.figure(figsize=(10,10))
x = np.linspace(0, 6*np.pi, 100)
plt.plot(x, np.cos(x), 'rv--');
plt.plot(x, np.sin(x), 'bs-.', alpha=.1);
plt.scatter(x, np.cos(x)+0.2, s=np.random.rand(*x.shape)*80, c=np.sin(x)+1)
# use numpy.savetxt & numpy.loadtxt
a = np.random.randn(4, 5)
print(a)
np.savetxt('./data/text/rdata.dat', a)
!gedit ./data/text/rdata.dat
b = np.loadtxt('./data/text/rdata.dat')
print(b)
a==b.reshape(4, 5)
impath = "./data/image_data/G178_final.850.fits"
%pylab inline
from matplotlib import rcParams
rcParams.update({'font.size': 20})
from aplpy import FITSFigure
fig = FITSFigure(impath)
fig.show_colorscale()
impath = "./data/wise_image/w1_cut.fits"
%pylab inline
%matplotlib inline
from matplotlib import rcParams
rcParams.update({'font.size': 20})
from aplpy import FITSFigure
fig = FITSFigure(impath)
fig.show_colorscale()
fig.show_grayscale()
ls ./data/lamost_dr2_spectra/
specpath = "./data/lamost_dr2_spectra/spec-55892-F9205_sp09-174.fits"
from astropy.io import fits
hl = fits.open(specpath)
hl.info()
hl
hl[0]
hl[0].header
wave = 10.**(hl[0].header['CRVAL1']+np.arange(hl[0].header['NAXIS1'])*hl[0].header['CD1_1'])
wave
np.log10(wave)
flux = hl[0].data # [flux, ivar, wave, and_mask, or_mask]
flux
%pylab
%matplotlib inline
fig = figure(figsize=(10, 5))
plt.plot(wave, flux[0, :])
# fig.savefig("here goes the file path")
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: how to draw samples from a gaussian distribution
Step2: other distributions ...
Step3: $\log_{10}(d) = 1 + \mu /5 $ (a worked numerical check of this relation follows after this list)
Step4: 2. plotting
Step5: 3. IO (text files & fits files)
Step6: LAMOST spectra
Step7: CRVAL1 = 3.5682 / Central wavelength (log10) of first pixel
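A quick numerical check of the distance-modulus relation in Step 3: since the distance modulus is mu = m - M = 5*log10(d) - 5, we have log10(d) = 1 + mu/5; with M = 4 and m = 15 as in the code above, mu = 11 and d = 10**3.2, roughly 1585 in the same units as rand_d, which sits in the middle of the simulated distance histogram. The two lines below just verify that number.
# Sketch: central distance implied by M = 4, m = 15 via the distance modulus
M, m = 4., 15.
print(10.**(1 + 0.2*(m - M)))  # ~1584.9, consistent with the 1300-1900 histogram range above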
|
14,350 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import pymc as pm
import seaborn as sb
import matplotlib.pyplot as plt
def sample_path(rho, sigma, T, y0=None):
'''
Simulates the sample path for y of length T+1 starting from a specified initial value OR if y0
is None, it initializes the path with a draw from the stationary distribution of y.
Arguments
-----------------
rho (Float) : AR coefficient
sigma (Float) : standard deviation of the error
T (Int) : length of the sample path without x0
y0 (Float) : initial value of X
Return:
-----------------
y_path (Numpy Array) : simulated path
'''
if y0 == None:
stdev_erg = sigma / np.sqrt(1 - rho**2)
y0 = np.random.normal(0, stdev_erg)
y_path = np.empty(T+1)
y_path[0] = y0
eps_path = np.random.normal(0, 1, T)
for t in range(T):
y_path[t + 1] = rho * y_path[t] + sigma * eps_path[t]
return y_path
#-------------------------------------------------------
# Pick true values:
rho_true, sigma_x_true, T = 0.5, 1.0, 20
#np.random.seed(1453534)
sample = sample_path(rho_true, sigma_x_true, T)
# Priors:
rho = pm.Uniform('rho', lower = -1, upper = 1) # note the capitalized distribution name (rule for pymc distributions)
sigma_x = pm.InverseGamma('sigma_x', alpha = 3, beta = 1)
# random() method
print('Initialization:')
print("Current value of rho = {: f}".format(rho.value.reshape(1,)[0]))
print("Current logprob of rho = {: f}".format(rho.logp))
rho.random()
print('\nAfter redrawing:')
print("Current value of rho = {: f}".format(rho.value.reshape(1,)[0]))
print("Current logprob of rho = {: f}".format(rho.logp))
@pm.deterministic(trace = False)
def y0_stdev(rho = rho, sigma = sigma_x):
return sigma / np.sqrt(1 - rho**2)
# Alternatively:
#y0_stdev = pm.Lambda('y0_stdev', lambda r = rho, s = sigma_x: s / np.sqrt(1 - r**2) )
# For elementary operators simply write
mu_y = rho * sample[:-1]
print(type(mu_y))
# You could also write, to generate a list of Determinisitc functions
#MU_y = [rho * sample[j] for j in range(T)]
#print(type(MU_y))
#print(type(MU_y[1]))
#MU_y = pm.Container(MU_y)
#print(type(MU_y))
y0_stdev.parents
y0_stdev.parents['rho'].value
rho.random()
y0_stdev.parents['rho'].value # if the parent is a pymc variable, the current value will be always 'updated'
print("Current value of y0_stdev = {: f}".format(y0_stdev.value))
rho.random()
print('\nAfter redrawing rho:')
print("Current value of y0_stdev = {: f}".format(y0_stdev.value))
print("Current value of mu_y:")
print(mu_y.value[:4])
rho.random()
print('\nAfter redrawing rho:')
print("Current value of mu_y:")
print(mu_y.value[:4])
y0 = pm.Normal('y0', mu = 0.0, tau = 1 / y0_stdev, observed = True, value = sample[0])
Y = pm.Normal('Y', mu = mu_y, tau = 1 / sigma_x, observed=True, value = sample[1:])
Y.value
Y.parents['tau'].value
sigma_x.random()
print(Y.parents['tau'].value)
Y.value
Y_alt = np.empty(T + 1, dtype = object)
Y_alt[0] = y0 # definition of y0 is the same as above
for i in range(1, T + 1):
Y_alt[i] = pm.Normal('y_{:d}'.format(i), mu = mu_y[i-1], tau = 1 / sigma_x)
print(type(Y_alt))
Y_alt
Y_alt = pm.Container(Y_alt)
type(Y_alt)
ar1_model = pm.Model([rho, sigma_x, y0, Y, y0_stdev, mu_y])
ar1_model.stochastics # notice that this is an unordered set (!)
ar1_model.deterministics
M = pm.MCMC(ar1_model)
M.step_method_dict
# draw a sample of size 20,000, drop the first 1,000 and keep only every 5th draw
M.sample(iter = 50000, burn = 1000, thin = 5)
M.step_method_dict
M.trace('rho')[:20]
M.trace('sigma_x')[:].shape
sigma_sample = M.trace('sigma_x')[:]
rho_sample = M.trace('rho')[:]
fig, ax = plt. subplots(1, 2, figsize = (15, 5))
ax[0].plot(sigma_sample)
ax[1].hist(sigma_sample)
from pymc.Matplot import plot as fancy_plot
fancy_plot(M.trace('rho'))
M.stats('rho')
# Try also:
#M.summary()
N = len(rho_sample)
rho_pr = [rho.random() for i in range(N)]
sigma_pr = [sigma_x.random() for i in range(N)]
Prior = np.vstack([rho_pr, sigma_pr]).T
Posterior = np.vstack([rho_sample, sigma_sample]).T
fig, bx = plt.subplots(1, 2, figsize = (17, 10), sharey = True)
sb.kdeplot(Prior, shade = True, cmap = 'PuBu', ax = bx[0])
bx[0].patch.set_facecolor('white')
bx[0].collections[0].set_alpha(0)
bx[0].axhline(y = sigma_x_true, color = 'DarkRed', lw =2)
bx[0].axvline(x = rho_true, color = 'DarkRed', lw =2)
bx[0].set_xlabel(r'$\rho$', fontsize = 18)
bx[0].set_ylabel(r'$\sigma_x$', fontsize = 18)
bx[0].set_title('Prior', fontsize = 20)
sb.kdeplot(Posterior, shade = True, cmap = 'PuBu', ax = bx[1])
bx[1].patch.set_facecolor('white')
bx[1].collections[0].set_alpha(0)
bx[1].axhline(y = sigma_x_true, color = 'DarkRed', lw =2)
bx[1].axvline(x = rho_true, color = 'DarkRed', lw =2)
bx[1].set_xlabel(r'$\rho$', fontsize = 18)
bx[1].set_ylabel(r'$\sigma_x$', fontsize = 18)
bx[1].set_title('Posterior', fontsize = 20)
plt.xlim(-1, 1)
plt.ylim(0, 1.5)
plt.tight_layout()
plt.savefig('beamer/prior_post.pdf')
rho_grid = np.linspace(-1, 1, 100)
sigmay_grid = np.linspace(0, 1.5, 100)
U = sp.stats.uniform(-1, 2)
IG = sp.stats.invgamma(3)
fig2, cx = plt.subplots(2, 2, figsize = (17, 12), sharey = True)
cx[0, 0].plot(rho_grid, U.pdf(rho_grid), 'r-', lw = 3, alpha = 0.6, label = r'$\rho$ prior')
cx[0, 0].set_title(r"Marginal prior for $\rho$", fontsize = 18)
cx[0, 0].axvline(x = rho_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\rho$')
cx[0, 0].legend(loc='best', fontsize = 16)
cx[0, 0].set_xlim(-1, 1)
sb.distplot(rho_sample, ax = cx[0,1], kde_kws={"color": "r", "lw": 3, "label": r"$\rho$ posterior"})
cx[0, 1].set_title(r"Marginal posterior for $\rho$", fontsize = 18)
cx[0, 1].axvline(x = rho_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\rho$')
cx[0, 1].legend(loc='best', fontsize = 16)
cx[0, 1].set_xlim(-1, 1)
cx[1, 0].plot(sigmay_grid, IG.pdf(sigmay_grid), 'r-', lw=3, alpha=0.6, label=r'$\sigma_y$ prior')
cx[1, 0].set_title(r"Marginal prior for $\sigma_y$", fontsize = 18)
cx[1, 0].axvline(x = sigma_x_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\sigma_y$')
cx[1, 0].legend(loc = 'best', fontsize = 16)
cx[1, 0].set_xlim(0, 3)
sb.distplot(sigma_sample, ax = cx[1,1], kde_kws={"color": "r", "lw": 3, "label": r"$\sigma_y$ posterior"})
cx[1, 1].set_title(r"Marginal posterior for $\sigma_y$", fontsize = 18)
cx[1, 1].axvline(x = sigma_x_true, color = 'DarkRed', lw = 2, linestyle = '--', label = r'True $\sigma_y$')
cx[1, 1].legend(loc = 'best', fontsize = 16)
cx[1, 1].set_xlim(0, 3)
plt.tight_layout()
plt.savefig('beamer/marginal_prior_post.pdf')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Probabilistic model
Step2: Probabilistic models in pymc
Step3: 2) Deterministic variable
Step4: (b) Conditional mean of $y_t$, $\mu_y$, is a deterministic function of $\rho$ and $y_{t-1}$
Step5: Let's see the parents of y0_stdev...
Step6: Notice that this is a dictionary, so for example...
Step7: ... and as we alter the parent's value, the child's value changes accordingly
Step8: and similarly for mu_y
Step9: How to tell pymc what you 'know' about the data?
Step10: Notice that the value of this variable is fixed (even if the parent's value changes)
Step11: (B) $T+1$ scalar random variables
Step12: Currently, this is just a numpy array of pymc.Deterministic functions. We can make it a pymc object by using the pymc.Container type.
Step13: and the pymc methods are applied element-wise.
Step14: This object has very limited awareness of the structure of the probabilistic model that it describes and does not itself possess methods for updating the values in the sampling methods.
Step15: Notice that the step_methods are not assigned yet
Step16: You can specify them now, or if you call the sample method, pymc will assign the step_methods automatically according to some rule
Step17: ... and you can check what kind of step methods have been assigned (the default in most cases is the Metropolis step method for non-observed stochastic variables, while in case of observed stochastics, we simply draw from the prior)
Step18: The sample can be reached by the trace method (use the names you used at the initialization not the python name -- useful if the two coincide)
Step19: Then this is just a numpy array, so you can do different sort of things with it. For example plot
Step20: Actually, you don't have to waste your time on constructing different subplots. pymc's built-in plotting functionality creates pretty informative plots for you (based on matplotlib). On the figure below
Step21: For a non-graphical summary of the posterior use the stats() method
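The same summary numbers can also be computed directly from the stored trace; the lines below are a small illustrative sketch using numpy on the rho_sample array extracted above.
# Sketch: manual posterior summary from the trace
print("posterior mean of rho: {: .3f}".format(rho_sample.mean()))
print("95% credible interval for rho: {}".format(np.percentile(rho_sample, [2.5, 97.5])))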
|
14,351 | <ASSISTANT_TASK:>
Python Code:
for p in (True, False):
for q in (True, False):
print("%10s %10s %10s" %(p, q, (p and q)))
for p in (True, False):
for q in (True, False):
for r in (True, False):
print("%10s %10s %10s %s" %(p, q, r, not p and (q or r)))
def implies(a, b):
if a:
return b
else:
return True
x = 4
y = -1
implies(x >= 0 and y >= 0, x*y >= 0)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To get more intuition, let's consider the code segment that generates the truth table for the expression $\neg p \wedge (q\vee r)$
Step2: Other connectives can easily be encoded as functions by use of the if statement. The implication arrow, for example, may be defined as follows
Step3: To express the logical statement $x\geq 0 \wedge y\geq 0 \rightarrow x*y\geq 0$ in Python one simply writes
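The same nested-loop pattern from Step 1 also prints the truth table for the implication defined in Step 2; the short sketch below simply combines the two.
# Sketch: truth table for p -> q using the implies function defined above
for p in (True, False):
    for q in (True, False):
        print("%10s %10s %10s" % (p, q, implies(p, q)))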
|
14,352 | <ASSISTANT_TASK:>
Python Code:
# Print the last item from year and pop
# print(year[-1])
# print(pop[-1])
# Import matplotlib.pyplot as plt
# import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
# plt.plot( year, pop)
# plt.show()
# Print the last item of gdp_cap and life_exp
# print( gdp_cap[ -1 ] )
# print( life_exp[ -1 ])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
# plt.plot( gdp_cap, life_exp )
# Display the plot
# plt.show()
# Change the line plot below to a scatter plot
#plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
#plt.xscale('log')
# Show plot
#plt.show()
# Import package
# import matplotlib.pyplot as plt
# Build Scatter plot
# plt.scatter( pop, life_exp )
# Show plot
# plt.show()
Conclusion: There's no correlation b/w population
and Life Expectancy, which makes perfect sense.
x = [0, 0, 0, 0, 0, 0, 0, 0, 0, 20]
import matplotlib.pyplot as plt
plt.hist( x, 5 )
plt.show()
# Build histogram with 5 bins
# Ans: plt.hist(life_exp, bins = 5)
# 4th and 5th bins.
# Show and clean up plot
# plt.show()
# plt.clf()
# Build histogram with 20 bins
# Ans: plt.hist( life_exp, bins = 20 )
# Much better, 15th bin contains maximum value,
# i.e. most people tend to live up to 71-73 years.
# Show and clean up again
# plt.show()
# plt.clf()
# Histogram of life_exp, 15 bins
#Ans: plt.hist( life_exp, bins = 15)
# Show and clear plot
#plt.show()
#plt.clf()
# Histogram of life_exp1950, 15 bins
#Ans: plt.hist( life_exp1950, bins = 15)
# Show and clear plot again
#plt.show()
#plt.clf()
# Conclusion: Neither one of these histograms is useful to
# better understand the life expectancy data.
# Why?
import matplotlib.pyplot as plt
x = [1, 2, 3]
y = [4, 5, 6]
plt.plot(x, y)
# customization here
plt.xlabel("var1")
plt.ylabel("var2")
plt.show()
# It seems that customization should be done b/w
# the plot() and show() function calls.
import matplotlib.pyplot as plt
x = [1, 2, 3]
y = [4, 5, 6]
plt.plot(x, y)
# customization here
plt.show()
plt.xlabel("var1")
plt.ylabel("var2")
# Basic scatter plot, log scale
# plt.scatter(gdp_cap, life_exp)
# plt.xscale('log')
# Strings
# xlab = 'GDP per Capita [in USD]'
# ylab = 'Life Expectancy [in years]'
# title = 'World Development in 2007'
# Add axis labels
# plt.xlabel(xlab)
# plt.ylabel(ylab)
# Add title
# plt.title(title)
# After customizing, display the plot
# plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Line Plot 2
Step2: Scatter Plot 1
Step4: Scatter Plot 2
Step5: Histograms
Step6: RQ3
Step8: Build a histogram 3
Step10: Choose the right plot 1
Step11: Lab
|
14,353 | <ASSISTANT_TASK:>
Python Code:
from pygsf.io.gdal.raster import try_read_raster_band
source_data = "/home/mauro/Documents/projects/gsf/example_data/others/horiz_plane.asc"
success, cntnt = try_read_raster_band(raster_source=source_data)
print(success)
geotransform, projection, band_params, data = cntnt
type(geotransform)
print(geotransform)
type(projection)
print(projection)
type(band_params)
print(band_params)
type(data)
data.shape
data.min()
data.max()
from pygsf.georeferenced.rasters import GeoArray
ga = GeoArray(inGeotransform=geotransform, inLevels=[data])
from pygsf.orientations.orientations import Plane
gplane = Plane(azim=90.0, dip_ang=45.0)
print(gplane)
from pygsf.geometries.shapes.space3d import Point3D
pt = Point3D(0, 50, 50)
from pygsf.georeferenced.rasters import plane_dem_intersection
inters_pts = plane_dem_intersection(
srcPlaneAttitude=gplane,
srcPt=pt,
geo_array=ga)
print(inters_pts)
from bokeh.plotting import figure, output_notebook, show
x = list(map(lambda pt: pt.x, inters_pts))
y = list(map(lambda pt: pt.y, inters_pts))
output_notebook()
p = figure()
p.circle(x, y, size=2, color="navy", alpha=0.5)
show(p)
source_data = "/home/mauro/Documents/projects/gsf/example_data/others/horiz_plane.asc"
success, cntnt = try_read_raster_band(raster_source=source_data)
print(success)
geotransform, projection, band_params, data = cntnt
ga = GeoArray(inGeotransform=geotransform, inLevels=[data])
from pygsf.orientations.orientations import Plane
gplane = Plane(azim=90.0, dip_ang=0.0)
pt = Point3D(0, 50, 1)
inters_pts = plane_dem_intersection(
srcPlaneAttitude=gplane,
srcPt=pt,
geo_array=ga)
print(inters_pts)
pt = Point3D(0, 50, 0)
inters_pts = plane_dem_intersection(
srcPlaneAttitude=gplane,
srcPt=pt,
geo_array=ga)
print(inters_pts)
from bokeh.plotting import figure, output_notebook, show
x = list(map(lambda pt: pt.x, inters_pts))
y = list(map(lambda pt: pt.y, inters_pts))
output_notebook()
p = figure()
p.circle(x, y, size=2, color="navy", alpha=0.5)
show(p)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test case 1
Step2: We read the data source with success. So we may unpack the result.
Step3: Hmmm, there is no projection info. In fact, there shouldn't..
Step4: A dictionary, as suspected. Try to see the content..
Step5: A very horizontal surface, we agree..
Step6: Given these data, we store them into a GeoArray
Step7: There is a single band provided in the geoarray, and represented by the data array.
Step8: The source point is located at (0, 50, 50)
Step9: Now we try calculating the intersection
Step10: As expected, all the intersection points lie at (50, *, 0)
Step11: Test case 2
Step12: The horizontal geological plane definition
Step13: The source point located at (0, 50, 1)
Step14: Ok, list is empty, as expected.
Step15: They seem correct, just quite numerous..
|
14,354 | <ASSISTANT_TASK:>
Python Code:
# Clone the repository from GitHub
!git clone --depth 1 -q https://github.com/tensorflow/tensorflow
# Copy the training scripts into our workspace
!cp -r tensorflow/tensorflow/lite/micro/examples/magic_wand/train train
# Download the data we will use to train the model
!wget http://download.tensorflow.org/models/tflite/magic_wand/data.tar.gz
# Extract the data into the train directory
!tar xvzf data.tar.gz -C train 1>/dev/null
# The scripts must be run from within the train directory
%cd train
# Prepare the data
!python data_prepare.py
# Split the data by person
!python data_split_person.py
# Load TensorBoard
%load_ext tensorboard
%tensorboard --logdir logs/scalars
!python train.py --model CNN --person true
# Install xxd if it is not available
!apt-get -qq install xxd
# Save the file as a C source file
!xxd -i model.tflite > /content/model.cc
# Print the source file
!cat /content/model.cc
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the data
Step2: We'll then run the scripts that split the data into training, validation, and test sets.
Step3: Load TensorBoard
Step4: Begin training
Step5: Create a C source file
|
14,355 | <ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from os import path as op
import mne
from mne.preprocessing import maxwell_filter
print(__doc__)
data_path = op.join(mne.datasets.misc.data_path(verbose=True), 'movement')
head_pos = mne.chpi.read_head_pos(op.join(data_path, 'simulated_quats.pos'))
raw = mne.io.read_raw_fif(op.join(data_path, 'simulated_movement_raw.fif'))
raw_stat = mne.io.read_raw_fif(op.join(data_path,
'simulated_stationary_raw.fif'))
mne.viz.plot_head_positions(
head_pos, mode='traces', destination=raw.info['dev_head_t'], info=raw.info)
mne.viz.plot_head_positions(
head_pos, mode='field', destination=raw.info['dev_head_t'], info=raw.info)
# extract our resulting events
events = mne.find_events(raw, stim_channel='STI 014')
events[:, 2] = 1
raw.plot(events=events)
topo_kwargs = dict(times=[0, 0.1, 0.2], ch_type='mag', vmin=-500, vmax=500,
time_unit='s')
evoked_stat = mne.Epochs(raw_stat, events, 1, -0.2, 0.8).average()
evoked_stat.plot_topomap(title='Stationary', **topo_kwargs)
epochs = mne.Epochs(raw, events, 1, -0.2, 0.8)
evoked = epochs.average()
evoked.plot_topomap(title='Moving: naive average', **topo_kwargs)
raw_sss = maxwell_filter(raw, head_pos=head_pos)
evoked_raw_mc = mne.Epochs(raw_sss, events, 1, -0.2, 0.8).average()
evoked_raw_mc.plot_topomap(title='Moving: movement compensated (raw)',
**topo_kwargs)
evoked_evo_mc = mne.epochs.average_movements(epochs, head_pos=head_pos)
evoked_evo_mc.plot_topomap(title='Moving: movement compensated (evoked)',
**topo_kwargs)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize the "subject" head movements. By providing the measurement
Step2: This can also be visualized using a quiver.
Step3: Process our simulated raw data (taking into account head movements).
Step4: First, take the average of stationary data (bilateral auditory patterns).
Step5: Second, take a naive average, which averages across epochs that have been
Step6: Third, use raw movement compensation (restores pattern).
Step7: Fourth, use evoked movement compensation. For these data, which contain
|
14,356 | <ASSISTANT_TASK:>
Python Code:
#先把数据读进来
import pandas as pd
data = pd.read_csv('kaggle_bike_competition_train.csv', header = 0, error_bad_lines=False)
#看一眼数据长什么样
data.head()
# 处理时间字段
temp = pd.DatetimeIndex(data['datetime'])
data['date'] = temp.date
data['time'] = temp.time
data.head()
# 设定hour这个小时字段
data['hour'] = pd.to_datetime(data.time, format="%H:%M:%S")
data['hour'] = pd.Index(data['hour']).hour
data
# 我们对时间类的特征做处理,产出一个星期几的类别型变量
data['dayofweek'] = pd.DatetimeIndex(data.date).dayofweek
# 对时间类特征处理,产出一个时间长度变量
data['dateDays'] = (data.date - data.date[0]).astype('timedelta64[D]')
data
byday = data.groupby('dayofweek')
# 统计下没注册的用户租赁情况
byday['casual'].sum().reset_index()
# 统计下注册的用户的租赁情况
byday['registered'].sum().reset_index()
data['Saturday']=0
data.Saturday[data.dayofweek==5]=1
data['Sunday']=0
data.Sunday[data.dayofweek==6]=1
data
# remove old data features
dataRel = data.drop(['datetime', 'count','date','time','dayofweek'], axis=1)
dataRel.head()
from sklearn.feature_extraction import DictVectorizer
# 我们把连续值的属性放入一个dict中
featureConCols = ['temp','atemp','humidity','windspeed','dateDays','hour']
dataFeatureCon = dataRel[featureConCols]
dataFeatureCon = dataFeatureCon.fillna( 'NA' ) #in case I missed any
X_dictCon = dataFeatureCon.T.to_dict().values()
# 把离散值的属性放到另外一个dict中
featureCatCols = ['season','holiday','workingday','weather','Saturday', 'Sunday']
dataFeatureCat = dataRel[featureCatCols]
dataFeatureCat = dataFeatureCat.fillna( 'NA' ) #in case I missed any
X_dictCat = dataFeatureCat.T.to_dict().values()
# 向量化特征
vec = DictVectorizer(sparse = False)
X_vec_cat = vec.fit_transform(X_dictCat)
X_vec_con = vec.fit_transform(X_dictCon)
dataFeatureCon.head()
X_vec_con
dataFeatureCat.head()
X_vec_cat
from sklearn import preprocessing
# 标准化连续值数据
scaler = preprocessing.StandardScaler().fit(X_vec_con)
X_vec_con = scaler.transform(X_vec_con)
X_vec_con
from sklearn import preprocessing
# one-hot编码
enc = preprocessing.OneHotEncoder()
enc.fit(X_vec_cat)
X_vec_cat = enc.transform(X_vec_cat).toarray()
X_vec_cat
import numpy as np
# combine cat & con features
X_vec = np.concatenate((X_vec_con,X_vec_cat), axis=1)
X_vec
# 对Y向量化
Y_vec_reg = dataRel['registered'].values.astype(float)
Y_vec_cas = dataRel['casual'].values.astype(float)
Y_vec_reg
Y_vec_cas
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 把datetime域切成 日期 和 时间 两部分。
Step2: 时间那部分,好像最细的粒度也只到小时,所以我们干脆把小时字段拿出来作为更简洁的特征。
Step3: 仔细想想,数据只告诉我们是哪天了,按照一般逻辑,应该周末和工作日出去的人数量不同吧。我们设定一个新的字段dayofweek表示是一周中的第几天。再设定一个字段dateDays表示离第一天开始租车多久了(猜测在欧美国家,这种绿色环保的出行方式,会迅速蔓延吧)
Step4: 其实我们刚才一直都在猜测,并不知道真实的日期相关的数据分布对吧,所以我们要做一个小小的统计来看看真实的数据分布,我们统计一下一周各天的自行车租赁情况(分注册的人和没注册的人)
Step5: 周末既然有不同,就单独拿一列出来给星期六,再单独拿一列出来给星期日
Step6: 从数据中,把原始的时间字段等踢掉
Step7: 特征向量化
Step8: 标准化连续值特征
Step9: 类别特征编码
Step10: 把特征拼一起
Step11: 最后的特征,前6列是标准化过后的连续值特征,后面是编码后的离散值特征
|
14,357 | <ASSISTANT_TASK:>
Python Code:
# Two threads that have a critical section executed in parallel without mutual exclusion.
# This code does not work!
import threading
import time
counter = 10
def task_1():
global counter
for i in range(10**6):
counter += 1
def task_2():
global counter
for i in range(10**6+1):
counter -= 1
thread_1 = threading.Thread(target=task_1)
thread_2 = threading.Thread(target=task_2)
thread_1.start()
thread_2.start()
print("(Both threads started)")
thread_1.join()
thread_2.join()
print("\nBoth threads finished")
print('counter =', counter)
# Two threads that have a critical section executed sequentially.
import threading
import time
lock = threading.Lock()
counter = 10
def task_1():
global counter
for i in range(10**6):
with lock:
counter += 1
def task_2():
global counter
for i in range(10**6+1):
with lock:
counter -= 1
thread_1 = threading.Thread(target=task_1)
thread_2 = threading.Thread(target=task_2)
now = time.perf_counter() # Real time (not only user time)
thread_1.start()
thread_2.start()
print("Both threads started")
thread_1.join()
thread_2.join()
print("Both threads finished")
elapsed = time.perf_counter() - now
print(f"elapsed {elapsed:0.2f} seconds")
print('counter =', counter)
# Two processes that have a critical section executed sequentially
import multiprocessing
import time
import ctypes
def task_1(lock, counter):
for i in range(10000):
with lock:
counter.value += 1
def task_2(lock, counter):
for i in range(10001):
with lock:
counter.value -= 1
lock = multiprocessing.Lock()
manager = multiprocessing.Manager()
counter = manager.Value(ctypes.c_int, 10)
process_1 = multiprocessing.Process(target=task_1, args=(lock, counter))
process_2 = multiprocessing.Process(target=task_2, args=(lock, counter))
now = time.perf_counter()
process_1.start()
process_2.start()
print("Both tasks started")
process_1.join()
process_2.join()
print("Both tasks finished")
elapsed = time.perf_counter() - now
print(f"elapsed {elapsed:0.2f} seconds")
print('counter =', counter.value)
import asyncio
counter = 10
async def task_1():
global counter
for i in range(10):
print("o", end='', flush=True)
counter += 1
await task_2()
async def task_2():
global counter
print("O", end='', flush=True)
counter -= 1
await task_1()
print('\ncounter =', counter)
import asyncio
import time
counter = 10
async def task_1():
global counter
for i in range(10**6):
counter += 1
await task_2()
async def task_2():
global counter
counter -= 1
now = time.perf_counter()
await task_1()
elapsed = time.perf_counter() - now
print(f"\nelapsed {elapsed:0.2f} seconds")
print('counter =', counter)
import time
counter = 10
def task():
global counter
for i in range(10**6):
counter += 1
counter -= 1
now = time.perf_counter()
task()
elapsed = time.perf_counter() - now
print(f"\nelapsed {elapsed:0.2f} seconds")
print('counter =', counter)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The same example, using mutual exclusion (using a lock)
Step2: Notice that both tasks are CPU-bound. This means that using threading has not any wall time advantage compared to an iterative implementation of both taks.
Step3: Unlike threading, multiprocessing is suitable for reducing the running times in the case of CPU-bound problems.
Step4: Coroutines are faster than threads, but not faster than the one-loop version of the task.
|
14,358 | <ASSISTANT_TASK:>
Python Code:
def get_max_profit(stock_prices_yesterday):
max_profit = 0
# go through every time
for outer_time in xrange(len(stock_prices_yesterday)):
# for every time, go through every OTHER time
for inner_time in xrange(len(stock_prices_yesterday)):
# for each pair, find the earlier and later times
earlier_time = min(outer_time, inner_time)
later_time = max(outer_time, inner_time)
# and use those to find the earlier and later prices
earlier_price = stock_prices_yesterday[earlier_time]
later_price = stock_prices_yesterday[later_time]
# see what our profit would be if we bought at the
# earlier price and sold at the later price
potential_profit = later_price - earlier_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
return max_profit
def get_max_profit(stock_prices_yesterday):
max_profit = 0
# go through every price (with its index as the time)
for earlier_time, earlier_price in enumerate(stock_prices_yesterday):
# and go through all the LATER prices
for later_price in stock_prices_yesterday[earlier_time+1:]:
# see what our profit would be if we bought at the
# earlier price and sold at the later price
potential_profit = later_price - earlier_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
return max_profit
def get_max_profit(stock_prices_yesterday):
min_price = stock_prices_yesterday[0]
max_profit = 0
for current_price in stock_prices_yesterday:
# ensure min_price is the lowest price we've seen so far
min_price = min(min_price, current_price)
# see what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
return max_profit
def get_max_profit(stock_prices_yesterday):
# make sure we have at least 2 prices
if len(stock_prices_yesterday) < 2:
raise IndexError('Getting a profit requires at least 2 prices')
# we'll greedily update min_price and max_profit, so we initialize
# them to the first price and the first possible profit
min_price = stock_prices_yesterday[0]
max_profit = stock_prices_yesterday[1] - stock_prices_yesterday[0]
for index, current_price in enumerate(stock_prices_yesterday):
# skip the first (0th) time
# we can't sell at the first time, since we must buy first,
# and we can't buy and sell at the same time!
# if we took this out, we'd try to buy /and/ sell at time 0.
# this would give a profit of 0, which is a problem if our
# max_profit is supposed to be /negative/--we'd return 0!
if index == 0:
continue
# see what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
# update min_price so it's always
# the lowest price we've seen so far
min_price = min(min_price, current_price)
return max_profit
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But that will take O(n^2) time, since we have two nested loops—for every time, we're going through every other time. Can we do better?
Step2: What's our runtime now?
Step3: We’re finding the max profit with one pass and constant space!
|
14,359 | <ASSISTANT_TASK:>
Python Code:
# import the dataset
from quantopian.interactive.data.quandl import fred_icsa
# Since this data is public domain and provided by Quandl for free, there is no _free version of this
# data set, as found in the premium sets. This import gets you the entirety of this data set.
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
import matplotlib.pyplot as plt
fred_icsa.sort('asof_date')
fred_icsa.count()
icsa_df = odo(fred_icsa, pd.DataFrame)
icsa_df.plot(x='asof_date', y='value')
plt.xlabel("As Of Date (asof_date)")
plt.ylabel("Number of Clais")
plt.title("United States Initial Unemployment Claims")
plt.legend().set_visible(False)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data goes all the way back to 1947 and is updated quarterly.
Step2: Let's go plot for fun. 2545 rows are definitely small enough to just put right into a Pandas Dataframe
|
14,360 | <ASSISTANT_TASK:>
Python Code:
d = {'Angela': 23746, 'Sofia': 2514, 'Luis': 3747, 'Diego': 61562}
d['Angela']
d['Diego']
d['Luis']
d['Sofia']
d['Valeriano'] = 1234
print(d)
d.pop('Angela')
print(d)
list(d.keys())
list(d.values())
'Miguel' in d.keys()
'Luis' in d.keys()
activities = {
'Monday': {'study':4, 'sleep':8, 'party':0},
'Tuesday': {'study':8, 'sleep':4, 'party':0},
'Wednesday': {'study':8, 'sleep':4, 'party':0},
'Thursday': {'study':4, 'sleep':4, 'party':4},
'Friday': {'study':1, 'sleep':4, 'party':8},
}
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this example the keys are strings (corresponding to names) and the values are numbers.
Step2: Adding a new element in the dictionary is very simple
Step3: deleting an item is also easy to do
Step4: It is possible to gather all the keys
Step5: and also gather all the values
Step6: It is also easy to test whether a key (or value) is in the dictionary
Step7: Exercise 3.01
|
14,361 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
14,362 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.ndimage
square = np.zeros((32, 32))
square[10:-10, 10:-10] = np.random.randint(1, 255, size = (12, 12))
np.random.seed(12)
x, y = (32*np.random.random((2, 20))).astype(int)
square[x, y] = np.random.randint(1, 255, size = (20,))
def filter_isolated_cells(array, struct):
    """Zero out nonzero cells that have no nonzero neighbour under `struct`."""
    filtered_array = np.copy(array)
    # Label connected regions of nonzero cells.
    id_regions, num_ids = scipy.ndimage.label(filtered_array, structure=struct)
    # Sum of values per labelled region (equals the region size for a binary array).
    id_sizes = np.array(scipy.ndimage.sum(array, id_regions, range(num_ids + 1)))
    # Regions of size 1 are isolated cells; zero them out.
    area_mask = (id_sizes == 1)
    filtered_array[area_mask[id_regions]] = 0
    return filtered_array
arr = np.sign(square)
filtered_array = filter_isolated_cells(arr, struct=np.ones((3,3)))
square = np.where(filtered_array==1, square, 0)
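# Illustrative check (added, not part of the original snippet): report how many
# isolated cells the filter removed, using the arrays already in scope.
print(np.count_nonzero(arr) - np.count_nonzero(filtered_array), "isolated cells removed")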
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,363 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-1', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
14,364 | <ASSISTANT_TASK:>
Python Code:
plt.imshow(reshaped_sample, cmap = 'gray')
test_sample = mnist.test.images[0]
print(test_sample.shape)
reshaped_test_sample = test_sample.reshape((28,28))
print(reshaped_test_sample.shape)
plt.imshow(reshaped_test_sample, cmap = 'gray')
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x,W) + b
y_ = tf.placeholder(tf.float32, [None, 10])
learning_rate = 0.5
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
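# Illustrative follow-up (added, not from the original cells): use the trained
# weights to predict the digit class of the first test image.
predicted_class = sess.run(tf.argmax(y, 1), feed_dict={x: mnist.test.images[:1]})
print(predicted_class)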
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Testing Data Sample
Step2: Main Code
Step3: reduce_sum
Step4: softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None)
Step5: Train
|
14,365 | <ASSISTANT_TASK:>
Python Code:
L = [2, 4, 6, 8, 10] #use enumerate to get both index and value of a list
for i, val in enumerate(L):
print(i, val)
L = [2, 4, 6, 8, 10]
R = [3, 6, 9, 12, 15]
for lval, rval in zip(L, R):
print(lval, rval)
# find the first 10 square numbers
square = lambda x: x ** 2
for val in map(square, range(10)):
print(val)
# find values up to 10 for which x % 2 is zero
is_even = lambda x: x % 2 == 0
for val in filter(is_even, range(10)):
print(val)
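# Brief generator sketch (added for illustration; the names below are not from the
# original cells): `yield` produces values lazily, one per iteration, instead of
# building the whole list up front.
def squares_up_to(n):
    for x in range(n):
        yield x ** 2
for val in squares_up_to(5):
    print(val)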
# Next: jumping to data science tools
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: zip
Step2: map and filter
Step3: The filter iterator looks similar, except it only passes-through values for which the filter function evaluates to True
Step4: Yields and generators need to revisit to grasp the concept fully. To access the chapter, use this link
|
14,366 | <ASSISTANT_TASK:>
Python Code:
import os
import requests
import numpy as np
import matplotlib.pyplot as plt  # used by the plotting cells below
from bs4 import BeautifulSoup
def listFD(url, ext=''):
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
return [url + node.get('href') for node in soup.find_all('a')
if node.get('href').endswith(ext)]
site = 'http://people.duke.edu/~ccc14/misc/'
ext = 'png'
for i, file in enumerate(listFD(site, ext)):
if i == 5:
break
print(file)
def download_one(url, path):
r = requests.get(url, stream=True)
img = r.raw.read()
with open(path, 'wb') as f:
f.write(img)
%%time
for url in listFD(site, ext):
filename = os.path.split(url)[-1]
download_one(url, filename)
%%time
from concurrent.futures import ThreadPoolExecutor
args = [(url, os.path.split(url)[-1])
for url in listFD(site, ext)]
with ThreadPoolExecutor(max_workers=4) as pool:
pool.map(lambda x: download_one(x[0], x[1]), args)
%%time
from multiprocessing import Pool
args = [(url, os.path.split(url)[-1])
for url in listFD(site, ext)]
with Pool(processes=4) as pool:
pool.starmap(download_one, args)
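# Design note (added): these downloads are I/O-bound, so the thread pool above is
# usually sufficient (and accepts the lambda); the process pool avoids the GIL and
# mainly pays off for CPU-bound work, at the cost of pickling the arguments.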
n = 100
p = 10
xs = np.random.random((n, p))
# This is the only version necessary.
# The numba and numpy versions are just for education.
def buffon():
    """Simulate dropping of one needle."""
center = np.random.random()
angle = 2*np.pi*np.random.random()
offset = 0.5 * np.sin(angle)
if (center + offset > 1) or (center - offset < 0):
return 1
else:
return 0
def buffon_python(n):
    """Calculate π using Buffon's needle method."""
crosses = 0
for i in range(n):
crosses += buffon()
return n/crosses
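# Note (added): because the signed offset above only registers a crossing for
# angles in (0, π), the hit probability here is 1/π rather than the textbook 2/π,
# so n/crosses estimates π directly.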
def buffon_numpy(n):
    """Calculate π using Buffon's needle method."""
centers = np.random.uniform(0, 1, n)
angles = np.random.uniform(0, 2*np.pi, n)
offset = 0.5 * np.sin(angles)
crosses = np.sum((centers + offset > 1) | (centers - offset < 0))
return n/crosses
import numba
@numba.jit(nopython=True)
def buffon_():
    """Simulate dropping of one needle."""
center = np.random.random()
angle = 2*np.pi*np.random.random()
offset = 0.5 * np.sin(angle)
if (center + offset > 1) or (center - offset < 0):
return 1
else:
return 0
@numba.jit(nopython=True)
def buffon_numba(n):
    """Calculate π using Buffon's needle method."""
crosses = 0
for i in range(n):
crosses += buffon_()
return n/crosses
%%time
n = int(1e6)
print(buffon_python(n))
# force JIT compilation before timing
print(buffon_numba(100))
%%time
n = int(1e6)
print(buffon_numba(n))
%%time
n = int(1e6)
print(buffon_numpy(n))
from concurrent.futures import ProcessPoolExecutor
def buffon_pool(n, f, k):
with ProcessPoolExecutor(max_workers=k) as pool:
return np.mean(list(pool.map(f, [n//k] * k)))
%%time
n = int(1e6)
k = 4
print([n/k] * k)
print(buffon_pool(n, buffon_python, 4))
%%file hw6_ex3.cpp
#include <iostream>
#include <fstream>
#include <armadillo>
using std::cout;
using std::ofstream;
int main()
{
using namespace arma;
vec x = linspace<vec>(10.0,15.0,10);
vec eps = 10*randn<vec>(10);
vec y = 3*x%x - 7*x + 2 + eps;
cout << "x:\n" << x << "\n";
cout << "y:\n" << y << "\n";
cout << "Lenght of x is: " << norm(x) << "\n";
cout << "Lenght of y is: " << norm(y) << "\n";
cout << "Distance(x, y) is: " << norm(x -y) << "\n";
cout << "Correlation(x, y) is: " << cor(x, y) << "\n";
mat A = join_rows(ones<vec>(10), x);
A = join_rows(A, x%x);
cout << "A:\n" << A << "\n";
vec b = solve(A, y);
cout << "b:\n" << b << "\n";
ofstream fout1("x.txt");
x.print(fout1);
ofstream fout2("y.txt");
y.print(fout2);
ofstream fout3("b.txt");
b.print(fout3);
}
%%bash
g++ -std=c++11 hw6_ex3.cpp -o hw6_ex3 -larmadillo
./hw6_ex3
n = 10
x = np.linspace(0, 10, n)
y = 3*x**2 - 7*x + 2 + np.random.normal(0, 10, n)
X = np.c_[np.ones(n), x, x**2]
beta = np.linalg.lstsq(X, y)[0]
beta
plt.scatter(x, y)
plt.plot(x, X @ beta, 'red')
pass
%%file wrap.cpp
<%
cfg['compiler_args'] = ['-std=c++11']
cfg['include_dirs'] = ['./eigen']
setup_pybind11(cfg)
%>
#include <pybind11/pybind11.h>
#include <pybind11/eigen.h>
#include <Eigen/LU>
namespace py = pybind11;
// Note: This direct translation is not the most stable or efficient way to solve this
Eigen::VectorXd least_squares(Eigen::MatrixXd X, Eigen::VectorXd y) {
auto XtX = X.transpose() * X;
auto Xty = X.transpose() * y;
return XtX.inverse() * Xty;
}
PYBIND11_PLUGIN(wrap) {
pybind11::module m("wrap", "auto-compiled c++ extension");
m.def("least_squares", &least_squares);
return m.ptr();
}
n = 10
x = np.linspace(0, 10, n)
y = 3*x**2 - 7*x + 2 + np.random.normal(0, 10, n)
X = np.c_[np.ones(n), x, x**2]
import cppimport
m = cppimport.imp("wrap")
beta = m.least_squares(X, y)
beta
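# Optional sanity check (assumes the pybind11 module compiled and imported above):
# the normal-equations solution should closely match numpy's least squares fit.
beta_np = np.linalg.lstsq(X, y)[0]
print(np.allclose(beta, beta_np))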
plt.scatter(x, y)
plt.plot(x, X @ beta, 'red')
pass
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: 2. (25 points) Accelerating CPU bound procedures
Step7: 3. (25 points) Use C++ to
Step8: 4. (25 points) Write a C++ function that uses the Eigen library to solve the least squares linear problem
|
14,367 | <ASSISTANT_TASK:>
Python Code:
%%bash
java -version
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from matplotlib import pylab
import numpy as np
pylab.rcParams['figure.figsize'] = (16.0, 8.0)
sns.set(style="whitegrid")
def createTestFileCollection(elements=120, entities=2, versions=2, depth=2, fields=2, batch=12):
!rm -rf input
!mkdir -p input
out = !java -jar es.um.nosql.streaminginference.benchmark-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--elements $elements \
--entities $entities \
--versions $versions \
--depth $depth \
--fields $fields \
--mode file \
--flow stream \
--batch $batch \
--output input/collection.json \
--delay 10
def createTestMongoCollection(elements=120, entities=2, versions=2, depth=2, fields=2):
out = !java -jar es.um.nosql.streaminginference.benchmark-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--elements $elements \
--entities $entities \
--versions $versions \
--depth $depth \
--fields $fields \
--mode mongo \
--host localhost \
--port 27017 \
--database benchmark
def createTestSingleCollection(elements=120, entities=2, versions=2, depth=2, fields=2):
!rm -rf input
!mkdir -p input
out = !java -jar es.um.nosql.streaminginference.benchmark-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--elements $elements \
--entities $entities \
--versions $versions \
--depth $depth \
--fields $fields \
--mode file \
--output input/collection.json
def createTestCollection(mode="file", elements=120, entities=2, versions=2, depth=2, fields=2, batch=12):
!mkdir -p output
if (mode == "file"):
createTestFileCollection(elements, entities, versions, depth, fields, batch)
elif (mode == "mongo"):
createTestMongoCollection(elements, entities, versions, depth, fields)
elif (mode == "single"):
createTestSingleCollection(elements, entities, versions, depth, fields)
def benchmarkFile(interval=1000, kryo="true"):
out = !spark-submit --driver-memory 8g --master local[*] es.um.nosql.streaminginference.json2dbschema-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--mode file \
--input input \
--benchmark true \
--interval $interval \
--kryo $kryo
def benchmarkMongo(interval=1000, block=200, kryo="true"):
out = !spark-submit --driver-memory 8g --master local[*] es.um.nosql.streaminginference.json2dbschema-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--mode mongo \
--database benchmark \
--host localhost \
--port 27017 \
--benchmark true \
--interval $interval \
--block-interval $block \
--kryo $kryo
def benchmarkSingle():
out = !spark-submit --driver-memory 8g --master local[*] es.um.nosql.streaminginference.json2dbschema-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
--mode single \
--input input/collection.json \
--benchmark true
def benchmarkSparkApp(mode="file", interval=1000, block=200, kryo="true"):
if (mode == "file"):
benchmarkFile(interval, kryo)
elif (mode == "mongo"):
benchmarkMongo(interval, block, kryo)
elif (mode== "single"):
benchmarkSingle()
def benchmark(mode="file", interval=1000, block=200, elements=120, entities=2, versions=2, depth=2, fields=2, batch=12, kryo="true"):
global benchmarked
!rm -f output/stats.csv
createTestCollection(mode, elements, entities, versions, depth, fields, batch)
for x in range(0, 10):
benchmarkSparkApp(mode, interval, block, kryo)
benchmarked = pd.read_csv("output/stats.csv")
return benchmarked
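# Note: each call to benchmark() removes any previous output/stats.csv, regenerates
# the test collection, runs the Spark inference app 10 times and returns the
# accumulated stats.csv as a DataFrame, so every experiment starts from a clean file.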
createTestCollection(mode="file", elements=60000, batch=12000)
createTestCollection(mode="single", elements=60000)
createTestCollection(mode="mongo", elements=60000)
benchmark(mode="file",elements=60000, batch=12000)
benchmark(mode="single",elements=60000)
benchmark(mode="mongo", elements=60000)
results = pd.DataFrame()
df = benchmark(mode="file", elements=2400000, batch=80000, entities=30, versions=30, depth=5, fields=4, kryo="true")
df.to_csv("kryo-enabled.csv")
results["kryo enabled"] = df["TOTAL_PROCESSING"]
df = benchmark(mode="file", elements=2400000, batch=80000, entities=30, versions=30, depth=5, fields=4, kryo="false")
df.to_csv("kryo-disabled.csv")
results["kryo disabled"] = df["TOTAL_PROCESSING"]
ax = sns.barplot(data=results)
ax.set_ylabel("Milisegundos de procesamiento")
ents = np.array([])
mode = np.array([])
millis = np.array([])
for entities in [1, 50, 100, 200, 400]:
df = benchmark(mode="file", elements=2400000, batch=80000, entities=entities, versions=1, depth=2, fields=2, kryo="true")
df.to_csv("file-entities-"+str(entities)+".csv")
length = df["TOTAL_PROCESSING"].size
ents = np.append(ents, np.repeat(entities, length))
mode = np.append(mode, np.repeat("Paralelo", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
df = benchmark(mode="single", elements=2400000, entities=entities, versions=1, depth=2, fields=2)
df.to_csv("original-file-entities-"+str(entities)+".csv")
length = df["TOTAL_PROCESSING"].size
ents = np.append(ents, np.repeat(entities, length))
mode = np.append(mode, np.repeat("Original", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Entidades":ents, "Modo": mode, "Milisegundos de procesamiento": millis})
sns.factorplot(x="Entidades", y="Milisegundos de procesamiento", col="Modo", data=results, kind="bar", size=7)
vers = np.array([])
mode = np.array([])
millis = np.array([])
for versions in [1, 50, 100, 200, 400]:
df = benchmark(mode="file", elements=2400000, batch=80000, entities=1, versions=versions, depth=2, fields=2, kryo="true")
df.to_csv("file-versions-"+str(versions)+".csv")
length = df["TOTAL_PROCESSING"].size
vers = np.append(vers, np.repeat(versions, length))
mode = np.append(mode, np.repeat("Paralelo", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
df = benchmark(mode="single", elements=2400000, entities=1, versions=versions, depth=2, fields=2)
df.to_csv("original-file-versions-"+str(versions)+".csv")
vers = np.append(vers, np.repeat(versions, length))
mode = np.append(mode, np.repeat("Original", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Versiones":vers, "Modo": mode, "Milisegundos de procesamiento": millis})
sns.factorplot(x="Versiones", y="Milisegundos de procesamiento", col="Modo", data=results, kind="bar", size=7)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [60000, 120000, 480000, 1200000, 2400000, 3600000]:
df = benchmark(mode="file", elements=elements, batch=(elements/30), entities=1, versions=1, depth=2, fields=2, kryo="true")
df.to_csv("light-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Paralelo", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = benchmark(mode="single", elements=elements, entities=1, versions=1, depth=2, fields=2)
df.to_csv("light-original-file-elements-"+str(elements)+".csv")
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Original", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Elementos":elems, "Modo": mode, "Microsegundos por elemento": micros})
sns.factorplot(x="Elementos", y="Microsegundos por elemento", col="Modo", data=results, kind="bar", size=7)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [60000, 120000, 480000, 1200000, 2400000, 3600000]:
df = benchmark(mode="file", elements=elements, batch=(elements/30), entities=20, versions=20, depth=2, fields=2, kryo="true")
df.to_csv("medium-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Paralelo", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = benchmark(mode="single", elements=elements, entities=20, versions=20, depth=2, fields=2)
df.to_csv("medium-original-file-elements-"+str(elements)+".csv")
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Original", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Elementos":elems, "Modo": mode, "Microsegundos por elemento": micros})
sns.factorplot(x="Elementos", y="Microsegundos por elemento", col="Modo", data=results, kind="bar", size=7)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [60000, 120000, 480000, 1200000, 2400000, 3600000]:
df = benchmark(mode="file", elements=elements, batch=(elements/30), entities=50, versions=50, depth=2, fields=2, kryo="true")
df.to_csv("hard-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Paralelo", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = benchmark(mode="single", elements=elements, entities=50, versions=50, depth=2, fields=2)
df.to_csv("hard-original-file-elements-"+str(elements)+".csv")
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Original", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Elementos":elems, "Modo": mode, "Microsegundos por elemento": micros})
sns.factorplot(x="Elementos", y="Microsegundos por elemento", col="Modo", data=results, kind="bar", size=7)
parts = np.array([])
millis = np.array([])
for partitions in [1, 2, 4, 8, 16]:
df = benchmark(mode="file", elements=2400000, batch=(elements/partitions), entities=1, versions=1, depth=2, fields=2, kryo="true")
df.to_csv("file-partitions-"+str(partitions)+".csv")
length = df["TOTAL_PROCESSING"].size
parts = np.append(parts, np.repeat(partitions, length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Particiones":parts, "Milisegundos de procesamiento": millis})
sns.factorplot(x="Particiones", y="Milisegundos de procesamiento", data=results, kind="bar", size=7)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [480000, 1200000, 2400000, 3600000]:
for executors in [4, 16]:
df = pd.read_csv("cesga/results-"+str(elements)+"-1-1-"+str(elements/30)+"-"+str(executors)+"-1.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-1-"+str(executors), length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Elementos":elems, "Modo": mode, "Microsegundos por elemento": micros})
sns.factorplot(x="Elementos", y="Microsegundos por elemento", col="Modo", col_wrap=3, data=results, kind="bar", size=5)
import matplotlib.pyplot as plt
import os.path
f, ax = plt.subplots(1,3, figsize=(11, 7))
f.tight_layout()
cmap = sns.color_palette("Blues", n_colors=1000)
row = 0
for version in [1, 20, 50]:
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [480000, 1200000, 2400000, 3600000]:
if version == 1:
strVersion = "light"
if version == 20:
strVersion = "medium"
elif version == 50:
strVersion = "hard"
df = pd.read_csv("local/"+strVersion+"-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("PARALELO", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("local/"+strVersion+"-original-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("ORIGINAL", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
for executors in [4, 16]:
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-"+str(executors)+"-1.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-"+str(executors).zfill(2)+"-1", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-2-8.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-02-8", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-8-2.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-08-2", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Documentos":elems.astype(int), "Modo": mode, "Microsegundos por documento": micros})
grouped = results.groupby(['Documentos', 'Modo'], as_index=False).mean()
grouped.sort_values("Modo")
pivoted = grouped.pivot("Modo", "Documentos", "Microsegundos por documento")
#display(pivoted)
sns.heatmap(pivoted, annot=True, linewidths=.5, fmt="1.2f", ax=ax[row], cmap=cmap, cbar=False, annot_kws={"size": 14})
#ax[row].yticks(np.arange(0, 1, step=0.2))
row += 1
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
plt.show()
import matplotlib.pyplot as plt
import os.path
cmap = sns.color_palette("Blues", n_colors=1000)
f, ax = plt.subplots(1,1, figsize=(12.95, 4.5))
elems = np.array([])
mode = np.array([])
micros = np.array([])
bestMode = ""
bestMicros = 9999999
originalMicros = 0
labels = pd.DataFrame(columns=["Modo", "Documentos", "Candidato"])
results = pd.DataFrame(columns=["Modo", "Documentos", "Speedup"])
for version in [1, 20, 50]:
for elements in [480000, 1200000, 2400000]:
if version == 1:
strVersion = "light"
labelVersion = u"1 entidad\n1 versión"
if version == 20:
strVersion = "medium"
labelVersion = "20 entidades\n20 versiones"
elif version == 50:
strVersion = "hard"
labelVersion = "50 entidades\n50 versiones"
df = pd.read_csv("local/"+strVersion+"-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
bestMode = "Local"
bestMicros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix()).mean()
df = pd.read_csv("local/"+strVersion+"-original-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
originalMicros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix()).mean()
if (originalMicros < bestMicros):
bestMicros = originalMicros
bestMode = "Original"
for executors in [4, 16]:
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-"+str(executors)+"-1.csv")
length = df["TOTAL_PROCESSING"].size
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix()).mean()
if (micros < bestMicros):
bestMicros = micros
bestMode = "CESGA\n" + str(executors) + " executors 1 core"
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-2-8.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix()).mean()
if (micros < bestMicros):
bestMicros = micros
bestMode = "CESGA\n2 executors 8 cores"
df = pd.read_csv("cesga/results-"+str(elements)+"-"+str(version)+"-"+str(version)+"-"+str(elements/30)+"-8-2.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix()).mean()
if (micros < bestMicros):
bestMicros = micros
bestMode = "CESGA\n8 executors 2 cores"
speedup = originalMicros/bestMicros
bestMode += "\nSpeedup: " + "{0:.2f}".format(speedup)
results = results.append({"Modo": labelVersion, "Documentos": elements, "Speedup": speedup}, ignore_index=True)
labels = labels.append({"Modo": labelVersion, "Documentos": elements, "Candidato": bestMode}, ignore_index=True)
#results["Tipo"] = results["Tipo"].astype(int)
results["Documentos"] = results["Documentos"].astype(int)
results = results.pivot("Modo", "Documentos", "Speedup")
labels = labels.pivot("Modo", "Documentos", "Candidato")
sns.heatmap(results, annot=labels, linewidths=.5, fmt="", cmap=cmap, cbar=False, annot_kws={"size": 16}, ax=ax)
ax.set_ylabel('')
ax.set_xlabel("Documentos",fontsize=14)
ax.tick_params(labelsize="large")
plt.yticks(rotation=0)
plt.show()
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [480000, 1200000, 2400000, 3600000]:
for executors in [4, 16]:
df = pd.read_csv("cesga/results-"+str(elements)+"-1-1-"+str(elements/30)+"-"+str(executors)+"-1.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-"+str(executors).zfill(2)+"-1", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Documentos":elems.astype(int), "Modo": mode, "Microsegundos por documento": micros})
sns.factorplot(x="Documentos", y="Microsegundos por documento", col="Modo", col_wrap=3, data=results, kind="bar", size=3)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [480000, 1200000, 2400000, 3600000]:
for executors in [4, 16]:
df = pd.read_csv("cesga/results-"+str(elements)+"-50-50-"+str(elements/30)+"-"+str(executors)+"-1.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA "+str(executors)+" Executors", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Documentos":elems.astype(int), "Modo": mode, "Microsegundos por documento": micros})
sns.factorplot(x="Documentos", y="Microsegundos por documento", col="Modo", col_wrap=3, data=results, kind="bar", size=3.5)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [480000, 1200000, 2400000, 3600000]:
df = pd.read_csv("cesga/results-"+str(elements)+"-1-1-"+str(elements/30)+"-16-1.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-16-1", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("cesga/results-"+str(elements)+"-1-1-"+str(elements/30)+"-8-2.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-08-2", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("cesga/results-"+str(elements)+"-1-1-"+str(elements/30)+"-2-8.csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("CESGA-02-8", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Documentos":elems.astype(int), "Modo": mode, "Microsegundos por documento": micros})
sns.factorplot(x="Documentos", y="Microsegundos por documento", col="Modo", col_wrap=3, data=results, kind="bar", size=4)
ents = np.array([])
mode = np.array([])
millis = np.array([])
for entities in [1, 50, 100, 200, 400]:
df = pd.read_csv("local/file-entities-"+str(entities)+".csv")
length = df["TOTAL_PROCESSING"].size
ents = np.append(ents, np.repeat(entities, length))
mode = np.append(mode, np.repeat("Paralelo", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
df = pd.read_csv("local/original-file-entities-"+str(entities)+".csv")
length = df["TOTAL_PROCESSING"].size
ents = np.append(ents, np.repeat(entities, length))
mode = np.append(mode, np.repeat("Original", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Entidades":ents.astype(int), "Modo": mode, "Milisegundos de procesamiento": millis})
sns.factorplot(x="Entidades", y="Milisegundos de procesamiento", col="Modo", data=results, kind="bar", size=3.5)
vers = np.array([])
mode = np.array([])
millis = np.array([])
for versions in [1, 50, 100, 200, 400]:
df = pd.read_csv("local/file-versions-"+str(versions)+".csv")
length = df["TOTAL_PROCESSING"].size
vers = np.append(vers, np.repeat(versions, length))
mode = np.append(mode, np.repeat("Paralelo", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
df = pd.read_csv("local/original-file-versions-"+str(versions)+".csv")
vers = np.append(vers, np.repeat(versions, length))
mode = np.append(mode, np.repeat("Original", length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Versiones":vers.astype(int), "Modo": mode, "Milisegundos de procesamiento": millis})
sns.factorplot(x="Versiones", y="Milisegundos de procesamiento", col="Modo", data=results, kind="bar", size=3.5)
elems = np.array([])
mode = np.array([])
micros = np.array([])
for elements in [60000, 120000, 480000, 1200000, 2400000]:
df = pd.read_csv("local/light-file-elements-"+str(elements)+".csv")
length = df["TOTAL_PROCESSING"].size
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Paralelo", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
df = pd.read_csv("local/light-original-file-elements-"+str(elements)+".csv")
elems = np.append(elems, np.repeat(elements, length))
mode = np.append(mode, np.repeat("Original", length))
micros = np.append(micros, (df["TOTAL_PROCESSING"]*1000/elements).as_matrix())
results = pd.DataFrame({"Documentos":elems.astype(int), "Modo": mode, "Microsegundos por documento": micros})
sns.factorplot(x="Documentos", y="Microsegundos por documento", col="Modo", data=results, kind="bar", size=3.5)
parts = np.array([])
millis = np.array([])
for partitions in [1, 2, 4, 8, 16]:
df = pd.read_csv("local/file-partitions-"+str(partitions)+".csv")
length = df["TOTAL_PROCESSING"].size
parts = np.append(parts, np.repeat(partitions, length))
millis = np.append(millis, df["TOTAL_PROCESSING"].as_matrix())
results = pd.DataFrame({"Ficheros de entrada":parts.astype(int), "Milisegundos de procesamiento": millis})
sns.factorplot(x="Ficheros de entrada", y="Milisegundos de procesamiento", data=results, kind="bar", size=3.5)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You also need to have the bin folder of Spark 2.2.1 for Hadoop 2.7 or later added to the PATH (download).
Step2: Creating the test collections
Step3: This function fills the benchmark database with test entities
Step4: This function creates a single JSON file named "collection" in the input folder, containing a collection of elements
Step5: This function determines the command to use depending on the operating mode
Step6: Benchmarking the applications
Step7: This function runs the inference application on the previously created database and generates the stats.csv file
Step8: This function runs the inference application on the created collection and generates the stats.csv file; in this case only the processing time is shown
Step9: This function determines the command to use depending on the operating mode
Step10: Putting it all together
Step11: Tests
Step12: Creating a single file with 60000 elements
Step13: Inserting 60000 elements into the MongoDB "benchmark" database
Step14: Test run of 60000 elements in file mode, in batches of 12000 elements
Step15: Test run of 30000 elements in single mode
Step16: Test run of 1200 elements in mongo mode
Step17: Measuring parameters
Step18: Study of the effect of the number of entities on processing time
Step19: Study of the effect of the number of versions on processing time
Step20: Study of the effect of the number of elements on processing time
Step21: Study of the effect of the number of partitions on processing time
Step22: Reading the results obtained
Step23: Heat map with the best alternative and the speedup relative to the original inference process
Step24: Evolution of execution time as a function of the number of executors (1 entity, 1 version)
Step25: Evolution of execution time as a function of the number of executors (50 entities, 50 versions)
Step26: Evolution of execution time as a function of the number of cores (1 entity, 1 version)
Step27: Evolution of execution time as a function of the number of entities
Step28: Evolution of execution time as a function of the number of versions
Step29: Evolution of execution time as a function of the number of documents
Step30: Evolution of execution time as a function of the number of input files
|
14,368 | <ASSISTANT_TASK:>
Python Code:
# import third party python libraries
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
# make plots inline
%matplotlib inline
# later try %matplotlib notebook
#%matplotlib notebook
# import pygslib
import pygslib
# importing drillhole tables into pandas dataframes
collar = pd.read_csv('Babbitt/collar_BABBITT.csv')
survey = pd.read_csv('Babbitt/survey_BABBITT.csv')
assay = pd.read_csv('Babbitt/assay_BABBITT.csv')
# print first 3 lines of the table collar
collar.head(4)
# print first 3 lines of the table survey
survey.head(3)
# print first 3 lines of the table assay
assay.head(3)
# dropping some columns
assay.drop(['NI','S','FE'], axis=1, inplace=True)
# making non-sampled intervals equal to zero
assay.loc[~np.isfinite(assay['CU']), 'CU']=0
#creating a drillhole object
mydholedb=pygslib.drillhole.Drillhole(collar=collar, survey=survey)
# now you can add as many interval tables as you want, for example, assays, lithology and RQD.
mydholedb.addtable(assay, 'assay', overwrite = False)
# validating a drillhole object
mydholedb.validate()
# fixing the issue of single interval at survey table
mydholedb.fix_survey_one_interval_err(90000.)
#validating interval tables
mydholedb.validate_table('assay')
# Calculating length of sample intervals
mydholedb.table['assay']['Length']= mydholedb.table['assay']['TO']-mydholedb.table['assay']['FROM']
# plotting the interval lengths
mydholedb.table['assay']['Length'].hist(bins=np.arange(15)+0.5)
# printing length mode
print ('The Length Mode is:', mydholedb.table['assay']['Length'].mode()[0])
# compositing
mydholedb.downh_composite('assay', variable_name= "CU", new_table_name= "CMP",
cint = 10, minlen=-1, overwrite = True)
# first 5 rows of a table
mydholedb.table["CMP"].tail(5)
# desurveying an interval table
mydholedb.desurvey('CMP',warns=False, endpoints=True)
# first 3 rows of a table
mydholedb.table["CMP"].head(3)
# creating BHID of type integer
mydholedb.txt2intID('CMP')
# first 3 rows of a subtable
mydholedb.table["CMP"][['BHID', 'BHIDint', 'FROM', 'TO']].tail(3)
# exporting results to VTK
mydholedb.export_core_vtk_line('CMP', 'cmp.vtk', title = 'Drillhole created in PyGSLIB')
# inspecting interval tables in drillhole object
print ("Table names ", mydholedb.table_mames)
print ("Tables names", mydholedb.table.keys())
print ("table is ", type(mydholedb.table))
# exporting to csv
mydholedb.table["CMP"].to_csv('cmp.csv', index=False)
# importing the a wireframe (this one was created with https://geomodelr.com)
domain=pygslib.vtktools.loadSTL('Babbitt/Mpz.stl')
# creating array to tag samples in domain1
inside1=pygslib.vtktools.pointinsolid(domain,
x=mydholedb.table['CMP']['xm'].values,
y=mydholedb.table['CMP']['ym'].values,
z=mydholedb.table['CMP']['zm'].values)
# creating a new domain field
mydholedb.table['CMP']['Domain']=inside1.astype(int)
# first 3 rows of a subtable
mydholedb.table['CMP'][['BHID', 'FROM', 'TO', 'Domain']].head(3)
# exporting results to VTK
mydholedb.export_core_vtk_line('CMP', 'cmp.vtk', title = 'Generated with PyGSLIB')
# exporting to csv
mydholedb.table["CMP"].to_csv('cmp.csv', index=False)
# The model definition
xorg = 2288230
yorg = 415200
zorg = -1000
dx = 100
dy = 100
dz = 30
nx = 160
ny = 100
nz = 90
# Creating an empty block model
mymodel=pygslib.blockmodel.Blockmodel(nx,ny,nz,xorg,yorg,zorg,dx,dy,dz)
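# Note: this grid defines nx*ny*nz = 160*100*90 = 1,440,000 candidate blocks,
# spanning 160*100 = 16,000 units in x, 100*100 = 10,000 in y and 90*30 = 2,700
# in z from the origin given above.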
# filling wireframe with blocks
mymodel.fillwireframe(domain)
# the fillwireframe function generates a field named __in,
# this is the proportion inside the wireframe. Here we rename __in to D1
mymodel.bmtable.rename(columns={'__in': 'D1'},inplace=True)
# creating a partial model by filtering out blocks with zero proportion inside the solid
mymodel.set_blocks(mymodel.bmtable[mymodel.bmtable['D1']> 0])
# export partial model to a vtk unstructured grid (*.vtu)
mymodel.blocks2vtkUnstructuredGrid(path='model.vtu')
#declustering parameters
parameters_declus = {
'x' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'xm'],
'y' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'ym'],
'z' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'zm'],
'vr' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'],
'anisy' : 1.,
'anisz' : 0.05,
'minmax' : 0,
'ncell' : 100,
'cmin' : 100.,
'cmax' : 5000.,
'noff' : 8,
'maxcel' : -1}
# declustering
wtopt,vrop,wtmin,wtmax,error, \
xinc,yinc,zinc,rxcs,rycs,rzcs,rvrcr = pygslib.gslib.declus(parameters_declus)
#Plotting declustering optimization results
plt.plot (rxcs, rvrcr, '-o')
plt.xlabel('X cell size')
plt.ylabel('declustered mean')
plt.show()
plt.plot (rycs, rvrcr, '-o')
plt.xlabel('Y cell size')
plt.ylabel('declustered mean')
plt.show()
plt.plot (rzcs, rvrcr, '-o')
plt.xlabel('Z cell size')
plt.ylabel('declustered mean')
plt.show()
# parameters for declustering with the cell size selected
parameters_declus = {
'x' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'xm'],
'y' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'ym'],
'z' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'zm'],
'vr' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'],
'anisy' : 1., # y == x
'anisz' : 0.1, # z = x/20
'minmax' : 0,
'ncell' : 1,
'cmin' : 1000.,
'cmax' : 1000.,
'noff' : 8,
'maxcel' : -1}
# declustering
wtopt,vrop,wtmin,wtmax,error, \
xinc,yinc,zinc,rxcs,rycs,rzcs,rvrcr = pygslib.gslib.declus(parameters_declus)
# Adding declustering weight to a drillhole interval table
mydholedb.table["CMP"]['declustwt'] = 1
mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'declustwt'] = wtopt
# calculating declustered mean
decl_mean = rvrcr[0]
# prepare parameters dictionary
parameters = {
'hmin' : None, #in/output rank-0 array(float,'d')
'hmax' : None, #in/output rank-0 array(float,'d')
'ncl' : 30, #int, number of bins
'iwt' : 1, #int, 1 use declustering weight
'ilog' : 1, #int, 1 use logscale
'icum' : 0, #int, 1 use cumulative
'va' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'], # array('d') with bounds (nd)
    'wt' : mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'declustwt'], # array('d') with bounds (nd), weight variable (obtained with declust?)
'figure' : None , # a bokeh figure object (Optional: new figure created if None). Set none or undefined if creating a new figure.
'title' : 'Hist Cu', # string. Figure title
'xlabel' : 'Cu (%)', # string. X axis label
'ylabel' : 'f(%)', # string. Y axis label
# visual parameter for the histogram
'color' : 'red', # string with valid CSS colour (https://www.w3schools.com/colors/colors_names.asp), or an RGB(A) hex value, or tuple of integers (r,g,b), or tuple of (r,g,b,a)
'legend': 'Non - Declustered', # string (Optional, default "NA")
'alpha' : 0.5, # float [0-1]. Transparency of the fill colour
'lwidth': 1, # float. Line width
# legend
'legendloc': 'top_left'}
# calculate histogram
stats, fig = pygslib.plothtml.histgplt(parameters)
print ('CV', stats['xcvr'])
print ('Mean', stats['xmen'])
print ('Min', stats['xmin'])
print ('Max', stats['xmax'])
# show the figure
pygslib.plothtml.show(fig)
# plot CDF
parameters_probplt = {
# gslib parameters for histogram calculation
'iwt' : 1, # input boolean (Optional: set True). Use weight variable?
'va' : mydholedb.table["CMP"].loc[(mydholedb.table['CMP']['Domain']==1) & (mydholedb.table['CMP']['CU']>0), 'CU'], # array('d') with bounds (nd)
    'wt' : mydholedb.table["CMP"].loc[(mydholedb.table['CMP']['Domain']==1) & (mydholedb.table['CMP']['CU']>0), 'declustwt'], # array('d') with bounds (nd), weight variable (obtained with declust?)
# visual parameters for figure (if a new figure is created)
'figure' : None, # a bokeh figure object (Optional: new figure created if None). Set none or undefined if creating a new figure.
    'title' : 'Prob plot', # string (Optional, "Histogram"). Figure title
'xlabel' : 'Cu', # string (Optional, default "Z"). X axis label
'ylabel' : 'P[Cu<c]', # string (Optional, default "f(%)"). Y axis label
'xlog' : 1, # boolean (Optional, default True). If true plot X axis in log sale.
'ylog' : 1, # boolean (Optional, default True). If true plot Y axis in log sale.
# visual parameter for the probplt
'style' : 'cross', # string with valid bokeh chart type
'color' : 'blue', # string with valid CSS colour (https://www.w3schools.com/colors/colors_names.asp), or an RGB(A) hex value, or tuple of integers (r,g,b), or tuple of (r,g,b,a) (Optional, default "navy")
'legend': 'Declustered Cu', # string (Optional, default "NA").
'alpha' : 1, # float [0-1] (Optional, default 0.5). Transparency of the fill colour
'lwidth': 0, # float (Optional, default 1). Line width
    # legend
'legendloc': 'bottom_right'} # float (Optional, default 'top_right'). Any of top_left, top_center, top_right, center_right, bottom_right, bottom_center, bottom_left, center_left or center
results, fig2 = pygslib.plothtml.probplt(parameters_probplt)
# show the plot
pygslib.plothtml.show(fig2)
results
# TODO:
# creating parameter dictionary for estimation in one block
kt3d_Parameters = {
# Input Data (Only using intervals in the mineralized domain)
# ----------
'x' : mydholedb.table["CMP"]['xm'][mydholedb.table["CMP"]['Domain']==1].values,
'y' : mydholedb.table["CMP"]['ym'][mydholedb.table["CMP"]['Domain']==1].values,
'z' : mydholedb.table["CMP"]['zm'][mydholedb.table["CMP"]['Domain']==1].values,
'vr' : mydholedb.table["CMP"]['CU'][mydholedb.table["CMP"]['Domain']==1].values,
'bhid' : mydholedb.table["CMP"]['BHIDint'][mydholedb.table["CMP"]['Domain']==1].values, # an interger BHID
# Output (Target)
# ----------
'nx' : nx,
'ny' : ny,
'nz' : nz,
'xmn' : xorg,
'ymn' : yorg,
'zmn' : zorg,
'xsiz' : dx,
'ysiz' : dy,
'zsiz' : dz,
'nxdis' : 5,
'nydis' : 5,
'nzdis' : 3,
'outx' : mymodel.bmtable['XC'][mymodel.bmtable['IJK']==1149229].values, # filter to estimate only on block with IJK 1149229
'outy' : mymodel.bmtable['YC'][mymodel.bmtable['IJK']==1149229].values,
'outz' : mymodel.bmtable['ZC'][mymodel.bmtable['IJK']==1149229].values,
# Search parameters
# ----------
'radius' : 850,
'radius1' : 850,
'radius2' : 250,
'sang1' : -28,
'sang2' : 34,
'sang3' : 7,
'ndmax' : 12,
'ndmin' : 4,
'noct' : 0,
'nbhid' : 3,
# Kriging parameters and options
# ----------
'ktype' : 1, # 1 Ordinary kriging
'idbg' : 1, # 0 no debug
# Variogram parameters
# ----------
    'c0' : 0.35 * 0.109758094158, # we need the non-normalized variance for GCOS, so multiply by the actual variance
'it' : [2,2],
'cc' : [0.41*0.109758094158,0.23*0.109758094158],
'aa' : [96,1117],
'aa1' : [96,1117],
'aa2' : [96,300],
'ang1' : [-28,-28],
'ang2' : [ 34, 34],
'ang3' : [ 7, 7]}
# estimating in one block
estimate, debug, summary = pygslib.gslib.kt3d(kt3d_Parameters)
#saving debug to a csv file using Pandas
pd.DataFrame({'x':debug['dbgxdat'],'y':debug['dbgydat'],'z':debug['dbgzdat'],'wt':debug['dbgwt']}).to_csv('dbg_data.csv', index=False)
#pd.DataFrame({'x':[debug['dbgxtg']],'y':[debug['dbgytg']],'z':[debug['dbgztg']],'na':[debug['na']]}).to_csv('dbg_target.csv', index=False)
# save the search ellipse to a VTK file
pygslib.vtktools.SavePolydata(debug['ellipsoid'], 'search_ellipsoid')
# calculate block variance, wee need it for global change of support validation
# you can also calculate this with the function pygslib.gslib.block_covariance(...)
cbb=debug['cbb']
# update parameter file
kt3d_Parameters['idbg'] = 0 # set the debug of
kt3d_Parameters['outx'] = mymodel.bmtable['XC'].values # use all the blocks
kt3d_Parameters['outy'] = mymodel.bmtable['YC'].values
kt3d_Parameters['outz'] = mymodel.bmtable['ZC'].values
# estimating in all blocks
estimate, debug, summary = pygslib.gslib.kt3d(kt3d_Parameters)
# adding the estimate into the model
mymodel.bmtable['CU_OK'] = estimate['outest']
mymodel.bmtable['CU_ID2'] = estimate['outidpower']
mymodel.bmtable['CU_NN'] = estimate['outnn']
mymodel.bmtable['CU_Lagrange'] = estimate['outlagrange']
mymodel.bmtable['CU_KVar']= estimate['outkvar']
# exporting block model to VTK (unstructured grid)
mymodel.blocks2vtkUnstructuredGrid(path='model.vtu')
# exporting to csv using Pandas
mymodel.bmtable['Domain']= 1
mymodel.bmtable[mymodel.bmtable['CU_OK'].notnull()].to_csv('model.csv', index = False)
print ("Mean in model OK :", mymodel.bmtable['CU_OK'].mean())
print ("Mean in model ID2 :", mymodel.bmtable['CU_ID2'].mean())
print ("Mean in model NN :", mymodel.bmtable['CU_NN'].mean())
print ("Mean in data :", mydholedb.table["CMP"]['CU'][mydholedb.table["CMP"]['Domain']==1].mean())
print ("Declustered mean:", decl_mean)
mymodel.bmtable.groupby('XC')[['CU_OK','CU_ID2','CU_NN']].mean().plot()
mymodel.bmtable.groupby('YC')[['CU_OK','CU_ID2','CU_NN']].mean().plot()
mymodel.bmtable.groupby('ZC')[['CU_OK','CU_ID2','CU_NN']].mean().plot()
# Fit anamorphosis by changing zmax, zmin, and the extrapolation function
PCI, H, raw, zana, gauss, z, P, raw_var, PCI_var, fig1 = pygslib.nonlinear.anamor(
z = mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'],
w = mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'declustwt'],
zmin = mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'].min(),
zmax = mydholedb.table["CMP"].loc[mydholedb.table['CMP']['Domain']==1, 'CU'].max(),
zpmin = None, zpmax = None,
ymin=-5, ymax=5,
ndisc = 5000,
ltail=1, utail=4, ltpar=1, utpar=1.5, K=40)
# calculate the support correction coefficient r
r = pygslib.nonlinear.get_r(Var_Zv = cbb, PCI = PCI)
print ('cbb :', cbb)
print ('r :', r)
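# Note (interpretation): in the discrete Gaussian model, r is fitted so that the
# block variance implied by the anamorphosis coefficients (PCI) reproduces cbb,
# the block covariance taken from the kriging debug output above.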
# fit block anamorphosis
ZV, PV, fig2 = pygslib.nonlinear.anamor_blk( PCI, H, r = r, gauss = gauss, Z = z,
ltail=1, utail=1, ltpar=1, utpar=1,
raw=raw, zana=zana)
cutoff = np.arange(0,0.6, 0.01)
tt = []
gg = []
label = []
# calculate GTC from gaussian in block support
t,ga,gb = pygslib.nonlinear.gtcurve (cutoff = cutoff, z=ZV, p=PV, varred = 1, ivtyp = 0, zmin = 0, zmax = None,
ltail = 1, ltpar = 1, middle = 1, mpar = 1, utail = 1, utpar = 1,maxdis = 1000)
tt.append(t)
gg.append(ga)
label.append('DGM with block support')
fig = pygslib.nonlinear.plotgt(cutoff = cutoff, t = tt, g = gg, label = label)
# to compare global resources with the one estimated we calculate the CDF of the blocks
# cdf of kriging estimate
parameters_probplt = {
'iwt' : 0, #int, 1 use declustering weight
'va' : mymodel.bmtable['CU_OK'][mymodel.bmtable['CU_OK'].notnull()].values, # array('d') with bounds (nd)
'wt' : np.ones(mymodel.bmtable['CU_OK'][mymodel.bmtable['CU_OK'].notnull()].shape[0])} # array('d') with bounds (nd), wight variable (obtained with declust?)
binval_ok,cl_ok,xpt025,xlqt,xmed,xuqt,xpt975,xmin,xmax, \
xcvr,xmen,xvar,error = pygslib.gslib.__plot.probplt(**parameters_probplt)
# cdf of id2
parameters_probplt = {
'iwt' : 0, #int, 1 use declustering weight
'va' : mymodel.bmtable['CU_ID2'][mymodel.bmtable['CU_OK'].notnull()].values, # array('d') with bounds (nd)
'wt' : np.ones(mymodel.bmtable['CU_OK'][mymodel.bmtable['CU_OK'].notnull()].shape[0])} # array('d') with bounds (nd), wight variable (obtained with declust?)
binval_id2,cl_id2,xpt025,xlqt,xmed,xuqt,xpt975,xmin,xmax, \
xcvr,xmen,xvar,error = pygslib.gslib.__plot.probplt(**parameters_probplt)
# calculate GTC ok
t,ga,gb = pygslib.nonlinear.gtcurve (cutoff = cutoff, z=cl_ok, p=binval_ok, varred = 1, ivtyp = 2, zmin = 0, zmax = None,
ltail = 1, ltpar = 1, middle = 1, mpar = 1, utail = 1, utpar = 1,maxdis = 1000)
tt.append(t)
gg.append(ga)
label.append('Ordinary Kriging')
# calculate GTC in block support
t,ga,gb = pygslib.nonlinear.gtcurve (cutoff = cutoff, z=cl_id2, p=binval_id2, varred = 1, ivtyp = 2, zmin = 0, zmax = None,
ltail = 1, ltpar = 1, middle = 1, mpar = 1, utail = 1, utpar = 1,maxdis = 1000)
tt.append(t)
gg.append(ga)
label.append('Inverse of the Distance 2)')
fig = pygslib.nonlinear.plotgt(cutoff = cutoff, t = tt, g = gg, label = label)
# we can plot differences (relative error in grade)
plt.plot (cutoff, gg[0]-gg[1], label = 'DGM - OK')
plt.plot (cutoff, gg[0]-gg[2], label = 'DGM - ID2')
plt.plot (cutoff, np.zeros(cutoff.shape[0]),'--k', label = 'Zero error')
plt.title('relative error in grade')
plt.legend()
# we can plot differences (relative error in tonnage)
plt.plot (cutoff, tt[0]-tt[1], label = 'DGM - OK')
plt.plot (cutoff, tt[0]-tt[2], label = 'DGM - ID2')
plt.plot (cutoff, np.zeros(cutoff.shape[0]),'--k', label = 'Zero error')
plt.legend()
plt.title('relative error in tonnage')
# To get tonnes right just multiply per total tonnes
# calcullate tottal tonnage (million tonnes)
ttonnes = mymodel.bmtable['D1'][mymodel.bmtable['CU_OK'].notnull()].sum()*100*100*30* 0.0283168 * 2.7 /1000000
# cubic foot to m -> 0.0283168, density 2.7
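# Note on the conversion above: each full block is 100*100*30 = 300,000 cubic feet,
# about 8,495 m3 (x 0.0283168), and at 2.7 t/m3 roughly 22,937 t per block; dividing
# by 1e6 expresses the total in million tonnes (Mt).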
ttt = tt[0]*ttonnes
#plot
plt.plot(cutoff, ttt)
plt.ylabel('Mt')
plt.xlabel('Cutoff')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Need some help? Just type
Step2: Pandas provides a large set of functions to modify your data. Let's remove some columns and make non-assayed intervals equal to zero.
Step3: Creating a drillhole object
Step4: The output above is a warning message. This one is a complain because the field LENGTH was not included in the collar table. You will see similar warnings any time PyGSLIB detects a potential issue in your data.
Step5: The warning above is serious. There are drillholes with only one survey record and to desurvey we need at least two records, the first one may be at the collar of the drillhole.
Step6: Note
Step7: Compositing
Step8: Most samples (the mode) are 10 ft length. This value or any of its multiples are good options for composite length, they minimize the oversplitting of sample intervals.
Step9: Note that some especial fields were created, those fields have prefix _. _acum is the grade accumulated in the composite interval (sum of grades from sample intervals contributing to the composite interval) and _len is the actual length of the composite.
Step10: Createing a BHID of type integer
Step11: Rendering drillhole intervals in Paraview and exporting drillhole data
Step12: This is how it looks in Paraview
Step13: Tagging samples with domain code
Step14: Only Stereo Lithography (*.STL) and XML VTK Polydata (VTP) file formats are implemented. If your data is in a different format, ej. DXF, you can use a file format converter, my favorite is meshconv
Step15: A section of the wireframe and the drillholes may look as follows
Step16: Note that fillwireframe created or overwrited mymodel.bmtable. The blocks outside the wireframe where filtered out and the final output is a partial model with block inside or touching the wireframe domain.
Step17: Now we can calculate some declustered stats and plot declustered histogras
Step18: Variography
Step19: Estimating Cu grade in one block
Step20: The variogram was calculated and modelled in a differnt software
Step21: The results may look like this in Paraview.
Step22: Estimating in all blocks
Step23: Validating the results
Step24: Create swath plots
Step25: Global change of support
Step26: Note that r is very low...
|
14,369 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, [None, real_dim])
inputs_z = tf.placeholder(tf.float32, [None, z_dim])
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out:
'''
with tf.variable_scope('generator', reuse = reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(inputs = z,
units = n_units,
activation = None)
# Leaky ReLU
h1 = tf.maximum(x = alpha * h1,
y = h1)
# Logits and tanh output
logits = tf.layers.dense(inputs = h1,
units = out_dim,
activation = None)
out = tf.tanh(logits)
return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse = reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(inputs = x,
units = n_units,
activation = None)
# Leaky ReLU
h1 = tf.maximum(x = h1,
y = h1 * alpha)
logits = tf.layers.dense(inputs = h1,
units = 1,
activation = None)
out = tf.sigmoid(logits)
return out, logits
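# Note: tf.maximum(alpha * h1, h1) used in both networks is a hand-rolled leaky
# ReLU (identity for positive inputs, alpha-scaled otherwise); TensorFlow >= 1.4
# also provides this as tf.nn.leaky_relu(h1, alpha).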
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = d_logits_real,
labels = tf.ones_like(d_logits_real)*(1-smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = d_logits_fake,
labels = tf.ones_like(d_logits_fake)*0))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = d_logits_fake,
labels = tf.ones_like(d_logits_fake)*(1-smooth)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
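# Note: filtering tf.trainable_variables() by name prefix works because the two
# networks were built inside tf.variable_scope('generator') and
# tf.variable_scope('discriminator'); each optimizer then only updates its own
# network through var_list.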
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
14,370 | <ASSISTANT_TASK:>
Python Code:
# Model category name used throughout the subsequent analysis
model_cat_id = "03"
# Which features from the dataset should be loaded:
# ['all', 'actual', 'entsoe', 'weather_t', 'weather_i', 'holiday', 'weekday', 'hour', 'month']
features = ['actual', 'weather']
# LSTM Layer configuration
# ========================
# Stateful True or false
layer_conf = [ True, True, True ]
# Number of neurons per layer
cells = [[ 5, 10, 20, 30, 50, 75, 100, 125, 150 ], [0, 10, 20, 50], [0, 10, 15, 20]]
# Regularization per layer
dropout = [0, 0.1, 0.2]
# Size of how many samples are used for one forward/backward pass
batch_size = [8]
# In a sense this is the output neuron dimension, or how many timesteps the neuron should output. Currently not implemented, defaults to 1.
timesteps = [1]
import os
import sys
import math
import itertools
import datetime as dt
import pytz
import time as t
import numpy as np
import pandas as pd
from pandas import read_csv
from pandas import datetime
from numpy import newaxis
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats as stats
from statsmodels.tsa import stattools
from tabulate import tabulate
import math
import keras as keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, LSTM
from keras.callbacks import TensorBoard
from keras.utils import np_utils
from keras.models import load_model
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
from IPython.display import HTML
from IPython.display import display
%matplotlib notebook
mpl.rcParams['figure.figsize'] = (9,5)
# Import custom module functions
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
sys.path.append(module_path)
from lstm_load_forecasting import data, lstm
# Directory with dataset
path = os.path.join(os.path.abspath(''), '../data/fulldataset.csv')
# Split date for train and test data. As the TBATS and ARIMA benchmarks need 2 full cycles of all seasonalities, it needs to be after Jan 01.
loc_tz = pytz.timezone('Europe/Zurich')
split_date = loc_tz.localize(dt.datetime(2017,2,1,0,0,0,0))
# Validation split percentage
validation_split = 0.2
# How many epochs in total
epochs = 30
# Set verbosity level. 0 for only per model, 1 for progress bar...
verbose = 0
# Dataframe containing the relevant data from training of all models
results = pd.DataFrame(columns=['model_name', 'config', 'dropout',
'train_loss', 'train_rmse', 'train_mae', 'train_mape',
'valid_loss', 'valid_rmse', 'valid_mae', 'valid_mape',
'test_rmse', 'test_mae', 'test_mape',
'epochs', 'batch_train', 'input_shape',
'total_time', 'time_step', 'splits'
])
# Early stopping parameters
early_stopping = True
min_delta = 0.006
patience = 2
# Generate output folders and files
res_dir = '../results/notebook_' + model_cat_id + '/'
plot_dir = '../plots/notebook_' + model_cat_id + '/'
model_dir = '../models/notebook_' + model_cat_id + '/'
os.makedirs(res_dir, exist_ok=True)
os.makedirs(model_dir, exist_ok=True)
output_table = res_dir + model_cat_id + '_results_' + t.strftime("%Y%m%d") + '.csv'
test_output_table = res_dir + model_cat_id + '_test_results' + t.strftime("%Y%m%d") + '.csv'
# Generate model combinations
models = []
models = lstm.generate_combinations(
model_name=model_cat_id + '_', layer_conf=layer_conf, cells=cells, dropout=dropout,
batch_size=batch_size, timesteps=[1])
# Load data and prepare for standardization
df = data.load_dataset(path=path, modules=features)
df_scaled = df.copy()
df_scaled = df_scaled.dropna()
# Get all float type columns and standardize them
floats = [key for key in dict(df_scaled.dtypes) if dict(df_scaled.dtypes)[key] in ['float64']]
scaler = StandardScaler()
scaled_columns = scaler.fit_transform(df_scaled[floats])
df_scaled[floats] = scaled_columns
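# Note: the scaler above is fit on the full dataset before the train/test split,
# so the test period influences the standardization parameters. A stricter setup
# (sketch) would fit on the training slice only, e.g.
#   scaler.fit(df_scaled.loc[df_scaled.index < split_date, floats])
#   df_scaled[floats] = scaler.transform(df_scaled[floats])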
# Split in train and test dataset
df_train = df_scaled.loc[(df_scaled.index < split_date )].copy()
df_test = df_scaled.loc[df_scaled.index >= split_date].copy()
# Split in features and label data
y_train = df_train['actual'].copy()
X_train = df_train.drop('actual', 1).copy()
y_test = df_test['actual'].copy()
X_test = df_test.drop('actual', 1).copy()
start_time = t.time()
for idx, m in enumerate(models):
stopper = t.time()
print('========================= Model {}/{} ========================='.format(idx+1, len(models)))
print(tabulate([['Starting with model', m['name']], ['Starting time', datetime.fromtimestamp(stopper)]],
tablefmt="jira", numalign="right", floatfmt=".3f"))
try:
# Creating the Keras Model
model = lstm.create_model(layers=m['layers'], sample_size=X_train.shape[0], batch_size=m['batch_size'],
timesteps=m['timesteps'], features=X_train.shape[1])
# Training...
history = lstm.train_model(model=model, mode='fit', y=y_train, X=X_train,
batch_size=m['batch_size'], timesteps=m['timesteps'], epochs=epochs,
rearrange=False, validation_split=validation_split, verbose=verbose,
early_stopping=early_stopping, min_delta=min_delta, patience=patience)
# Write results
min_loss = np.min(history.history['val_loss'])
min_idx = np.argmin(history.history['val_loss'])
min_epoch = min_idx + 1
if verbose > 0:
print('______________________________________________________________________')
print(tabulate([['Minimum validation loss at epoch', min_epoch, 'Time: {}'.format(t.time()-stopper)],
['Training loss & MAE', history.history['loss'][min_idx], history.history['mean_absolute_error'][min_idx] ],
['Validation loss & mae', history.history['val_loss'][min_idx], history.history['val_mean_absolute_error'][min_idx] ],
], tablefmt="jira", numalign="right", floatfmt=".3f"))
print('______________________________________________________________________')
result = [{'model_name': m['name'], 'config': m, 'train_loss': history.history['loss'][min_idx], 'train_rmse': 0,
'train_mae': history.history['mean_absolute_error'][min_idx], 'train_mape': 0,
'valid_loss': history.history['val_loss'][min_idx], 'valid_rmse': 0,
'valid_mae': history.history['val_mean_absolute_error'][min_idx],'valid_mape': 0,
'test_rmse': 0, 'test_mae': 0, 'test_mape': 0, 'epochs': '{}/{}'.format(min_epoch, epochs), 'batch_train':m['batch_size'],
'input_shape':(X_train.shape[0], timesteps, X_train.shape[1]), 'total_time':t.time()-stopper,
'time_step':0, 'splits':str(split_date), 'dropout': m['layers'][0]['dropout']
}]
results = results.append(result, ignore_index=True)
# Saving the model and weights
model.save(model_dir + m['name'] + '.h5')
# Write results to csv
results.to_csv(output_table, sep=';')
K.clear_session()
import tensorflow as tf
tf.reset_default_graph()
# Shouldn't catch all errors, but for now...
except BaseException as e:
print('=============== ERROR {}/{} ============='.format(idx+1, len(models)))
print(tabulate([['Model:', m['name']], ['Config:', m]], tablefmt="jira", numalign="right", floatfmt=".3f"))
print('Error: {}'.format(e))
result = [{'model_name': m['name'], 'config': m, 'train_loss': str(e)}]
results = results.append(result, ignore_index=True)
results.to_csv(output_table,sep=';')
continue
# Number of the selected top models
selection = 5
# Not necessary if run in the same session. If run on the same day, just reuse output_table.
results_fn = res_dir + model_cat_id + '_results_' + '20170616' + '.csv'
results_csv = pd.read_csv(results_fn, delimiter=';', encoding='latin1')
top_models = results_csv.nsmallest(selection, 'valid_mae')
# Init test results table
test_results = pd.DataFrame(columns=['Model name', 'Mean absolute error', 'Mean squared error'])
# Init empty predictions
predictions = {}
# Loop through models
for index, row in top_models.iterrows():
filename = model_dir + row['model_name'] + '.h5'
model = load_model(filename)
batch_size = int(row['batch_train'])
# Calculate scores
loss, mae = lstm.evaluate_model(model=model, X=X_test, y=y_test, batch_size=batch_size, timesteps=1, verbose=verbose)
# Store results
result = [{'Model name': row['model_name'],
'Mean squared error': loss, 'Mean absolute error': mae
}]
test_results = test_results.append(result, ignore_index=True)
# Generate predictions
model.reset_states()
model_predictions = lstm.get_predictions(model=model, X=X_test, batch_size=batch_size, timesteps=timesteps[0], verbose=verbose)
# Save predictions
predictions[row['model_name']] = model_predictions
K.clear_session()
import tensorflow as tf
tf.reset_default_graph()
test_results = test_results.sort_values('Mean absolute error', ascending=True)
test_results = test_results.set_index(['Model name'])
if not os.path.isfile(test_output_table):
test_results.to_csv(test_output_table, sep=';')
else: # else it exists so append without writing the header
test_results.to_csv(test_output_table,mode = 'a',header=False, sep=';')
print('Test dataset performance of the best {} (out of {} tested models):'.format(min(selection, len(models)), len(models)))
print(tabulate(test_results, headers='keys', tablefmt="grid", numalign="right", floatfmt=".3f"))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Module imports
Step2: Overall configuration
Step3: Preparation and model generation
Step4: Loading the data
Step5: Running through all generated models
Step6: Model selection based on the validation MAE
Step7: Evaluate top 5 models
|
14,371 | <ASSISTANT_TASK:>
Python Code:
# Import Numpy, TensorFlow, TFLearn, and MNIST data
import numpy as np
import tensorflow as tf
import tflearn
import tflearn.datasets.mnist as mnist
# Retrieve the training and test data
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)
# Visualizing the data
import matplotlib.pyplot as plt
%matplotlib inline
# Function for displaying a training image by its index in the MNIST set
def show_digit(index):
label = trainY[index].argmax(axis=0)
# Reshape 784 array into 28x28 image
image = trainX[index].reshape([28,28])
plt.title('Training data, index: %d, Label: %d' % (index, label))
plt.imshow(image, cmap='gray_r')
plt.show()
# Display the first (index 0) training image
show_digit(0)
# Define the neural network
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
    # Include the input layer, hidden layer(s), and set how you want to train the model
net = tflearn.input_data([None, 784])
net = tflearn.fully_connected(net, 100, activation="ReLU")
net = tflearn.fully_connected(net, 50, activation="ReLU")
net = tflearn.fully_connected(net, 25, activation="ReLU")
net = tflearn.fully_connected(net, 10, activation="softmax")
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.01, loss="categorical_crossentropy")
# This model assumes that your network is named "net"
model = tflearn.DNN(net)
return model
# Build the model
model = build_model()
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=100)
# Compare the labels that our model predicts with the actual labels
# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.
predictions = np.array(model.predict(testX)).argmax(axis=1)
# Calculate the accuracy, which is the percentage of times the predicted labels matched the actual labels
actual = testY.argmax(axis=1)
test_accuracy = np.mean(predictions == actual, axis=0)
# Print out the result
print("Test accuracy: ", test_accuracy)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrieving training and test data
Step2: Visualize the training data
Step3: Building the network
Step4: Training the network
Step5: Testing
|
14,372 | <ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-3', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
14,373 | <ASSISTANT_TASK:>
Python Code:
for i in range(5):
print (i)
for character in "Hi There!":
print (character)
dico={"a":1,"b":2,"c":3}
for k in dico.keys():
print (k)
# If only one iteration variable is specified, it is a (key, value) tuple.
for kv in dico.items():
print (kv)
# If two iteration variables are specified, the first is the key and the second the corresponding value
for k,v in dico.items():
print (k)
print (v)
# If the key or the value of the dictionary is not needed, it is not stored in a variable, by using "_"
for k,_ in dico.items():
print (k)
import numpy as np
a = np.arange(6).reshape(2,3)
for x in np.nditer(a):
print (x)
for x in a:
print (x)
for x in a.T:
print (x)
maxa = a.max() # Returns the maximum value of the array a
maxxa = a.max(axis=1) # Returns the maximum value of each row of the array a
maxya = a.max(axis=0) # Returns the maximum value of each column of the array a
print(maxa, maxxa, maxya)
def scale(x):
xcr = (x-np.mean(x))/np.std(x)
return xcr
X = np.random.randint(5, size=(3, 3))
Xcr = np.apply_along_axis(scale,1,X)
print(X, Xcr)
import pandas as pd
df = pd.DataFrame([["A",4],["B",5],["C",6]],index=[1,2,3],columns=["Letter","Number"])
for i,r in df.iterrows():
print(i,r)
for ir in df.itertuples():
print(ir)
import itertools
# zip: aggregates the elements of several iterable objects
zip_list = []
for k in zip('ABCD',[1,2,3,4],"wxyz"):
zip_list.append(k)
print("zip")
print(zip_list)
# permutations: returns all possible arrangements of length n
permutation_list = []
for k in itertools.permutations("ABCD",2):
permutation_list.append(k)
print("permutations")
print(permutation_list)
# Version1
A1=[]
for k in range(10):
A1.append(k*k)
# Version 2
A2 = [k*k for k in range(10)]
print(A1,A2)
a = 17
s = "hi"
a += 3 # Equivalent to a = a + 3
a -= 3 # Equivalent to a = a - 3
a *= 3 # Equivalent to a = a * 3
a /= 3 # Equivalent to a = a / 3
a %= 3 # Equivalent to a = a % 3
s += " there" # Equivalent to s = s + “ there"
# Increment `count` until it exceeds 100,000
count = 1
while count <= 100000:
count += 1
print (count)
while True:
number = int(input("Enter the numeric grade: "))
if number >= 0 and number <= 100:
break
else:
print ("Error: grade must be between 100 and 0" )
print (number)
number=1
if number==1:
print (True)
else:
print (False)
number=13
if number<5:
print("A")
elif number <10:
print("B")
elif number <20:
print("C")
else:
print("D")
number=10
"A" if number >10 else "B"
# Select only the even values
l1 = [k for k in range(10) if k%2==0]
l1
#Retourne "even" si l'élement k est pair, "odd" sinon.
l2 = ["even" if k%2==0 else "odd" for k in range(10)]
l2
import random
numbers = [random.randrange(-10,10) for k in range(10)]
abs_numbers = map(abs,numbers) # Apply the absolute value function to every element of the list
print(numbers,list(abs_numbers))
def first_capital_letters(txt):
if txt[0].islower():
txt = txt[0].upper()+txt[1:]
return txt
name=["Jason","bryan","hercule","Karim"]
list(map(first_capital_letters,name))
def is_odd(n):
return n % 2 == 1
list(filter(is_odd,range(20)))
import functools
def sum_and_print(x,y):
print("Input: ", x,y)
print("Output: ", y)
return x+y
r10 = range(10)
res =functools.reduce(sum_and_print, r10)
print(res)
def somme(x,y):
return x+y
r10 = range(10)
res =functools.reduce(somme, r10,1000)
print(res)
name=["Jason","bryan","hercule","Karim"]
list(map(lambda x : x[0].upper()+x[1:] if x[0].islower() else x,name))
list(filter(lambda x : x % 2 == 1 ,range(10)))
r10 = range(10)
res =functools.reduce(lambda x,y:x+y, r10,1000)
res
class Eleve:
    """Class defining a student, characterized by:
    - a last name
    - a first name
    - a list of grades
    """
    def __init__(self, nom, prenom): # class constructor
        """Build a student with the given last name and first name and an empty list of grades."""
        self._nom = nom
        self._prenom = prenom
        self._notes = []
    def getNom(self):
        """Return the student's last name."""
        return self._nom
    def getNotes(self):
        """Return the student's grades."""
        return self._notes
    def getNoteMax(self):
        """Return the student's highest grade."""
        return max(self._notes)
    def getMean(self):
        """Return the student's mean grade."""
        return np.mean(self._notes)
    def getNbNote(self):
        """Return the number of grades of the student."""
        return len(self._notes)
    def addNote(self, note):
        """Add the grade 'note' to the student's list of grades."""
        self._notes.append(note)
eleve1 = Eleve("Jean","Bon")
eleve1._nom
eleve1.getNom()
print(eleve1.getNotes())
eleve1.addNote(15)
print(eleve1.getNotes())
for k in range(10):
eleve1.addNote(np.random.randint(20))
print (eleve1.getNbNote())
print (eleve1.getNoteMax())
print (eleve1.getMean())
class EleveSpecial(Eleve):
    def __init__(self, nom, prenom, optionName):
        Eleve.__init__(self, nom, prenom)
        self._optionName = optionName
        self._optionNotes = []
    def getNotesOption(self):
        """Return the student's grades for the optional course."""
        return self._optionNotes
    def addNoteOption(self, note):
        """Add the grade 'note' to the student's list of optional-course grades."""
        self._optionNotes.append(note)
eleve2 = EleveSpecial("Sam","Stress","latin")
eleve2.addNote(14)
print (eleve2.getNotes())
eleve2.addNoteOption(12)
print (eleve2.getNotesOption())
from sklearn.linear_model import LinearRegression
lr = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)
print (lr.fit_intercept, lr.normalize, lr.copy_X, lr.n_jobs)
X_train=[[0, 0], [1, 1], [2, 2]]
Y_train = [0, 1, 2]
lr.fit (X_train, Y_train)
lr.coef_
X_test = [[1.5,1.5],[2,4],[7.3,7.1]]
Y_test = [1.5,2.4,7]
pred = lr.predict(X_test)
s = lr.score(X_test,Y_test)
print(pred,s)
def unpacking_list_and_print(a, b):
print (a)
print (b)
listarg = [3,4]
unpacking_list_and_print(*listarg)
def unpacking_dict_and_print(k1=0, k2=0):
print (k1)
print (k2)
dictarg = {'k1':4, 'k2':8}
unpacking_dict_and_print(**dictarg)
def packing_and_print_args(required_arg, *args):
print ("arg Nécessaire:", required_arg)
for i, arg in enumerate(args):
print ("args %d:" %i, arg)
packing_and_print_args(1, "two", 3)
packing_and_print_args(1, "two", [1,2,3],{"a":1,"b":2,"c":3})
def packing_and_print_kwargs(def_kwarg=2, **kwargs):
print ("kwarg défini:", def_kwarg)
for i,(k,v) in enumerate(kwargs.items()):
print ("kwarg %d:" %i ,k , v)
packing_and_print_kwargs(def_kwarg=1, sup_arg1="two", sup_arg2=3)
packing_and_print_kwargs(sup_arg1="two", sup_arg2=3, sup_arg3=[1,2,3])
def packing_and_print_args_and_kwargs(required_arg ,def_kwarg=2, *args, **kwargs):
print ("arg Nécessaire:", required_arg)
for i, arg in enumerate(args):
print ("args %d:" %i, arg)
print ("kwarg défini:", def_kwarg)
for i,(k,v) in enumerate(kwargs.items()):
print ("kwarg %d:" %i ,k , v )
packing_and_print_args_and_kwargs(1, "two", [1,2,3] ,sup_arg1="two", sup_arg2=3 )
class Objet(object):
def __init__(self, attribut=None, *args, **kwargs):
print (attribut)
class Objet2Point0(Objet):
def __init__(self, *args, **kwargs):
super(Objet, self).__init__(*args, **kwargs)
class Objet3Point0(Objet2Point0):
def __init__(self,attribut2=None, *args, **kwargs):
super(Objet2Point0, self).__init__(*args, **kwargs)
print (attribut2)
my_data = {'attribut': 'Argument1', 'attribut2': 'Argument2'}
Objet3Point0(**my_data)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calling the range(n) function iterates over the integers from 0 to n-1, but it is possible to specify the interval and the step of each iteration
Step2: Dictionaries
Step3: The .items() and .iteritems() functions iterate over the (key, value) pairs of dictionaries.
Step4: Numpy Array
Step5: Applying the for loop directly to a Numpy array iterates over the rows of that array.
Step6: To iterate over the columns, the simplest method is to iterate over the rows of the transposed matrix.
Step7: It is, however, very important to understand that numpy arrays were designed to apply functions directly to the whole array while avoiding loops. Many native functions thus solve numerous problems without having to traverse the whole matrix.
Step8: To apply more complex functions to each row and/or column of the array, the numpy library also provides native functions that apply them efficiently, for example apply_along_axis, similar to R's apply.
Step9: Pandas DataFrame
Step10: The itertools library
Step11: One-Line Statement
Step12: Augmented assignment
Step13: 1.2 The while iterative structure
Step14: The break statement exits the while loop even if its condition is still satisfied.
Step15: 1.3 Conditional structures if - else
Step16: When more than two alternatives are possible, use the elif statement to enumerate the different possibilities.
Step17: One-Line Statement
Step18: One line with for Loop
Step19: 2 Functional programming
Step20: 2.1 filter
Step21: 2.2 reduce
Step22: By default, the function passed as a parameter to reduce performs its first operation on the first two elements of the list passed as a parameter. But it is possible to specify an initial value as a third parameter. The first operation is then performed on this initial value and the first element of the list.
Step23: 2.4 lambda
Step32: 3 Classes and objects
Step33: Every class has a constructor named __init__. It is a special instance method that Python recognizes and knows how to use in certain contexts. The __init__ function is automatically called when a new instance is created and takes as parameters self, which represents the instantiated object, and the various attributes needed for its creation.
Step34: The attributes of the class are directly accessible in the following way
Step35: Methods that modify the attributes of an object are called mutators. The addNote function, which adds a grade to the student's list of grades, is a mutator.
Step38: 3.2 Inheritance
Step39: 3.3 Scikit-learn classes
Step40: The LinearRegression class also has attributes that are updated by its methods
Step41: The LinearRegression class also has other methods that use the class attributes. For example
Step42: 4 Packing and Unpacking
Step43: 4.2 Packing
Step44: The **kwargs argument allows the function to receive an additional, unknown number of keyword arguments.
Step45: The *args and **kwargs arguments can be combined in another function.
Step46: These two operators are very useful for managing classes linked by inheritance. The *args and **kwargs arguments then make it possible to handle the transmission of this inheritance without having to redefine the arguments at each step.
|
14,374 | <ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk',
rank=None)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk', rank=None)
evoked.plot(time_unit='s')
pick_oris = [None, 'normal', 'max-power', None]
descriptions = ['Free', 'Normal', 'Max-power', 'Fixed']
fig, ax = plt.subplots(1)
max_voxs = list()
colors = list()
for pick_ori, desc in zip(pick_oris, descriptions):
# compute unit-noise-gain beamformer with whitening of the leadfield and
# data (enabled by passing a noise covariance matrix)
if desc == 'Fixed':
use_forward = mne.convert_forward_solution(forward, force_fixed=True)
else:
use_forward = forward
filters = make_lcmv(evoked.info, use_forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori=pick_ori,
weight_norm='unit-noise-gain', rank=None)
print(filters)
# apply this spatial filter to source-reconstruct the evoked data
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
# View activation time-series in maximum voxel at 100 ms:
time_idx = stc.time_as_index(0.1)
max_idx = np.argmax(np.abs(stc.data[:, time_idx]))
# we know these are all left hemi, so we can just use vertices[0]
max_voxs.append(stc.vertices[0][max_idx])
h = ax.plot(stc.times, stc.data[max_idx, :],
label='%s, voxel: %i' % (desc, max_idx))[0]
colors.append(h.get_color())
if pick_ori == 'max-power':
max_stc = stc
ax.axhline(0, color='k')
ax.set(xlabel='Time (ms)', ylabel='LCMV value',
title='LCMV in maximum voxel')
ax.legend(loc='lower right')
mne.viz.utils.plt_show()
# Plot last stc in the brain in 3D with PySurfer if available
brain = max_stc.plot(hemi='lh', views='lat', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s', smoothing_steps=5)
for color, vertex in zip(colors, max_voxs):
brain.add_foci([vertex], coords_as_verts=True, scale_factor=0.5,
hemi='lh', color=color)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get epochs
Step2: Run beamformers and look at maximum outputs
Step3: We can also look at the spatial distribution
|
14,375 | <ASSISTANT_TASK:>
Python Code:
# This to clear all variable values
%reset
# Import the required modules
import pandas as pd
import numpy as np
#import scipy as sp
# simple function to read in the user data file.
# the argument parse_dates takes in a list of colums, which are to be parsed as date format
user_data_raw = pd.read_csv("janacare_user-engagement_Aug2014-Apr2016.csv", parse_dates = [-3,-2,-1])
# data metrics
user_data_raw.shape # Rows , colums
# data metrics
user_data_raw.dtypes # data type of colums
user_data_to_clean = user_data_raw.rename(columns = {'watching_videos (binary - 1 for yes, blank/0 for no)':'watching_videos'})
# Some basic statistical information on the data
user_data_to_clean.describe()
# Lets check the health of the data set
user_data_to_clean.info()
# Lets first delete the last column
user_data_to_clean_del_last_col = user_data_to_clean.drop("age_on_platform", 1)
# Check if colums has been deleted. Number of column changed from 19 to 18
user_data_to_clean_del_last_col.shape
# Copy data frame 'user_data_del_last_col' into a new one
user_data_to_clean = user_data_to_clean_del_last_col
# Run a loop through the data frame and check each row for this anamoly, if found swap
for index, row in user_data_to_clean.iterrows():
if row.first_login > row.last_activity:
temp_date_var = row.first_login
user_data_to_clean.set_value(index, 'first_login', row.last_activity)
user_data_to_clean.set_value(index, 'last_activity', temp_date_var)
#print "\tSw\t" + "first\t" + row.first_login.isoformat() + "\tlast\t" + row.last_activity.isoformat()
# Create new column 'age_on_platform' which has the corresponding value in date type format
user_data_to_clean["age_on_platform"] = user_data_to_clean["last_activity"] - user_data_to_clean["first_login"]
# Check the result in first few rows
user_data_to_clean["age_on_platform"].head(5)
# Lets check the health of the data set
user_data_to_clean.info()
# Lets remove all columns from the data set that do not have to be imputed -
user_data_to_impute = user_data_to_clean.drop(["user_id", "watching_videos", "num_of_days_steps_tracked", "num_of_days_weight_tracked", "insulin_a1c_count", "weight", "height", "bmi", "age", "gender", "has_diabetes", "first_login", "last_activity", "age_on_platform", "hemoglobin_count", "cholesterol_count"], 1 )
user_data_to_impute.info()
# Import Imputation method KNN
##from fancyimpute import KNN
# First lets convert the Pandas Dataframe into a Numpy array. We do this since the data frame needs to be transposed,
# which is only possible if the format is an Numpy array.
##user_data_to_impute_np_array = user_data_to_impute.as_matrix()
# Lets Transpose it
##user_data_to_impute_np_array_transposed = user_data_to_impute_np_array.T
# Run the KNN method on the data. function usage X_filled_knn = KNN(k=3).complete(X_incomplete)
##user_data_imputed_knn_np_array = KNN(k=5).complete(user_data_to_impute_np_array_transposed)
# Lets use simpler method that is provided by Scikit Learn itself
# import the function
from sklearn.preprocessing import Imputer
# Create an object of class Imputer, with the relvant parameters
imputer_object = Imputer(missing_values='NaN', strategy='mean', axis=0, copy=False)
# Impute the data and save the generated Numpy array
user_data_imputed_np_array = imputer_object.fit_transform(user_data_to_impute)
# create a list of tuples, with the column name and data type for all existing columns in the Numpy array.
# exact order of columns has to be maintained
column_names_of_imputed_np_array = ['num_modules_consumed', 'num_glucose_tracked', 'num_of_days_food_tracked']
# create the Pandas data frame from the Numpy array
user_data_imputed_data_frame = pd.DataFrame(user_data_imputed_np_array, columns=column_names_of_imputed_np_array)
# Check if the data frame created now is proper
user_data_imputed_data_frame.info()
# using the Series contructor from Pandas
user_data_imputed_data_frame['last_activity'] = pd.Series(user_data_to_clean['last_activity'])
user_data_imputed_data_frame['age_on_platform'] = pd.Series(user_data_to_clean['age_on_platform'])
# Check if every thing is Ok
user_data_imputed_data_frame.info()
# fillna(0) function will fill all blank cells with '0'
user_data_imputed_data_frame['watching_videos'] = pd.Series(user_data_to_clean['watching_videos'].fillna(0))
user_data_imputed_data_frame.info()
# Since only these two columns are having null values, we can run the function *dropna()* on the whole data frame
# All rows with missing data get dropped
user_data_imputed_data_frame.dropna(axis=0, inplace=True)
user_data_imputed_data_frame.info()
# This if else section will bin the rows based on the critiria for labels mentioned in the table above
user_data_imputed_data_frame_labeled = user_data_imputed_data_frame
for index, row in user_data_imputed_data_frame.iterrows():
if row["age_on_platform"] >= np.timedelta64(30, 'D') and row["age_on_platform"] < np.timedelta64(180, 'D'):
if row['last_activity'] <= np.datetime64(2, 'D') and\
row['num_modules_consumed'] >= 12 and\
row['num_of_days_food_tracked'] >= 20 and\
row['num_glucose_tracked'] >= 16 and\
row['watching_videos'] == 1:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 1)
else:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 2)
elif row["age_on_platform"] >= np.timedelta64(180, 'D') and row["age_on_platform"] < np.timedelta64(360, 'D'):
if row['last_activity'] <= np.datetime64(7, 'D') and\
row['num_modules_consumed'] >= 48 and\
row['num_of_days_food_tracked'] >= 30 and\
row['num_glucose_tracked'] >= 96 and\
row['watching_videos'] == 1:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 3)
else:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 4)
elif row["age_on_platform"] >= np.timedelta64(360, 'D'):
if row['last_activity'] <= np.datetime64(14, 'D') and\
row['num_modules_consumed'] >= 48 and\
row['num_of_days_food_tracked'] >= 30 and\
row['num_glucose_tracked'] >= 192 and\
row['watching_videos'] == 1:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 5)
else:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 6)
else:
user_data_imputed_data_frame_labeled.set_value(index, 'label', 0)
user_data_imputed_data_frame_labeled['label'].unique()
# Look at basic info for this Labeled data frame
user_data_imputed_data_frame_labeled.info()
# Lets start with the column last_activity
# ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
# This function takes a datetime64 value and converts it into float value that represents time from epoch
def convert_datetime64_to_from_epoch(dt64):
ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
return ts
# Lets apply this function on last_activity column
user_data_imputed_data_frame_labeled_datetime64_converted = user_data_imputed_data_frame_labeled
user_data_imputed_data_frame_labeled_datetime64_converted['last_activity'] = user_data_imputed_data_frame_labeled['last_activity'].apply(convert_datetime64_to_from_epoch)
user_data_imputed_data_frame_labeled_datetime64_converted.info()
# Now its time to convert the timedelta64 column named age_on_platform
def convert_timedelta64_to_sec(td64):
ts = (td64 / np.timedelta64(1, 's'))
return ts
user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted = user_data_imputed_data_frame_labeled_datetime64_converted
user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted['age_on_platform'] = user_data_imputed_data_frame_labeled_datetime64_converted['age_on_platform'].apply(convert_timedelta64_to_sec)
user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted.info()
user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted.describe()
# Save the labeled data frame as excel file
from pandas import options
options.io.excel.xlsx.writer = 'xlsxwriter'
user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted.to_excel('user_data_imputed_data_frame_labeled.xlsx')
# Total number of rows is 302; 30% of that is ~90
user_data_imputed_data_frame_labeled_training = user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted.ix[90:]
user_data_imputed_data_frame_labeled_training.info()
# Lets first make our list of Labels column
#for index, row in user_data_imputed_data_frame.iterrows():
label_list = user_data_imputed_data_frame_labeled_training['label'].values.tolist()
# Check data type of elements of the list
type(label_list[0])
# Lets convert the data type of all elements of the list to int
label_list_training = map(int, label_list)
# Check data type of elements of the list
type(label_list_training[5])
# Now to create the other list of lists with features as elements
# before that we will have to remove the Labels column
user_data_imputed_data_frame_UNlabeled_training = user_data_imputed_data_frame_labeled_training.drop(['label'] ,1)
user_data_imputed_data_frame_UNlabeled_training.info()
# As you may notice, the data type of watching_videos is float, while it should be int
user_data_imputed_data_frame_UNlabeled_training['watching_videos'] = user_data_imputed_data_frame_UNlabeled_training['watching_videos'].apply(lambda x: int(x))
user_data_imputed_data_frame_UNlabeled_training.info()
# Finally lets create the list of list from the row contents
features_list_training = map(list, user_data_imputed_data_frame_UNlabeled_training.values)
from sklearn import tree
classifier = tree.DecisionTreeClassifier() # We create an instance of the Decision tree object
classifier = classifier.fit(features_list_training, label_list_training) # Train the classifier
# Testing data is the first 90 rows
user_data_imputed_data_frame_labeled_testing = user_data_imputed_data_frame_labeled_datetime64_timedelta64_converted.ix[:90]
# take the labels in seprate list
label_list_test = user_data_imputed_data_frame_labeled_testing['label'].values.tolist()
label_list_test = map(int, label_list_test)
# Drop the time and Label columns
user_data_imputed_data_frame_UNlabeled_testing = user_data_imputed_data_frame_labeled_testing.drop(['label'] ,1)
# Check if every thing looks ok
user_data_imputed_data_frame_UNlabeled_testing.info()
# Finally lets create the list of list from the row contents for testing
features_list_test = map(list, user_data_imputed_data_frame_UNlabeled_testing.values)
len(features_list_test)
# the prediction results for first ten values of test data set
print list(classifier.predict(features_list_test[:20]))
# The labels for test data set as labeled by code
print label_list_test[:20]
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The column name watching_videos (binary - 1 for yes, blank/0 for no) is too long and has special chars, lets change it to watching_videos
Step2: Data Clean up
Step3: As is visible from the last column (age_on_platform) data type, Pandas is not recognising it as date type format.
Step4: But on eyeballing the data I noticed that some cells of the column first_login have a greater value than the corresponding cell of last_activity. These cells need to be swapped, since it is not possible to have first_login > last_activity
Step5: The second column of the table above gives the number of non-null values in the respective column.
Step6: The next 3 cells describe the steps to impute data using the KNN strategy; sadly, this does not work well for our data set. One possible reason could be that the column is too sparse to find a neighbour.
Step7: The above 3 steps are for KNN based Imputation, did not work well. As visible 804 items could not be imputed for and get replaced with zero
Step8: the user_data_imputed_np_array is a NumPy array, we need to convert it back to Pandas data frame
Step9: Now lets add back the useful colums that we had removed from data set, these are
Step10: As mentioned in column description for watching_videos a blank or no value, means '0' also know as 'Not watching'
Step11: Finally the columns last_activity, age_on_platform have missing values, as evident from above table. Since this is time data, that in my opinion should not be imputed, we will drop/delete the columns.
Step12: Labelling the Raw data
Step13: The output above for the array says only 2,4,6,0 were selected as labels. Which means there are no good users in all three new, mid, long - term categories.
Step14: One major limitation with Sci Kit Learn is with the datatypes it can deal with for features
Step15: Training and Testing the ML algorithm
Step16: Here we remove the datetime64 & timedelta64 columns too, the issue is Sci Kit learn methods can only deal with numerical and string features. I am trying to sort this issue
Step17: Its time to train the model
|
14,376 | <ASSISTANT_TASK:>
Python Code:
pa = 0.001
pbga = 0.95
pac = 1-pa
pbgac = 0.05
print "Total probability of P(B) is " + \
str(0.001*0.95 + 0.05* 0.999)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import mpld3
mpld3.enable_notebook()
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
ax.grid(color='white', linestyle='solid')
N = 50
scatter = ax.scatter(np.random.normal(size=N),
np.random.normal(size=N),
c=np.random.random(size=N),
s = 1000 * np.random.random(size=N),
alpha=0.3,
cmap=plt.cm.jet)
ax.set_title("D3 Scatter Plot", size=18);
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 你被檢出,但妳趁的事
|
14,377 | <ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from six import iteritems
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.legacy.disaggregate import CombinatorialOptimisation, FHMM
import nilmtk.utils
%matplotlib inline
rcParams['figure.figsize'] = (13, 6)
train = DataSet('/data/redd.h5')
test = DataSet('/data/redd.h5')
building = 1
train.set_window(end="2011-04-30")
test.set_window(start="2011-04-30")
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
train_elec.plot()
test_elec.mains().plot()
fridge_meter = train_elec['fridge']
fridge_df = next(fridge_meter.load())
fridge_df.head()
mains = train_elec.mains()
mains_df = next(mains.load())
mains_df.head()
top_5_train_elec = train_elec.submeters().select_top_k(k=5)
top_5_train_elec
def predict(clf, test_elec, sample_period, timezone):
pred = {}
gt= {}
# "ac_type" varies according to the dataset used.
# Make sure to use the correct ac_type before using the default parameters in this code.
for i, chunk in enumerate(test_elec.mains().load(physical_quantity = 'power', ac_type = 'apparent', sample_period=sample_period)):
chunk_drop_na = chunk.dropna()
pred[i] = clf.disaggregate_chunk(chunk_drop_na)
gt[i]={}
for meter in test_elec.submeters().meters:
# Only use the meters that we trained on (this saves time!)
gt[i][meter] = next(meter.load(physical_quantity = 'power', ac_type = 'active', sample_period=sample_period))
gt[i] = pd.DataFrame({k:v.squeeze() for k,v in iteritems(gt[i]) if len(v)}, index=next(iter(gt[i].values())).index).dropna()
# If everything can fit in memory
gt_overall = pd.concat(gt)
gt_overall.index = gt_overall.index.droplevel()
pred_overall = pd.concat(pred)
pred_overall.index = pred_overall.index.droplevel()
# Having the same order of columns
gt_overall = gt_overall[pred_overall.columns]
#Intersection of index
gt_index_utc = gt_overall.index.tz_convert("UTC")
pred_index_utc = pred_overall.index.tz_convert("UTC")
common_index_utc = gt_index_utc.intersection(pred_index_utc)
common_index_local = common_index_utc.tz_convert(timezone)
gt_overall = gt_overall.loc[common_index_local]
pred_overall = pred_overall.loc[common_index_local]
appliance_labels = [m for m in gt_overall.columns.values]
gt_overall.columns = appliance_labels
pred_overall.columns = appliance_labels
return gt_overall, pred_overall
classifiers = {'CO':CombinatorialOptimisation(), 'FHMM':FHMM()}
predictions = {}
sample_period = 120
for clf_name, clf in classifiers.items():
print("*"*20)
print(clf_name)
print("*" *20)
start = time.time()
# Note that we have given the sample period to downsample the data to 1 minute.
# If instead of top_5 we wanted to train on all appliance, we would write
# fhmm.train(train_elec, sample_period=60)
clf.train(top_5_train_elec, sample_period=sample_period)
end = time.time()
print("Runtime =", end-start, "seconds.")
gt, predictions[clf_name] = predict(clf, test_elec, sample_period, train.metadata['timezone'])
appliance_labels = [m.label() for m in gt.columns.values]
gt.columns = appliance_labels
predictions['CO'].columns = appliance_labels
predictions['FHMM'].columns = appliance_labels
gt.head()
predictions['CO'].head()
predictions['FHMM'].head()
predictions['CO']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
predictions['FHMM']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
? nilmtk.utils.compute_rmse
rmse = {}
for clf_name in classifiers.keys():
rmse[clf_name] = nilmtk.utils.compute_rmse(gt, predictions[clf_name])
rmse = pd.DataFrame(rmse)
rmse
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dividing data into train and test set
Step2: Let us use building 1 for demo purposes
Step3: Let's split data at April 30th
Step4: Visualizing the data
Step5: REDD data set has got appliance level data sampled every 3 or 4 seconds and mains data sampled every 1 second. Let us verify the same.
Step6: Since, both of these are sampled at different frequencies, we will downsample both to 1 minute resolution. We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.
Step7: Training and disaggregation
Step8: Train using 2 benchmarking algorithms - Combinatorial Optimisation (CO) and Factorial Hidden Markov Model (FHMM)
Step9: Using prettier labels!
Step10: Taking a look at the ground truth of top 5 appliance power consumption
Step11: Plotting the predictions against the actual usage
Step12: Comparing NILM algorithms (CO vs FHMM)
|
14,378 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import optimize
import pymc3 as pm
import theano as thno
import theano.tensor as T
# configure some basic options
sns.set(style="darkgrid", palette="muted")
pd.set_option('display.notebook_repr_html', True)
plt.rcParams['figure.figsize'] = 12, 8
np.random.seed(0)
#### cut & pasted directly from the fetch_hogg2010test() function
## identical to the original dataset as hardcoded in the Hogg 2010 paper
dfhogg = pd.DataFrame(np.array([[1, 201, 592, 61, 9, -0.84],
[2, 244, 401, 25, 4, 0.31],
[3, 47, 583, 38, 11, 0.64],
[4, 287, 402, 15, 7, -0.27],
[5, 203, 495, 21, 5, -0.33],
[6, 58, 173, 15, 9, 0.67],
[7, 210, 479, 27, 4, -0.02],
[8, 202, 504, 14, 4, -0.05],
[9, 198, 510, 30, 11, -0.84],
[10, 158, 416, 16, 7, -0.69],
[11, 165, 393, 14, 5, 0.30],
[12, 201, 442, 25, 5, -0.46],
[13, 157, 317, 52, 5, -0.03],
[14, 131, 311, 16, 6, 0.50],
[15, 166, 400, 34, 6, 0.73],
[16, 160, 337, 31, 5, -0.52],
[17, 186, 423, 42, 9, 0.90],
[18, 125, 334, 26, 8, 0.40],
[19, 218, 533, 16, 6, -0.78],
[20, 146, 344, 22, 5, -0.56]]),
columns=['id','x','y','sigma_y','sigma_x','rho_xy'])
## for convenience zero-base the 'id' and use as index
dfhogg['id'] = dfhogg['id'] - 1
dfhogg.set_index('id', inplace=True)
## standardize (mean center and divide by 1 sd)
dfhoggs = (dfhogg[['x','y']] - dfhogg[['x','y']].mean(0)) / dfhogg[['x','y']].std(0)
dfhoggs['sigma_y'] = dfhogg['sigma_y'] / dfhogg['y'].std(0)
dfhoggs['sigma_x'] = dfhogg['sigma_x'] / dfhogg['x'].std(0)
## create xlims ylims for plotting
xlims = (dfhoggs['x'].min() - np.ptp(dfhoggs['x'])/5
,dfhoggs['x'].max() + np.ptp(dfhoggs['x'])/5)
ylims = (dfhoggs['y'].min() - np.ptp(dfhoggs['y'])/5
,dfhoggs['y'].max() + np.ptp(dfhoggs['y'])/5)
## scatterplot the standardized data
g = sns.FacetGrid(dfhoggs, size=8)
_ = g.map(plt.errorbar, 'x', 'y', 'sigma_y', 'sigma_x', marker="o", ls='')
_ = g.axes[0][0].set_ylim(ylims)
_ = g.axes[0][0].set_xlim(xlims)
plt.subplots_adjust(top=0.92)
_ = g.fig.suptitle('Scatterplot of Hogg 2010 dataset after standardization', fontsize=16)
with pm.Model() as mdl_ols:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=100)
b1 = pm.Normal('b1_slope', mu=0, sd=100)
## Define linear model
yest = b0 + b1 * dfhoggs['x']
## Use y error from dataset, convert into theano variable
sigma_y = thno.shared(np.asarray(dfhoggs['sigma_y'],
dtype=thno.config.floatX), name='sigma_y')
## Define Normal likelihood
likelihood = pm.Normal('likelihood', mu=yest, sd=sigma_y, observed=dfhoggs['y'])
with mdl_ols:
## take samples
traces_ols = pm.sample(2000, tune=1000)
_ = pm.traceplot(traces_ols[-1000:], figsize=(12,len(traces_ols.varnames)*1.5),
lines={k: v['mean'] for k, v in pm.df_summary(traces_ols[-1000:]).iterrows()})
with pm.Model() as mdl_studentt:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=100)
b1 = pm.Normal('b1_slope', mu=0, sd=100)
## Define linear model
yest = b0 + b1 * dfhoggs['x']
## Use y error from dataset, convert into theano variable
sigma_y = thno.shared(np.asarray(dfhoggs['sigma_y'],
dtype=thno.config.floatX), name='sigma_y')
## define prior for Student T degrees of freedom
nu = pm.Uniform('nu', lower=1, upper=100)
## Define Student T likelihood
likelihood = pm.StudentT('likelihood', mu=yest, sd=sigma_y, nu=nu,
observed=dfhoggs['y'])
with mdl_studentt:
## take samples
traces_studentt = pm.sample(2000, tune=1000)
_ = pm.traceplot(traces_studentt[-1000:],
figsize=(12,len(traces_studentt.varnames)*1.5),
lines={k: v['mean'] for k, v in pm.df_summary(traces_studentt[-1000:]).iterrows()})
def logp_signoise(yobs, is_outlier, yest_in, sigma_y_in, yest_out, sigma_y_out):
'''
Define custom loglikelihood for inliers vs outliers.
NOTE: in this particular case we don't need to use theano's @as_op
decorator because (as stated by Twiecki in conversation) that's only
required if the likelihood cannot be expressed as a theano expression.
We also now get the gradient computation for free.
'''
# likelihood for inliers
pdfs_in = T.exp(-(yobs - yest_in + 1e-4)**2 / (2 * sigma_y_in**2))
pdfs_in /= T.sqrt(2 * np.pi * sigma_y_in**2)
logL_in = T.sum(T.log(pdfs_in) * (1 - is_outlier))
# likelihood for outliers
pdfs_out = T.exp(-(yobs - yest_out + 1e-4)**2 / (2 * (sigma_y_in**2 + sigma_y_out**2)))
pdfs_out /= T.sqrt(2 * np.pi * (sigma_y_in**2 + sigma_y_out**2))
logL_out = T.sum(T.log(pdfs_out) * is_outlier)
return logL_in + logL_out
with pm.Model() as mdl_signoise:
## Define weakly informative Normal priors to give Ridge regression
b0 = pm.Normal('b0_intercept', mu=0, sd=10, testval=pm.floatX(0.1))
b1 = pm.Normal('b1_slope', mu=0, sd=10, testval=pm.floatX(1.))
## Define linear model
yest_in = b0 + b1 * dfhoggs['x']
## Define weakly informative priors for the mean and variance of outliers
yest_out = pm.Normal('yest_out', mu=0, sd=100, testval=pm.floatX(1.))
sigma_y_out = pm.HalfNormal('sigma_y_out', sd=100, testval=pm.floatX(1.))
## Define Bernoulli inlier / outlier flags according to a hyperprior
## fraction of outliers, itself constrained to [0,.5] for symmetry
frac_outliers = pm.Uniform('frac_outliers', lower=0., upper=.5)
is_outlier = pm.Bernoulli('is_outlier', p=frac_outliers, shape=dfhoggs.shape[0],
testval=np.random.rand(dfhoggs.shape[0]) < 0.2)
## Extract observed y and sigma_y from dataset, encode as theano objects
yobs = thno.shared(np.asarray(dfhoggs['y'], dtype=thno.config.floatX), name='yobs')
sigma_y_in = thno.shared(np.asarray(dfhoggs['sigma_y'], dtype=thno.config.floatX),
name='sigma_y_in')
## Use custom likelihood using DensityDist
likelihood = pm.DensityDist('likelihood', logp_signoise,
observed={'yobs': yobs, 'is_outlier': is_outlier,
'yest_in': yest_in, 'sigma_y_in': sigma_y_in,
'yest_out': yest_out, 'sigma_y_out': sigma_y_out})
with mdl_signoise:
## two-step sampling to create Bernoulli inlier/outlier flags
step1 = pm.Metropolis([frac_outliers, yest_out, sigma_y_out, b0, b1])
step2 = pm.step_methods.BinaryGibbsMetropolis([is_outlier])
## take samples
traces_signoise = pm.sample(20000, step=[step1, step2], tune=10000, progressbar=True)
traces_signoise[-10000:]['b0_intercept']
_ = pm.traceplot(traces_signoise[-10000:], figsize=(12,len(traces_signoise.varnames)*1.5),
lines={k: v['mean'] for k, v in pm.df_summary(traces_signoise[-1000:]).iterrows()})
outlier_melt = pd.melt(pd.DataFrame(traces_signoise['is_outlier', -1000:],
columns=['[{}]'.format(int(d)) for d in dfhoggs.index]),
var_name='datapoint_id', value_name='is_outlier')
ax0 = sns.pointplot(y='datapoint_id', x='is_outlier', data=outlier_melt,
kind='point', join=False, ci=None, size=4, aspect=2)
_ = ax0.vlines([0,1], 0, 19, ['b','r'], '--')
_ = ax0.set_xlim((-0.1,1.1))
_ = ax0.set_xticks(np.arange(0, 1.1, 0.1))
_ = ax0.set_xticklabels(['{:.0%}'.format(t) for t in np.arange(0,1.1,0.1)])
_ = ax0.yaxis.grid(True, linestyle='-', which='major', color='w', alpha=0.4)
_ = ax0.set_title('Prop. of the trace where datapoint is an outlier')
_ = ax0.set_xlabel('Prop. of the trace where is_outlier == 1')
cutoff = 5
dfhoggs['outlier'] = np.percentile(traces_signoise[-1000:]['is_outlier'],cutoff, axis=0)
dfhoggs['outlier'].value_counts()
g = sns.FacetGrid(dfhoggs, size=8, hue='outlier', hue_order=[True,False],
palette='Set1', legend_out=False)
lm = lambda x, samp: samp['b0_intercept'] + samp['b1_slope'] * x
pm.plot_posterior_predictive_glm(traces_ols[-1000:],
eval=np.linspace(-3, 3, 10), lm=lm, samples=200, color='#22CC00', alpha=.2)
pm.plot_posterior_predictive_glm(traces_studentt[-1000:], lm=lm,
eval=np.linspace(-3, 3, 10), samples=200, color='#FFA500', alpha=.5)
pm.plot_posterior_predictive_glm(traces_signoise[-1000:], lm=lm,
eval=np.linspace(-3, 3, 10), samples=200, color='#357EC7', alpha=.3)
_ = g.map(plt.errorbar, 'x', 'y', 'sigma_y', 'sigma_x', marker="o", ls='').add_legend()
_ = g.axes[0][0].annotate('OLS Fit: Green\nStudent-T Fit: Orange\nSignal Vs Noise Fit: Blue',
size='x-large', xy=(1,0), xycoords='axes fraction',
xytext=(-160,10), textcoords='offset points')
_ = g.axes[0][0].set_ylim(ylims)
_ = g.axes[0][0].set_xlim(xlims)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and Prepare Data
Step2: Observe
Step3: Sample
Step4: View Traces
Step5: NOTE
Step6: Sample
Step7: View Traces
Step8: Observe
Step9: Sample
Step10: View Traces
Step11: NOTE
Step12: Observe
Step13: Posterior Prediction Plots for OLS vs StudentT vs SignalNoise
|
14,379 | <ASSISTANT_TASK:>
Python Code:
import io, os, sys, types
from IPython.nbformat import current
from IPython.core.interactiveshell import InteractiveShell
def find_notebook(fullname, path=None):
    """find a notebook, given its fully qualified name and an optional path
    This turns "foo.bar" into "foo/bar.ipynb"
    and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar
    does not exist.
    """
name = fullname.rsplit('.', 1)[-1]
if not path:
path = ['']
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
class NotebookLoader(object):
    """Module Loader for IPython Notebooks"""
def __init__(self, path=None):
self.shell = InteractiveShell.instance()
self.path = path
def load_module(self, fullname):
        """import a notebook as a module"""
path = find_notebook(fullname, self.path)
print ("importing IPython notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = current.read(f, 'json')
# create the module and add it to sys.modules
# if name in sys.modules:
# return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb.worksheets[0].cells:
if cell.cell_type == 'code' and cell.language == 'python':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell.input)
# run the code in themodule
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod
class NotebookFinder(object):
    """Module finder that locates IPython Notebooks"""
def __init__(self):
self.loaders = {}
def find_module(self, fullname, path=None):
nb_path = find_notebook(fullname, path)
if not nb_path:
return
key = path
if path:
# lists aren't hashable
key = os.path.sep.join(path)
if key not in self.loaders:
self.loaders[key] = NotebookLoader(path)
return self.loaders[key]
sys.meta_path.append(NotebookFinder())
ls nbpackage
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from IPython.display import display, HTML
formatter = HtmlFormatter()
lexer = PythonLexer()
# publish the CSS for pygments highlighting
display(HTML("""
<style type='text/css'>
%s
</style>
""" % formatter.get_style_defs()
))
def show_notebook(fname):
    """display a short summary of the cells of a notebook"""
with io.open(fname, 'r', encoding='utf-8') as f:
nb = current.read(f, 'json')
html = []
for cell in nb.worksheets[0].cells:
html.append("<h4>%s cell</h4>" % cell.cell_type)
if cell.cell_type == 'code':
html.append(highlight(cell.input, lexer, formatter))
else:
html.append("<pre>%s</pre>" % cell.source)
display(HTML('\n'.join(html)))
show_notebook(os.path.join("nbpackage", "mynotebook.ipynb"))
from nbpackage import mynotebook
mynotebook.foo()
mynotebook.has_ip_syntax()
ls nbpackage/nbs
show_notebook(os.path.join("nbpackage", "nbs", "other.ipynb"))
from nbpackage.nbs import other
other.bar(5)
import shutil
from IPython.utils.path import get_ipython_package_dir
utils = os.path.join(get_ipython_package_dir(), 'utils')
shutil.copy(os.path.join("nbpackage", "mynotebook.ipynb"),
os.path.join(utils, "inside_ipython.ipynb")
)
from IPython.utils import inside_ipython
inside_ipython.whatsmyname()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Import hooks typically take the form of two objects
Step5: Notebook Loader
Step7: The Module Finder
Step8: Register the hook
Step9: After this point, my notebooks should be importable.
Step12: So I should be able to import nbimp.mynotebook.
Step13: So my notebook has a heading cell and some code cells,
Step14: Hooray, it imported! Does it work?
Step15: Hooray again!
Step16: Notebooks in packages
Step17: Note that the __init__.py is necessary for nb to be considered a package,
Step18: So now we have importable notebooks, from both the local directory and inside packages.
Step19: and import the notebook from IPython.utils
|
14,380 | <ASSISTANT_TASK:>
Python Code:
PATH=Config().data_path()/Path('rossmann/')
table_names = ['train', 'store', 'store_states', 'state_names', 'googletrend', 'weather', 'test']
tables = [pd.read_csv(PATH/f'{fname}.csv', low_memory=False) for fname in table_names]
train, store, store_states, state_names, googletrend, weather, test = tables
len(train),len(test)
train.StateHoliday = train.StateHoliday!='0'
test.StateHoliday = test.StateHoliday!='0'
def join_df(left, right, left_on, right_on=None, suffix='_y'):
if right_on is None: right_on = left_on
return left.merge(right, how='left', left_on=left_on, right_on=right_on,
suffixes=("", suffix))
weather = join_df(weather, state_names, "file", "StateName")
googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]
googletrend['State'] = googletrend.file.str.split('_', expand=True)[2]
googletrend.loc[googletrend.State=='NI', "State"] = 'HB,NI'
def add_datepart(df, fldname, drop=True, time=False):
"Helper function that adds columns relevant to a date."
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
add_datepart(weather, "Date", drop=False)
add_datepart(googletrend, "Date", drop=False)
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
trend_de = googletrend[googletrend.file == 'Rossmann_DE']
store = join_df(store, store_states, "Store")
len(store[store.State.isnull()])
joined = join_df(train, store, "Store")
joined_test = join_df(test, store, "Store")
len(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])
joined = join_df(joined, googletrend, ["State","Year", "Week"])
joined_test = join_df(joined_test, googletrend, ["State","Year", "Week"])
len(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])
joined = joined.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
joined_test = joined_test.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
len(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])
joined = join_df(joined, weather, ["State","Date"])
joined_test = join_df(joined_test, weather, ["State","Date"])
len(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])
for df in (joined, joined_test):
for c in df.columns:
if c.endswith('_y'):
if c in df.columns: df.drop(c, inplace=True, axis=1)
for df in (joined,joined_test):
df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)
df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)
df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)
df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)
for df in (joined,joined_test):
df["CompetitionOpenSince"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear,
month=df.CompetitionOpenSinceMonth, day=15))
df["CompetitionDaysOpen"] = df.Date.subtract(df.CompetitionOpenSince).dt.days
for df in (joined,joined_test):
df.loc[df.CompetitionDaysOpen<0, "CompetitionDaysOpen"] = 0
df.loc[df.CompetitionOpenSinceYear<1990, "CompetitionDaysOpen"] = 0
for df in (joined,joined_test):
df["CompetitionMonthsOpen"] = df["CompetitionDaysOpen"]//30
df.loc[df.CompetitionMonthsOpen>24, "CompetitionMonthsOpen"] = 24
joined.CompetitionMonthsOpen.unique()
# If needed, uncomment:
# ! pip install isoweek
from isoweek import Week
for df in (joined,joined_test):
df["Promo2Since"] = pd.to_datetime(df.apply(lambda x: Week(
x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1))
df["Promo2Days"] = df.Date.subtract(df["Promo2Since"]).dt.days
for df in (joined,joined_test):
df.loc[df.Promo2Days<0, "Promo2Days"] = 0
df.loc[df.Promo2SinceYear<1990, "Promo2Days"] = 0
df["Promo2Weeks"] = df["Promo2Days"]//7
df.loc[df.Promo2Weeks<0, "Promo2Weeks"] = 0
df.loc[df.Promo2Weeks>25, "Promo2Weeks"] = 25
df.Promo2Weeks.unique()
joined.to_pickle(PATH/'joined')
joined_test.to_pickle(PATH/'joined_test')
def get_elapsed(fld, pre):
day1 = np.timedelta64(1, 'D')
last_date = np.datetime64()
last_store = 0
res = []
for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):
if s != last_store:
last_date = np.datetime64()
last_store = s
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[pre+fld] = res
columns = ["Date", "Store", "Promo", "StateHoliday", "SchoolHoliday"]
#df = train[columns]
df = train[columns].append(test[columns])
fld = 'SchoolHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'StateHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'Promo'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
df = df.set_index("Date")
columns = ['SchoolHoliday', 'StateHoliday', 'Promo']
for o in ['Before', 'After']:
for p in columns:
a = o+p
df[a] = df[a].fillna(0).astype(int)
bwd = df[['Store']+columns].sort_index().groupby("Store").rolling(7, min_periods=1).sum()
fwd = df[['Store']+columns].sort_index(ascending=False
).groupby("Store").rolling(7, min_periods=1).sum()
bwd.drop('Store',1,inplace=True)
bwd.reset_index(inplace=True)
fwd.drop('Store',1,inplace=True)
fwd.reset_index(inplace=True)
df.reset_index(inplace=True)
df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])
df = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])
df.drop(columns,1,inplace=True)
df.head()
df.to_pickle(PATH/'df')
df["Date"] = pd.to_datetime(df.Date)
df.columns
joined = pd.read_pickle(PATH/'joined')
joined_test = pd.read_pickle(PATH/f'joined_test')
joined = join_df(joined, df, ['Store', 'Date'])
joined_test = join_df(joined_test, df, ['Store', 'Date'])
joined = joined[joined.Sales!=0]
joined.reset_index(inplace=True)
joined_test.reset_index(inplace=True)
joined.to_pickle(PATH/'train_clean')
joined_test.to_pickle(PATH/'test_clean')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We turn state Holidays to booleans, to make them more convenient for modeling. We can do calculations on pandas fields using notation very similar (often identical) to numpy.
Step2: join_df is a function for joining tables on specific fields. By default, we'll be doing a left outer join of right on the left argument using the given fields for each table.
Step3: Join weather/state names.
Step4: In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.
Step5: The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.
Step6: The Google trends data has a special category for the whole of the Germany - we'll pull that out so we can use it explicitly.
Step7: Now we can outer join all of our data into a single dataframe. Recall that in outer joins everytime a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.
Step8: Next we'll fill in missing values to avoid complications with NA's. NA (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary signal value that doesn't otherwise appear in the data.
Step9: Next we'll extract features "CompetitionOpenSince" and "CompetitionDaysOpen". Note the use of apply() in mapping a function across dataframe values.
Step10: We'll replace some erroneous / outlying data.
Step11: We add "CompetitionMonthsOpen" field, limiting the maximum to 2 years to limit number of unique categories.
Step12: Same process for Promo dates. You may need to install the isoweek package first.
Step13: Durations
Step14: We'll be applying this to a subset of columns
Step15: Let's walk through an example.
Step16: We'll do this for two more fields.
Step17: We're going to set the active index to Date.
Step18: Then set null values from elapsed field calculations to 0.
Step19: Next we'll demonstrate window functions in pandas to calculate rolling quantities.
Step20: Next we want to drop the Store indices grouped together in the window function.
Step21: Now we'll merge these values onto the df.
Step22: It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.
Step23: The authors also removed all instances where the store had zero sales or was closed. We speculate that this may have cost them a higher standing in the competition. One reason is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment, and there are naturally spikes in sales just before and after these periods. By omitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.
Step24: We'll back this up as well.
|
14,381 | <ASSISTANT_TASK:>
Python Code:
import numpy
import pandas
from matplotlib import pyplot
import pycwt
%matplotlib inline
url = '../dat/GSPC.csv.gz'
dat = pandas.read_csv(url, index_col=0, parse_dates=[0])
dat.describe()
fig = pyplot.figure(figsize=[8, 4])
ax = dat['Close'].plot()
ax.set_ylabel('Close')
ax.grid('on')
fig.tight_layout()
dat['LnClose'] = dat['Close'].apply(lambda x: numpy.log(x))
fig = pyplot.figure(figsize=[8, 4])
ax = dat['LnClose'].plot()
ax.set_ylabel('ln(Close)')
ax.grid('on')
fig.tight_layout()
p = numpy.polyfit(dat.index.values.astype(float), dat['LnClose'].values, 1)
dat['LnCloseTrend'] = numpy.polyval(p, dat.index.values.astype(float))
fig = pyplot.figure(figsize=[8, 4])
ax = dat['LnClose'].plot()
_ = dat['LnCloseTrend'].plot(ax=ax)
ax.set_ylabel('ln(Close)')
ax.grid('on')
fig.tight_layout()
dat['LnCloseDetrend'] = dat['LnClose'] - dat['LnCloseTrend']
fig = pyplot.figure(figsize=[8, 4])
ax = dat['LnCloseDetrend'].plot()
ax.axhline(dat['LnCloseDetrend'].std(), color='#333333', ls='--', zorder=1)
ax.axhline(-dat['LnCloseDetrend'].std(), color='#333333', ls='--', zorder=1)
ax.set_ylabel('ln(Close) - Trend')
ax.grid('on')
fig.tight_layout()
dat['LnCloseDetrendSmooth'] = dat['LnCloseDetrend'].rolling(33, center=True, win_type='blackman').mean().fillna(0)
fig = pyplot.figure(figsize=[8, 4])
ax = dat['LnCloseDetrendSmooth'].plot()
ax.axhline(dat['LnCloseDetrend'].std(), color='#333333', ls='--', zorder=1)
ax.axhline(-dat['LnCloseDetrend'].std(), color='#333333', ls='--', zorder=1)
ax.set_ylabel('ln(Close) - Trend')
ax.grid('on')
fig.tight_layout()
t = dat.index.values
y = dat['LnCloseDetrendSmooth'].values
std = y.std() # Desvio padrão
var = std ** 2 # Variância
y_norm = y / std
N = len(y_norm)
# A ondaleta-mãe
mother = pycwt.Morlet(6)
# Intervalo de amostragem em anos (não consideramos finais de semana, tampouco
# feriados)
dt = 1 / 365
# Escala inicial
s0 = 15 * dt
# Duas sub-oitavas por oitava
dj = 1 / 4
# Onze potências de dois, com `dj` sub-oitavas
J = 8 / dj
# Coeficiente de autocorrelação de lag-1 para estimar ruído
alpha, _, _ = pycwt.ar1(y_norm)
print('O coeficiente the correlação de lag-1 é {:.2f}'.format(alpha))
wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(y_norm, dt, dj, s0, J,
mother)
power = (numpy.abs(wave)) ** 2
fft_power = numpy.abs(fft) ** 2
period = 1 / freqs
# Retificação do espectro de potência segundo Liu et al. (2007)
power /= scales[:, None]
signif, fft_theor = pycwt.significance(1.0, dt, scales, 0, alpha,
significance_level=0.95, wavelet=mother)
sig95 = numpy.ones([1, N]) * signif[:, None]
sig95 = power / sig95
glbl_power = power.mean(axis=1)
dof = N - scales # Correção nas bordas
glbl_signif, tmp = pycwt.significance(var, dt, scales, 1, alpha,
significance_level=0.95, dof=dof,
wavelet=mother)
fig = pyplot.figure(figsize=[8, 4])
ax = pyplot.subplot2grid((1, 4), (0, 0), colspan=3)
bx = pyplot.subplot2grid((1, 4), (0, 3), colspan=1, sharey=ax)
# Espectro de potência de ondaletas
# ---------------------------------
levels = [2. ** i for i in numpy.arange(-4, 5)]
ax.contourf(t, numpy.log2(period), numpy.log2(power), numpy.log2(levels),
extend='both')
extent = [t.min(), t.max(), 0, max(period)]
ax.contour(t, numpy.log2(period), sig95, [-99, 1], colors='k', linewidths=2,
extent=extent)
DT = numpy.timedelta64(1, 'D')
ax.fill(numpy.concatenate([t, t[-1:] + DT, t[-1:] + DT,
t[:1] - DT, t[:1] - DT]),
numpy.concatenate([numpy.log2(coi), [1e-9], numpy.log2(period[-1:]),
numpy.log2(period[-1:]), [1e-9]]),
'k', alpha=0.3, hatch='x')
ax.set_title('Espectro de potência ({})'.format(mother.name))
ax.set_ylabel('Período (anos)')
#
ylim = [numpy.ceil(numpy.log2(period.min())),
numpy.floor(numpy.log2(period.max()))]
ax.set_ylim(ylim)
Yticks = 2 ** numpy.arange(*ylim)
ax.set_yticks(numpy.log2(Yticks))
ax.set_yticklabels(Yticks)
# Espectro de ondaletas global e espectro de Fourier
# --------------------------------------------------
bx.semilogx(fft_power, numpy.log2(1./fftfreqs), '-', color='#cccccc',
linewidth=1.)
bx.semilogx(glbl_power, numpy.log2(period), 'k-', linewidth=1.5)
bx.semilogx(glbl_signif, numpy.log2(period), 'k--')
bx.semilogx(fft_theor, numpy.log2(period), '--', color='#cccccc')
bx.set_title('Espectro de\nondaletas global')
bx.set_xlabel(r'Potência normalizada')
#bx.set_xlim([0, 0.1])
bx.set_ylim(numpy.log2([period.min(), period.max()]))
bx.set_yticks(numpy.log2(Yticks))
bx.set_yticklabels(Yticks)
pyplot.setp(bx.get_yticklabels(), visible=False)
# Perfumarias
ax.grid('on')
bx.grid('on')
pyplot.tight_layout()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Série temporal S&P 500
Step2: Nota-se que o índice possui um aumento com tendência exponencial e dois instantes de queda abrupta -- durante as crises de 2002 e 2008. A série temporal tende a aumentar. Isto dificulta a análise espectral, pois assume-se que os dados são estacionários. Vamos fazer uma transformação logarítmica da série original.
Step3: A série transformada apresenta uma certa tendência de aumento linear. Vamos determinar os parâmetros que melhor ajustam uma reta aos dados.
Step4: No próximo passo vamos remover a tendência da série temporal transformada.
Step5: Esta série temporal possui ruídos causados pelas flutuações diárias. Estas flutuações de alta frequência podem ser suavizados aplicando-se um filtro de janela móvel. Neste caso, aplicaremos a janela móvel do tipo Blackman
Step6: A transformada de ondaletas
Step7: Em seguida calculamos a transformada de ondaletas, os espectros de potência normalizados de ondaleta e de Fourier, bem como os períodos de Fourier equivalentes para cada escala.
Step8: Agora os testes de significância.
Step9: E o espectro de ondaleta global.
Step10: Finalmente estamos prontos para exibir nossos resultados.
|
14,382 | <ASSISTANT_TASK:>
Python Code:
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
def front_x(words):
# +++your code here+++
return
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
def sort_last(tuples):
# +++your code here+++
return
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
def linear_merge(list1, list2):
# +++your code here+++
return
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fill in the code for the functions below. main() is already set up
Step2: B. sort_last
Step3: C. linear_merge
|
14,383 | <ASSISTANT_TASK:>
Python Code:
# modules
from keras.layers import Input, Dense, Dropout
from keras.models import Model
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from __future__ import print_function
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from sklearn import preprocessing
from keras import layers
from keras import initializers
from matplotlib import axes
from matplotlib import rc
import keras
import matplotlib.pyplot as plt
import numpy as np
import math
import pydot
import graphviz
import pandas as pd
import IPython
%matplotlib inline
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 20}
rc('font', **font)
# import
data_raw = pd.read_csv('data/sensor_readings_24.csv', sep=",", header=None)
data = data_raw.copy()
data.head()
df_tab = data_raw
df_tab[24] = df_tab[24].astype('category')
tab = pd.crosstab(index=df_tab[24], columns="frequency")
tab.index.name = 'Class/Direction'
tab/tab.sum()
mapping = {key: value for (key, value) in zip(data[24].unique(), range(len(data[24].unique())))}
print(mapping)
data.replace({24:mapping}, inplace=True)
data[24].unique()
data_train = data.sample(frac=0.9, random_state=42)
data_val = data.drop(data_train.index)
df_x_train = data_train.iloc[:,:-1]
df_y_train = data_train.iloc[:,-1]
df_x_val = data_val.iloc[:,:-1]
df_y_val = data_val.iloc[:,-1]
x_train = df_x_train.values
x_train = (x_train - x_train.min()) / (x_train.max() - x_train.min())
y_train = df_y_train.values
x_val = df_x_val.values
x_val = (x_val - x_val.min()) / (x_val.max() - x_val.min())
y_val = df_y_val.values
y_eval = y_val
y_train = keras.utils.to_categorical(y_train, 4)
y_val = keras.utils.to_categorical(y_val, 4)
epochsize = 150
batchsize = 24
shuffle = False
dropout = 0.1
num_classes = 4
input_dim = x_train.shape[1]
hidden1_dim = 30
hidden2_dim = 30
class_names = mapping.keys()
input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')
hidden_layer1 = Dense(hidden1_dim, activation='relu', input_shape=(input_dim,), kernel_initializer='normal')(input_data)
dropout1 = Dropout(dropout)(hidden_layer1)
hidden_layer2 = Dense(hidden2_dim, activation='relu', input_shape=(input_dim,), kernel_initializer='normal')(dropout1)
dropout2 = Dropout(dropout)(hidden_layer2)
output_layer = Dense(num_classes, activation='softmax', kernel_initializer='normal')(dropout2)
model = Model(inputs=input_data, outputs=output_layer)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
plot_model(model, to_file='images/robo1_nn.png', show_shapes=True, show_layer_names=True)
IPython.display.Image("images/robo1_nn.png")
model.fit(x_train, y_train,
batch_size=batchsize,
epochs=epochsize,
verbose=0,
shuffle=shuffle,
validation_split=0.05)
nn_score = model.evaluate(x_val, y_val)[1]
print(nn_score)
fig = plt.figure(figsize=(20,10))
plt.plot(model.history.history['val_acc'])
plt.plot(model.history.history['acc'])
plt.axhline(y=nn_score, c="red")
plt.text(0, nn_score, "test: " + str(round(nn_score, 4)), fontdict=font)
plt.title('model accuracy for neural net with 2 hidden layers')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['train', 'valid'], loc='lower right')
plt.show()
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_eval, model.predict(x_val).argmax(axis=-1))
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure(figsize=(20,10))
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure(figsize=(20,10))
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
IPython.display.Image("images/2018-01-25 18_44_01-PubMed Central, Table 2_ Sensors (Basel). 2017 Mar; 17(3)_ 549. Published online.png")
encoder_dim = 8
hidden1_dim = 30
hidden2_dim = 30
main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
encoding_layer = Dense(encoder_dim, activation='relu', kernel_initializer='normal')
encoding_layer_output = encoding_layer(main_input)
decoding_layer_output = Dense(input_dim
,activation='sigmoid'
,name='decoder_output'
,kernel_initializer='normal')(encoding_layer_output)
x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(encoding_layer_output)
x = Dropout(dropout)(x)
x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x)
x = Dropout(dropout)(x)
classifier_output = Dense(num_classes
,activation='softmax'
,name='main_output'
,kernel_initializer='normal')(x)
auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])
auto_classifier.compile(optimizer=RMSprop(),
loss={'main_output': 'categorical_crossentropy', 'decoder_output': 'mean_squared_error'},
loss_weights={'main_output': 1., 'decoder_output': 1.},
metrics=['accuracy'])
plot_model(auto_classifier, to_file='images/robo4_auto_class_LR.png', show_shapes=True, show_layer_names=True)
IPython.display.Image("images/robo4_auto_class_LR.png")
auto_classifier.fit({'main_input': x_train},
{'main_output': y_train, 'decoder_output': x_train},
epochs=epochsize,
batch_size=batchsize,
shuffle=shuffle,
validation_split=0.05,
verbose=0)
score = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
print(score)
fig = plt.figure(figsize=(20,10))
plt.plot(auto_classifier.history.history['val_main_output_acc'])
plt.plot(auto_classifier.history.history['main_output_acc'])
plt.axhline(y=score, c="red")
plt.text(0, score, "test: " + str(round(score, 4)), fontdict=font)
plt.title('model accuracy for ' + str(round(input_dim/encoder_dim, 2)) + ' x compression with single layer autoencoder')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['train', 'valid'], loc='lower right')
plt.show()
encoder_dim1 = 16
encoder_dim2 = 8
decoder_dim1 = 16
main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
encoding_layer1 = Dense(encoder_dim1, activation='relu', kernel_initializer='normal')(main_input)
encoding_layer2 = Dense(encoder_dim2, activation='relu', kernel_initializer='normal')(encoding_layer1)
decoding_layer1 = Dense(decoder_dim1
,activation='relu'
,kernel_initializer='normal')(encoding_layer2)
decoding_layer2 = Dense(input_dim
,activation='sigmoid'
,name='decoder_output'
,kernel_initializer='normal')(decoding_layer1)
x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(encoding_layer2)
x = Dropout(dropout)(x)
x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x)
x = Dropout(dropout)(x)
classifier_output = Dense(num_classes
,activation='softmax'
,name='main_output'
,kernel_initializer='normal')(x)
stacked_auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer2])
stacked_auto_classifier.compile(optimizer=RMSprop(),
loss={'main_output': 'categorical_crossentropy', 'decoder_output': 'mean_squared_error'},
loss_weights={'main_output': 1., 'decoder_output': 1.},
metrics=['accuracy'])
plot_model(stacked_auto_classifier, to_file='images/stacked__auto_class.png', show_shapes=True, show_layer_names=True)
IPython.display.Image("images/stacked__auto_class.png")
stacked_auto_classifier.fit({'main_input': x_train},
{'main_output': y_train, 'decoder_output': x_train},
epochs=epochsize,
batch_size=batchsize,
shuffle=shuffle,
validation_split=0.05,
verbose=0)
stacked_score = stacked_auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
print(stacked_score)
fig = plt.figure(figsize=(20,10))
plt.plot(stacked_auto_classifier.history.history['val_main_output_acc'])
plt.plot(stacked_auto_classifier.history.history['main_output_acc'])
plt.axhline(y=stacked_score, c="red")
plt.text(0, stacked_score, "test: " + str(round(stacked_score, 4)), fontdict=font)
plt.title('model accuracy for ' + str(round(input_dim/encoder_dim, 2)) + ' x compression with stacked autoencoder')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['train', 'valid'], loc='lower right')
plt.show()
# the initial coding dimension s.t. there is no feature selection at the beginning
encoding_dim = input_dim
result3 = {'encoding_dim': []
,'auto_classifier_acc': []}
while encoding_dim > 0:
main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
encoding_layer = Dense(encoding_dim, activation='relu', name='encoder', kernel_initializer='normal')
encoding_layer_output = encoding_layer(main_input)
decoding_layer_output = Dense(input_dim, activation='sigmoid'
,name='decoder_output'
,kernel_initializer='normal')(encoding_layer_output)
x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(encoding_layer_output)
x = Dropout(dropout)(x)
x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x)
x = Dropout(dropout)(x)
classifier_output = Dense(num_classes, activation='softmax', name='main_output', kernel_initializer='normal')(x)
auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])
auto_classifier.compile(optimizer=RMSprop(),
loss={'main_output': 'categorical_crossentropy', 'decoder_output': 'mean_squared_error'},
loss_weights={'main_output': 1., 'decoder_output': 1.},
metrics=['accuracy'])
auto_classifier.fit({'main_input': x_train},
{'main_output': y_train, 'decoder_output': x_train},
epochs=epochsize,
batch_size=batchsize,
shuffle=shuffle,
validation_split=0.05,
verbose=0)
accuracy = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
result3['encoding_dim'].append(encoding_dim)
result3['auto_classifier_acc'].append(accuracy)
encoding_dim -=1
print(result3)
result_df = pd.DataFrame(result3)
result_df['neural_net_acc'] = nn_score
result_df
fig = plt.figure(figsize=(20,10))
plt.bar(result_df['encoding_dim'], result_df['auto_classifier_acc'])
plt.axhline(y=result_df['neural_net_acc'][0], c="red")
plt.text(0, result_df['neural_net_acc'][0], "best neural net: " + str(round(result_df['neural_net_acc'][0], 4))
,fontdict=font)
plt.title('model accuracy for different encoding dimensions')
plt.ylabel('accuracy')
plt.xlabel('dimension')
plt.ylim(0.6, 1)
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_eval, auto_classifier.predict(x_val)[0].argmax(axis=-1))
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure(figsize=(20,10))
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure(figsize=(20,10))
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
result_df.to_csv('results/robo_results.csv')
encoding_weights = encoding_layer.get_weights()
sum_of_weights = {index: item.sum() for (index, item) in enumerate(encoding_weights[0])}
weights = sum_of_weights
features = []
for i in range(encoder_dim1):
max_key = max(weights, key=lambda key: weights[key])
features.append(max_key)
del weights[max_key]
print(features)
x_train_selected = np.array([x[features] for x in x_train])
x_val_selected = np.array([x[features] for x in x_val])
input_dim = x_train_selected.shape[1]
hidden1_dim = 26
hidden2_dim = 26
result3 = []
for i in range(1,4):
model_new = Sequential()
model_new.add(Dense(hidden1_dim, activation='relu', input_shape=(input_dim,)))
model_new.add(Dense(hidden2_dim, activation='relu'))
model_new.add(Dense(num_classes, activation='softmax'))
model_new.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
model_new.fit(x_train_selected, y_train,
batch_size=batchsize,
epochs=epochsize,
verbose=0,
shuffle=shuffle,
validation_split=0.1)
score = model_new.evaluate(x_val_selected, y_val)[1]
result3.append(score)
print(result3)
print(np.mean(result3))
# the initial coding dimension s.t. there is no feature selection at the beginning
encoding_dim = 24
# dimension of the neural net layer1
hidden1_dim = 30
# dimension of the second neural net layer
hidden2_dim = 30
epoch_size = 150
batch_size = 24
shuffle = False
result2 = {'encoding_dim/features': []
,'compression_level': []
,'auto_classifier_acc': []
,'selected_classifier_acc': []
,'features': []}
while encoding_dim > 0:
main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
encoding_layer = Dense(encoding_dim, activation='relu', name='encoder', kernel_initializer='normal')
encoding_layer_output = encoding_layer(main_input)
decoding_layer_output = Dense(input_dim, activation='sigmoid'
,name='decoder_output'
,kernel_initializer='normal')(encoding_layer_output)
x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(encoding_layer_output)
x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x)
classifier_output = Dense(num_classes, activation='softmax', name='main_output', kernel_initializer='normal')(x)
auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])
auto_classifier.compile(optimizer=RMSprop(),
loss={'main_output': 'categorical_crossentropy', 'decoder_output': 'mean_squared_error'},
loss_weights={'main_output': 1., 'decoder_output': 1.},
metrics=['accuracy'])
auto_classifier.fit({'main_input': x_train},
{'main_output': y_train, 'decoder_output': x_train},
epochs=epoch_size,
batch_size=batch_size,
shuffle=shuffle,
validation_split=0.1,
verbose=0)
accuracy = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
result2['encoding_dim/features'].append(encoding_dim)
result2['compression_level'].append(1 - encoding_dim/24)
result2['auto_classifier_acc'].append(accuracy)
encoding_weights = encoding_layer.get_weights()
sum_of_weights = {index: item.sum() for (index, item) in enumerate(encoding_weights[0])}
weights = sum_of_weights
features = []
for i in range(encoding_dim):
max_key = max(weights, key=lambda key: weights[key])
features.append(max_key)
del weights[max_key]
result2['features'].append(features)
x_train_selected = np.array([x[features] for x in x_train])
x_val_selected = np.array([x[features] for x in x_val])
input_dim_new = x_train_selected.shape[1]
accuracy_list = []
for i in range(1):
model_new = Sequential()
model_new.add(Dense(hidden1_dim, activation='relu', input_shape=(input_dim_new,), kernel_initializer='normal'))
model_new.add(Dense(hidden2_dim, activation='relu', kernel_initializer='normal'))
model_new.add(Dense(num_classes, activation='softmax', kernel_initializer='normal'))
model_new.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
model_new.fit(x_train_selected, y_train,
batch_size=batch_size,
epochs=epoch_size,
verbose=0,
shuffle=shuffle,
validation_split=0.1)
score = model_new.evaluate(x_val_selected, y_val)[1]
accuracy_list.append(score)
result2['selected_classifier_acc'].append(np.mean(accuracy_list))
encoding_dim -=1
print(result2)
auto_classifier.summary()
plot_model(auto_classifier, to_file='images/robo2_auto_class_LR.png', show_shapes=True, show_layer_names=True, rankdir='LR')
IPython.display.Image("images/robo2_auto_class_LR.png")
result_df = pd.DataFrame(result2)
result_df['neural_net_acc'] = 0.949938949939
result_df
result_df.to_csv('results/robo_results.csv')
result_df.plot(x='encoding_dim/features', y=['selected_classifier_acc', 'neural_net_acc'], kind='bar', figsize=(20,10))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import and basic data inspection
Step2: The dataframe contains only positive values, and the classes are encoded as strings in the column with index 24
Step3: What's the distribution of the classes?
Step4: The Move_Forward and Sharp-Right-Turn classes together account for nearly 80% of all observations. So the accuracy may still be high, at around 75%, even when most of the features are eliminated.
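As a quick sanity check of this claim, one could look at the label frequencies directly (a sketch only; the name data for the full dataframe is an assumption):
class_share = data.iloc[:, -1].value_counts(normalize=True)  # 'data' assumed to be the full dataframe
print(class_share)
print('top two classes cover {:.1%} of the rows'.format(class_share.iloc[:2].sum()))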
Step5: 1. Take a random sample of 90% of the rows from the dataframe. To ensure reproducibility the random_state variable is set. The other 10% are set aside for validation after training. The last column is the class column and is stored in the respective y variables.
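A minimal sketch of that split, assuming the full dataframe is called data and picking random_state=42 as an arbitrary seed (variable names match the code above):
data_train = data.sample(frac=0.9, random_state=42)   # 'data' and the seed value are assumptions
data_val = data.drop(data_train.index)                # remaining 10% kept for validation
df_x_train, df_y_train = data_train.iloc[:, :-1], data_train.iloc[:, -1]
df_x_val, df_y_val = data_val.iloc[:, :-1], data_val.iloc[:, -1]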
Step6: 2. Normalization between 0 and 1
Step7: 3. Make useful categorical variables out of the single column data by one-hot encoding it.
Step8: 4. Set Global Parameters
Step10: Train Neural Net
Step11: Comparison
Step12: One can easily see that our results are better.
Step13: Dimensionality reduction with stacked (multi hidden layer) autoencoder
Step14: What's the best dimensionality reduction with a single autoencoder?
Step15: Prediction for a classifier with a dimension of 16 (accuracy 0.9084)
Step16: Experimental area (not for presentation)
Step17: Check the neural net performance with new selected features
Step18: Finding good features
Step19: The idea is
Step20: Summary
Step21: Architecture
Step22: Results
|
14,384 | <ASSISTANT_TASK:>
Python Code:
import math
def gamblers_ruin(i, p, q, N):
if math.isclose(p,q):
return i/N
else:
return ((1 - (q/p)**i)) / (1 - (q/p)**N)
p = 0.49
q = 1.0 - p
N = 20
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
N = 100
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
N = 200
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
p = 0.5
q = 1.0 - p
N = 20
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
N = 100
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
N = 200
i = N/2
print("With N={} and p={}, probability that A wins all is {:.2f}".format(N, p, gamblers_ruin(i, p, q, N)))
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
from scipy.stats import binom
%matplotlib inline
plt.xkcd()
_, ax = plt.subplots(figsize=(12,8))
# a few Binomial parameters n and p
pop_sizes = [240, 120, 60, 24]
p_values = [0.2, 0.3, 0.4, 0.8]
params = list(zip(pop_sizes, p_values))
# colorblind-safe, qualitative color scheme
colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c']
for i,(n,p) in enumerate(params):
x = np.arange(binom.ppf(0.01, n, p), binom.ppf(0.99, n, p))
y = binom.pmf(x, n, p)
ax.plot(x, y, 'o', ms=8, color=colors[i], label='n={}, p={}'.format(n,p))
ax.vlines(x, 0, y, color=colors[i], alpha=0.3)
# legend styling
legend = ax.legend()
for label in legend.get_texts():
label.set_fontsize('large')
for label in legend.get_lines():
label.set_linewidth(1.5)
# y-axis
ax.set_ylim([0.0, 0.23])
ax.set_ylabel(r'$P(x=k)$')
# x-axis
ax.set_xlim([10, 65])
ax.set_xlabel('# of successes k out of n Bernoulli trials')
# x-axis tick formatting
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(1)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
ax.grid(color='grey', linestyle='-', linewidth=0.3)
plt.suptitle(r'Binomial PMF: $P(x=k) = \binom{n}{k} p^k (1-p)^{n-k}$')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And assuming a fair game where $p = q = 0.5$
Step2: Could the game ever continue forever on to infinity?
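It cannot: the two ruin probabilities sum to one, so no probability is left for an endless game. A small numerical check with the gamblers_ruin function defined above:
p, q, N, i = 0.49, 0.51, 100, 50
print(gamblers_ruin(i, p, q, N) + gamblers_ruin(N - i, q, p, N))  # ~1.0, zero probability of an infinite game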
|
14,385 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact
f1 = lambda x: x[0]**2+x[1]**2-1
f2 = lambda x: x[1]-x[0]**2
F = lambda x: np.array([f1(x),f2(x)], dtype=float)
J = lambda x: np.array([[2*x[0], 2*x[1]],[-2*x[0], 1.0]], dtype=float)
# Here we use 'np.linalg.solve', but we could use LU or PALU! Or iterative methods!
NewtonStep = lambda xi: xi-np.linalg.solve(J(xi),F(xi))
n_delta = 50
x = np.linspace(-1.5, 1.5, n_delta)
# We could have used 'x' since it is the same, but for completeness we will define 'y'
y = np.linspace(-1.5, 1.5, n_delta)
X, Y = np.meshgrid(x, y)
Z1 = np.zeros_like(X)
Z2 = np.zeros_like(X)
for i,xi in enumerate(x):
for j,yj in enumerate(y):
Z1[j,i] = f1([xi,yj])
Z2[j,i] = f2([xi,yj])
plt.figure()
CS1 = plt.contour(X, Y, Z1,levels=[0])
CS2 = plt.contour(X, Y, Z2,levels=[0])
plt.grid()
plt.axis('equal')
plt.title(r'Newton $\mathbb{R}^n$')
plt.show()
def Show_Newton(x0=1.2,y0=0.3,n=0):
plt.figure()
CS1 = plt.contour(X, Y, Z1,levels=[0])
CS2 = plt.contour(X, Y, Z2,levels=[0])
plt.grid()
plt.axis('equal')
plt.title(r'Newton $\mathbb{R}^n$')
plt.plot(x0,y0,'rx')
x_previous = np.array([x0,y0])
print('Initial guess: [%.10f, %.10f]' % (x0,y0))
for i in np.arange(n):
x_next=NewtonStep(x_previous)
x1,y1 = x_next
plt.plot(x1,y1,'rx')
plt.plot([x0, x1],[y0, y1],'r')
x0=x1
y0=y1
x_previous = x_next
print('Iteration %d : [%.10f, %.10f]' % (i+1,x1,y1))
plt.show()
interact(Show_Newton,x0=(-1.4,1.4,0.1),y0=(-1.4,1.4,0.1), n=(0,100,1))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div id='newton' />
Step2: The Newton Step takes advantage of the vectorized implementation!
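For reference, the update coded in NewtonStep is the standard multivariate Newton iteration (the code solves the linear system rather than inverting the Jacobian):
$$x_{k+1} = x_k - J(x_k)^{-1} F(x_k), \qquad J(x) = \begin{pmatrix} 2x_1 & 2x_2 \\ -2x_1 & 1 \end{pmatrix}$$
where $x=(x_1,x_2)$ corresponds to x[0], x[1] in the code.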
Step3: The next cell of code is just needed for plotting purposes.
Step4: Here we plot the curves we will be intersecting.
Step5: Here we implement Newton's method in higher dimensions inside an interactive widget.
|
14,386 | <ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile
from tqdm import tqdm
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('train.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p',
'train.p',
pbar.hook)
if not isfile('test.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p',
'test.p',
pbar.hook)
print('Training and Test data downloaded.')
import pickle
import numpy as np
import math
# Fix error with TF and Keras
import tensorflow as tf
tf.python.control_flow_ops = tf
print('Modules loaded.')
with open('train.p', 'rb') as f:
data = pickle.load(f)
# TODO: Load the feature data to the variable X_train
X_train = data['features']
# TODO: Load the label data to the variable y_train
y_train = data['labels']
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].'
assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].'
print('Tests passed.')
# TODO: Shuffle the data
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.'
assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.'
assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.'
assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.'
print('Tests passed.')
# TODO: Normalize the data features to the variable X_normalized
import cv2
def gray_normalize(image_data):
    """
    Convert the images to grayscale and normalize them with Min-Max scaling to a range of [-0.5, 0.5]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
for i in range(image_data.shape[0]):
gray = cv2.resize(cv2.cvtColor(image_data[i], cv2.COLOR_RGB2GRAY), (32, 32)).reshape(1,32,32,1)
if 0==i:
X_normalized = gray
else:
X_normalized = np.append(X_normalized, gray, axis=0)
# TODO: Implement Min-Max scaling for grayscale image data
x_min = np.min(X_normalized)
x_max = np.max(X_normalized)
a = -0.5
b = 0.5
image_data_rescale = a+ (X_normalized - x_min)*(b-a)/(x_max - x_min)
return image_data_rescale
def normalize(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [-0.5, 0.5]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
# TODO: Implement Min-Max scaling for grayscale image data
x_min = np.min(image_data)
x_max = np.max(image_data)
a = -0.5
b = 0.5
image_data_rescale = a+ (image_data - x_min)*(b-a)/(x_max - x_min)
return image_data_rescale
X_normalized = normalize(X_train)
print('Data normalization finished')
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized))
print('Tests passed.')
# TODO: One Hot encode the labels to the variable y_one_hot
# Turn labels into numbers and apply One-Hot Encoding
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
encoder.fit(y_train)
y_one_hot = encoder.transform(y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
import collections
assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape)
assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.'
print('Tests passed.')
# TODO: Build a Multi-layer feedforward neural network with Keras here.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
# Create the Sequential model
model = Sequential()
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.activations import relu, softmax
def check_layers(layers, true_layers):
assert len(true_layers) != 0, 'No layers found'
for layer_i in range(len(layers)):
assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__)
assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers))
check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)'
assert model.layers[2].activation == relu, 'Third layer not a relu activation layer'
assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)'
assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer'
print('Tests passed.')
# TODO: Compile and train the model here.
# Configures the learning process and metrics
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
# Train the model
# History is a record of training loss and metrics
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.optimizers import Adam
assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function'
assert isinstance(model.optimizer, Adam), 'Not using adam optimizer'
assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc']))
assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1]
assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(43))
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32'
assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3'
assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(43))
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, Activation, MaxPooling2D, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[2].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add dropout after the pooling layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(43))
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, Activation, MaxPooling2D, Dropout, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[3].p == 0.5, 'Third layer should be a Dropout of 50%'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
## Define traffic sign model
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Dropout(0.5))
model.add(Dense(128, name="dense_1"))
model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(43, name="dense_2"))
model.add(Activation('softmax'))
# Train and save traffic sign model
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=20, validation_split=0.2)
## Define traffic sign model
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape, name="conv_1"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
model.add(Flatten(input_shape=(32, 32, 3)))
model.add(Dense(128, name="dense_1"))
model.add(Activation('relu'))
model.add(Dense(43, name="dense_2"))
model.add(Activation('softmax'))
# Train and save traffic sign model
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=20, validation_split=0.2)
model.save_weights('traffic_weights.h5')
## Download Cifar10 dataset
from keras.datasets import cifar10
from keras.utils import np_utils
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# y_train.shape is 2d, (50000, 1). While Keras is smart enough to handle this
# it's a good idea to flatten the array.
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
def normalize(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [-0.5, 0.5]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
# TODO: Implement Min-Max scaling for grayscale image data
x_min = np.min(image_data)
x_max = np.max(image_data)
a = -0.5
b = 0.5
image_data_rescale = a+ (image_data - x_min)*(b-a)/(x_max - x_min)
return image_data_rescale
X_train = normalize(X_train)
X_test = normalize(X_test)
# one-hot encode the labels to match the categorical_crossentropy loss used below
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
print('Data normalization and one-hot encoding finished')
## Define Cifar10 model
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
# input image dimensions
img_rows, img_cols = 32, 32
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if K.image_dim_ordering() == 'th':
X_normalized = X_normalized.reshape(X_normalized.shape[0], 3, img_rows, img_cols)
input_shape = (3, img_rows, img_cols)
else:
X_normalized = X_normalized.reshape(X_normalized.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
# Create the Sequential model
model_cifar10 = Sequential()
model_cifar10.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape, name="conv_1"))
model_cifar10.add(Activation('relu'))
model_cifar10.add(MaxPooling2D(pool_size=pool_size))
model_cifar10.add(Dropout(0.5))
model_cifar10.add(Flatten(input_shape=(32, 32, 3)))
model_cifar10.add(Dense(128, name="dense_1"))
model_cifar10.add(Activation('relu'))
model_cifar10.add(Dense(10, name="dense_2_new"))
model_cifar10.add(Activation('softmax', name="acivation_3_new"))
# load and use model weight from traffic sign
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model_cifar10.compile('adam', 'categorical_crossentropy', ['accuracy'])
model_cifar10.load_weights('traffic_weights.h5', by_name=True)
score = model_cifar10.evaluate(X_test, y_test, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model_cifar10.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model_cifar10.fit(X_train, y_train, batch_size=128, nb_epoch=10, validation_split=0.2)
# TODO: Build a model
# TODO: Compile and train the model
# TODO: Load test data
# TODO: Preprocess data & one-hot encode the labels
# TODO: Evaluate model on test data
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Load the Data
Step3: Preprocess the Data
Step6: Normalize the features
Step7: One-Hot Encode the labels
Step8: Keras Sequential Model
Step9: Training a Sequential Model
Step10: Convolutions
Step11: Pooling
Step12: Dropout
Step13: Use more conv layers.
Step15: Transfer learning from German Traffic Sign dataset to the Cifar10 dataset
Step16: Optimization
Step17: Best Validation Accuracy
|
14,387 | <ASSISTANT_TASK:>
Python Code:
import sqlite3
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
#import CGATPipelines.Pipeline as P
import os
import statistics
#import collections
#load R and the R packages required
#%load_ext rpy2.ipython
#%R require(ggplot2)
# use these functions to display tables nicely as html
from IPython.display import display, HTML
plt.style.use('ggplot')
#plt.style.available
!pwd
!date
database_path = '../csvdb'
output_path = '.'
#database_path= "/ifs/projects/charlotteg/pipeline_peakcalling/csvdb"
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
def getTableNamesFromDB(database_path):
# Create a SQL connection to our SQLite database
con = sqlite3.connect(database_path)
cur = con.cursor()
# the result of a "cursor.execute" can be iterated over by row
cur.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
available_tables = (cur.fetchall())
#Be sure to close the connection.
con.close()
return available_tables
db_tables = getTableNamesFromDB(database_path)
print('Tables contained by the database:')
for x in db_tables:
print('\t\t%s' % x[0])
#This function retrieves a table from sql database and indexes it with track name
def getTableFromDB(statement,database_path):
'''gets table from sql database depending on statement
and set track as index if contains track in column names'''
conn = sqlite3.connect(database_path)
df = pd.read_sql_query(statement,conn)
if 'track' in df.columns:
df.index = df['track']
return df
insert_df = getTableFromDB('select * from insert_sizes;',database_path)
insert_df = insert_df[insert_df["filename"].str.contains('pseudo')==False].copy()
insert_df = insert_df[insert_df["filename"].str.contains('pooled')==False].copy()
def add_expt_to_insertdf(dataframe):
''' splits track name for example HsTh1-RATotal-R1.star into expt
    features, expt, sample_treatment and replicate and adds these as
    columns to the dataframe'''
expt = []
treatment = []
replicate = []
for value in dataframe.filename:
x = value.split('/')[-1]
x = x.split('_insert')[0]
# split into design features
y = x.split('-')
expt.append(y[-3])
treatment.append(y[-2])
replicate.append(y[-1])
if len(expt) == len(treatment) and len(expt)== len(replicate):
print ('all values in list correctly')
else:
print ('error in loading values into lists')
#add collums to dataframe
dataframe['expt_name'] = expt
dataframe['sample_treatment'] = treatment
dataframe['replicate'] = replicate
return dataframe
insert_df = add_expt_to_insertdf(insert_df)
insert_df
ax = insert_df.boxplot(column='fragmentsize_mean', by='sample_treatment')
ax.set_title('for mean fragment size',size=10)
ax.set_ylabel('mean fragment length')
ax.set_xlabel('sample treatment')
ax = insert_df.boxplot(column='tagsize', by='sample_treatment')
ax.set_title('for tag size',size=10)
ax.set_ylabel('tag size')
ax.set_xlabel('sample treatment')
ax.set_ylim(((insert_df.tagsize.min()-2),(insert_df.tagsize.max()+2)))
def getFraglengthTables(database_path):
'''Takes path to sqlite3 database and retrieves fraglengths tables for individual samples
, returns a dictionary where keys = sample table names, values = fraglengths dataframe'''
frag_tabs = []
db_tables = getTableNamesFromDB(database_path)
for table_name in db_tables:
if 'fraglengths' in str(table_name[0]):
tab_name = str(table_name[0])
statement ='select * from %s;' % tab_name
df = getTableFromDB(statement,database_path)
frag_tabs.append((tab_name,df))
print('detected fragment length distribution tables for %s files: \n' % len(frag_tabs))
for val in frag_tabs:
print(val[0])
return frag_tabs
def getDFofFragLengths(database_path):
''' this takes a path to database and gets a dataframe where length of fragments is the index,
each column is a sample and values are the number of reads that have that fragment length in that
sample
'''
fraglength_dfs_list = getFraglengthTables(database_path)
dfs=[]
for item in fraglength_dfs_list:
track = item[0].split('_filtered_fraglengths')[0]
df = item[1]
#rename collumns so that they are correct - correct this in the pipeline then delete this
#df.rename(columns={'frequency':'frag_length', 'frag_length':'frequency'}, inplace=True)
df.index = df.frag_length
df.drop('frag_length',axis=1,inplace=True)
df.rename(columns={'frequency':track},inplace=True)
dfs.append(df)
frag_length_df = pd.concat(dfs,axis=1)
frag_length_df.fillna(0, inplace=True)
return frag_length_df
#Note the frequency and fragment lengths are around the wrong way!
#frequency is actually fragment length, and fragement length is the frequency
#This gets the tables from db and makes master df of all fragment length frequencies
frag_length_df = getDFofFragLengths(database_path)
#plot fragment length frequencies
ax = frag_length_df.divide(1000).plot()
ax.set_ylabel('Number of fragments\n(thousands)')
ax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )
ax.set_title('fragment length distribution')
ax.set_xlabel('fragment length (bp)')
ax.set_xlim()
ax = frag_length_df.divide(1000).plot(figsize=(9,9))
ax.set_ylabel('Number of fragments\n(thousands)')
ax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )
ax.set_title('fragment length distribution')
ax.set_xlabel('fragment length (bp)')
ax.set_xlim((0,800))
percent_frag_length_df = pd.DataFrame(index=frag_length_df.index)
for column in frag_length_df:
total_frags = frag_length_df[column].sum()
percent_frag_length_df[column] = frag_length_df[column].divide(total_frags)*100
ax = percent_frag_length_df.plot(figsize=(9,9))
ax.set_ylabel('Percentage of fragments')
ax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )
ax.set_title('percentage fragment length distribution')
ax.set_xlabel('fragment length (bp)')
ax.set_xlim((0,800))
insert_df = getTableFromDB('select * from picard_stats_insert_size_metrics;',database_path)
for c in insert_df.columns:
print (c)
insert_df
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is where we are and when the notebook was run
Step2: First lets set the output path for where we want our plots to be saved and the database path and see what tables it contains
Step3: This code adds a button to see/hide code in html
Step4: The code below provides functions for accessing the project database and extract a table names so you can see what tables have been loaded into the database and are available for plotting. It also has a function for geting table from the database and indexing the table with the track name
Step5: Insert Size Summary
Step6: lets graph the fragment length mean and tag size grouped by sample so we can see if they are much different
Step7: Ok now get get the fragment length distributiions for each sample and plot them
Step8: Now lets zoom in on the interesting region of the plot (the default in the code looks at fragment lengths from 0 to 800bp - you can change this below by setting the tuple in the ax.set_xlim() function
Step9: it is a bit trickly to see differences between samples of different library sizes so lets look and see if the reads for each fragment length is similar
Step10: SUMMARISE HERE
|
14,388 | <ASSISTANT_TASK:>
Python Code:
import csv
import string
import datetime
import pandas as pd
import numpy as np
# import matplotlib pyplot commands
from matplotlib.pyplot import *
# Show Plots in the Notebook
%matplotlib inline
rcParams['figure.figsize']= (10, 8) # set Chart Size
rcParams['font.size'] = 14 # set Font size in Chart
# 'style' the plot using 'bmh' style
style.use('bmh')
def clean_string(s):
    """
    Function that "cleans" a string by first stripping leading and trailing
    whitespace and then substituting an underscore for all other whitespace
    and punctuation. After that substitution is made, any consecutive occurrences
    of the underscore character are reduced to one occurrence.
    Finally, the string is converted to lower case.
    Returns the cleaned string.
    Input Parameters:
    -----------------
    s: The string to clean.
    """
to_sub = string.whitespace + string.punctuation
trans_table = str.maketrans(to_sub, len(to_sub) * '_')
fixed = str.translate(s.strip(), trans_table)
while True:
new_fixed = fixed.replace('_' * 2, '_')
if new_fixed == fixed:
break
fixed = new_fixed
return fixed.lower()
file_name = 'data/siemens_sample.csv'
reader = csv.reader(open(file_name))
include_location = False # if True include location in point ID
# For running in the notebook, this controls how many rows are shown
# for each execution of the cell below.
# Set to a very large number if you want to process the entire file
# in one execution of the cell below.
num_rows_to_show = 300000
# Going to put the data into a dictionary, keyed by the name of the
# point
data_dict = {}
# repeatedly execute this cell to step through chunks of the data
row_ct = 0
for row in reader:
f1 = row[0] # the first field
if '/' in f1: # Look for the / in the Date
# this is a row with a data point in it.
# create a date/time string and parse into a Python datetime
ts = '{} {}'.format(row[0], row[1])
ts = datetime.datetime.strptime(ts, '%m/%d/%Y %H:%M:%S')
# get the value, which is usually a number, but sometimes a string.
# first try to convert to a number, and if it errors, just return it as a string
try:
val = float(row[2])
except:
val = row[2]
tstamps, vals = data_dict.get(pt_id, ([], []))
tstamps.append(ts)
vals.append(val)
data_dict[pt_id] = (tstamps, vals)
elif f1.startswith('Point'):
# This row has a Point ID in it
pt_id = clean_string(row[1])
elif f1.startswith('Trend L'):
# This row has a Location code in it. If requested, add it
# to the point name.
if include_location:
pt_id = '{}_{}'.format(clean_string(row[1]), pt_id)
row_ct += 1
if row_ct == num_rows_to_show: break
df_final = pd.DataFrame()
for pt_id in data_dict.keys():
# for this point, retrieve the timestamps and values frome the dictionary
tstamps, vals = data_dict[pt_id]
# make a DataFrame, indexed on the timestamps, with the point ID as the column
# name.
df = pd.DataFrame(vals, index=tstamps, columns=[pt_id])
# Sometimes there are duplicate timestamps due to Alarms, I think.
# Only take the value from the last timestamp of the duplicate timestamps.
df = df.groupby(level=0).last()
# Add this DataFrame to the final DataFrame. Indexes are matched up
# or added if they don't already exist in the final frame.
df_final = pd.concat([df_final, df], axis=1)
# Save the final DataFrame to a CSV file to be viewed, perhaps by Excel.
df_final.to_csv('df_final.csv')
df_final.bh_100w_tec_room_temp.dropna().plot()
df_final.bh_uhe_tec_room_temp.dropna().plot()
# Convert the notebook to a script.
# I usually have this commented out
# !jupyter nbconvert --to script siemens_reader.ipynb
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Use this function to clean up point names and location names
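A small usage example of clean_string (the point name here is made up):
print(clean_string('  Room Temp. #1 '))   # -> 'room_temp_1'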
Step3: Use the Python csv module to read the file
Step4: Create the Final DataFrame by concatenating a DataFrame for each Point
Step5: Plot a couple of points, getting rid of gaps by using dropna()
Step6: Export this Code to a File
|
14,389 | <ASSISTANT_TASK:>
Python Code:
import gammalib
import ctools
import cscripts
%matplotlib inline
import matplotlib.pyplot as plt
caldb = 'prod2'
irf = 'South_5h'
emin = 0.1 # TeV
emax = 160.0 # TeV
pointing_file = 'pointings.txt'
# open file
f = open(pointing_file, 'w')
# header
f.write('id,ra,dec,tmin,tmax\n')
# pointings
f.write('0001,275.65,-13.78,0.,10800.\n')
f.write('0002,277.25,-13.78,11000.,21800.\n')
# close file
f.close()
obsdef = cscripts.csobsdef()
obsdef['inpnt'] = pointing_file
obsdef['caldb'] = caldb
obsdef['irf'] = irf
obsdef['emin'] = emin
obsdef['emax'] = emax
obsdef['rad'] = 5.
obsdef.run()
obssim = ctools.ctobssim(obsdef.obs())
obssim['inmodel'] = '$CTOOLS/share/models/hess1825_26.xml'
obssim.run()
skymap = ctools.ctskymap(obssim.obs())
skymap['emin'] = emin
skymap['emax'] = emax
skymap['nxpix'] = 200
skymap['nypix'] = 200
skymap['binsz'] = 0.02
skymap['proj'] = 'TAN'
skymap['coordsys'] = 'CEL'
skymap['xref'] = 276.45
skymap['yref'] = -13.78
skymap['bkgsubtract'] = 'NONE'
skymap.run()
# Slightly smooth the map for display to suppress statistical fluctuations
skymap.skymap().smooth('GAUSSIAN',0.1)
fig = plt.figure()
ax = plt.subplot()
plt.imshow(skymap.skymap().array(),origin='lower',
extent=[276.45+0.02*100,276.45-0.02*100,-13.78-0.02*100,-13.78+0.02*100])
# boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Counts')
skymap = ctools.ctskymap(obssim.obs())
skymap['emin'] = 10. #TeV
skymap['emax'] = emax
skymap['nxpix'] = 200
skymap['nypix'] = 200
skymap['binsz'] = 0.02
skymap['proj'] = 'TAN'
skymap['coordsys'] = 'CEL'
skymap['xref'] = 276.45
skymap['yref'] = -13.78
skymap['bkgsubtract'] = 'NONE'
skymap.run()
# Slightly smooth the map for display to suppress statistical fluctuations
skymap.skymap().smooth('GAUSSIAN',0.1)
fig = plt.figure()
ax = plt.subplot()
plt.imshow(skymap.skymap().array(),origin='lower',
extent=[276.45+0.02*100,276.45-0.02*100,-13.78-0.02*100,-13.78+0.02*100])
# boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Counts')
# model container
models = gammalib.GModels()
# low-energy blob
centre = gammalib.GSkyDir()
centre.radec_deg(276.5,-13.75)
spatial = gammalib.GModelSpatialRadialDisk(centre,0.5)
# fix the source position and radius
spatial['RA'].fix()
spatial['DEC'].fix()
spatial['Radius'].fix()
spectral = gammalib.GModelSpectralPlaw(4.e-18,-2.5,gammalib.GEnergy(1.,'TeV'))
source = gammalib.GModelSky(spatial,spectral)
source.name('HESS J1825-137')
models.append(source)
# high-energy spot
centre = gammalib.GSkyDir()
centre.radec_deg(276.5,-13)
spatial = gammalib.GModelSpatialRadialDisk(centre,0.1)
# fix the source position and radius
spatial['RA'].fix()
spatial['DEC'].fix()
spatial['Radius'].fix()
spectral = gammalib.GModelSpectralPlaw(4.e-19,-1.5,gammalib.GEnergy(1.,'TeV'))
source = gammalib.GModelSky(spatial,spectral)
source.name('HESS J1826-130')
models.append(source)
# instrumental background
# power law spectral correction with pivot energy at 1 TeV
spectral = gammalib.GModelSpectralPlaw(1, 0, gammalib.GEnergy(1, 'TeV'))
bkgmodel = gammalib.GCTAModelIrfBackground(spectral)
bkgmodel.name('Background')
bkgmodel.instruments('CTA')
# append to models
models.append(bkgmodel)
obs = obssim.obs().copy()
obs.models(models)
# Bin events
cntcube = ctools.ctbin(obs)
cntcube['usepnt'] = False
cntcube['ebinalg'] = 'LOG'
cntcube['xref'] = 276.45
cntcube['yref'] = -13.78
cntcube['binsz'] = 0.02
cntcube['nxpix'] = 200
cntcube['nypix'] = 200
cntcube['enumbins'] = 40
cntcube['emin'] = emin
cntcube['emax'] = emax
cntcube['coordsys'] = 'CEL'
cntcube['proj'] = 'TAN'
cntcube.run()
# Extract counts cube
cube = cntcube.cube()
# Compute stacked response
response = cscripts.obsutils.get_stacked_response(obs,cube)
# Copy stacked observations
stacked_obs = cntcube.obs().copy()
# Append stacked response
stacked_obs[0].response(response['expcube'], response['psfcube'],response['bkgcube'])
# Set stacked models
stacked_obs.models(response['models'])
like = ctools.ctlike(stacked_obs)
like.run()
print(like.opt())
print(like.obs().models())
resspec = cscripts.csresspec(like.obs())
resspec['algorithm'] = 'SIGNIFICANCE'
resspec['components'] = True
resspec['outfile'] = 'resspec.fits'
resspec.execute()
import sys
import os
sys.path.append(os.environ['CTOOLS']+'/share/examples/python/')
from show_residuals import plot_residuals
plot_residuals('resspec.fits','',0)
resmap = cscripts.csresmap(like.obs())
resmap['algorithm'] = 'SIGNIFICANCE'
resmap.run()
# Slightly smooth the map for display to suppress statistical fluctuations
resid = resmap._resmap.copy()
resid.smooth('GAUSSIAN',0.1)
# Plotting
fig = plt.figure()
ax = plt.subplot()
plt.imshow(resid.array(),origin='lower', cmap='bwr',
extent=[276.45+0.02*100,276.45-0.02*100,-13.78-0.02*100,-13.78+0.02*100])
# Boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Significance ($\sigma$)')
# copy the fitted models
fit_obs = like.obs().copy()
# replace disks with isotropic model
for model in fit_obs.models():
if model.name() == 'HESS J1825-137' or model.name() == 'HESS J1826-130':
model.spatial(gammalib.GModelSpatialDiffuseConst())
scs1 = cscripts.csscs(fit_obs)
scs1['srcnames'] = 'HESS J1825-137;HESS J1826-130'
scs1['emin'] = emin
scs1['emax'] = emax
scs1['nxpix'] = 20
scs1['nypix'] = 20
scs1['binsz'] = 0.1
scs1['rad'] = 0.2
scs1['proj'] = 'TAN'
scs1['coordsys'] = 'CEL'
scs1['xref'] = 276.45
scs1['yref'] = -13.78
scs1.run()
flux_1826 = scs1.flux('HESS J1826-130')
# Plotting
fig = plt.figure()
ax = plt.subplot()
plt.imshow(flux_1826.array(),origin='lower', vmin = 1.e-8,
extent=[276.45+0.1*10,276.45-0.1*10,-13.78-0.1*10,-13.78+0.1*10])
# Boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Flux (photons/cm$^2$/s/sr)')
flux_1825 = scs1.flux('HESS J1825-137')
# Plotting
fig = plt.figure()
ax = plt.subplot()
plt.imshow(flux_1825.array(),origin='lower', vmin = 1.e-8,
extent=[276.45+0.1*10,276.45-0.1*10,-13.78-0.1*10,-13.78+0.1*10])
# Boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Flux (photons/cm$^2$/s/sr)')
skymap = ctools.ctskymap(obssim.obs())
skymap['emin'] = emin
skymap['emax'] = emax
skymap['nxpix'] = 200
skymap['nypix'] = 200
skymap['binsz'] = 0.02
skymap['proj'] = 'TAN'
skymap['coordsys'] = 'CEL'
skymap['xref'] = 276.45
skymap['yref'] = -13.78
skymap['bkgsubtract'] = 'RING'
skymap['roiradius'] = 0.5
skymap['inradius'] = 1.0
skymap['outradius'] = 1.5
skymap['iterations'] = 3
skymap['threshold'] = 5 # sigma
skymap.run()
fig = plt.figure()
ax = plt.subplot()
plt.imshow(skymap.exclusion_map().map().array(), origin='lower', cmap = 'binary',
extent=[276.45+0.02*100,276.45-0.02*100,-13.78-0.02*100,-13.78+0.02*100])
# boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
obs.models(like.obs().models())
scs2 = cscripts.csscs(obs)
scs2['srcnames'] = 'HESS J1825-137;HESS J1826-130'
scs2['emin'] = emin
scs2['emax'] = emax
scs2['nxpix'] = 20
scs2['nypix'] = 20
scs2['binsz'] = 0.1
scs2['rad'] = 0.2
scs2['proj'] = 'TAN'
scs2['coordsys'] = 'CEL'
scs2['xref'] = 276.45
scs2['yref'] = -13.78
scs2['method'] = 'ONOFF'
scs2['use_model_bkg'] = False
scs2['enumbins'] = 30
scs2.exclusion_map(skymap.exclusion_map())
scs2.run()
flux_1826 = scs2.flux('HESS J1826-130')
# Plotting
fig = plt.figure()
ax = plt.subplot()
plt.imshow(flux_1826.array(),origin='lower', vmin = 1.e-8,
extent=[276.45+0.1*10,276.45-0.1*10,-13.78-0.1*10,-13.78+0.1*10])
# Boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Flux (photons/cm$^2$/s/sr)')
flux_1825 = scs2.flux('HESS J1825-137')
# Plotting
fig = plt.figure()
ax = plt.subplot()
plt.imshow(flux_1825.array(),origin='lower', vmin = 1.e-8,
extent=[276.45+0.1*10,276.45-0.1*10,-13.78-0.1*10,-13.78+0.1*10])
# Boundaries of the coord grid
ax.set_xlabel('R.A. (deg)')
ax.set_ylabel('Dec (deg)')
cbar = plt.colorbar()
cbar.set_label('Flux (photons/cm$^2$/s/sr)')
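# Sketch: pixel-by-pixel comparison of the On/Off and stacked flux maps
# (assumes both csscs runs used the same 20x20 grid defined above)
fig = plt.figure()
plt.imshow(scs2.flux('HESS J1825-137').array() - scs1.flux('HESS J1825-137').array(),
           origin='lower', cmap='bwr',
           extent=[276.45+0.1*10,276.45-0.1*10,-13.78-0.1*10,-13.78+0.1*10])
plt.colorbar().set_label('Flux difference (photons/cm$^2$/s/sr)')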
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also import the matplotlib package for plotting.
Step2: Simulated dataset
Step3: We will simulate an observation of the region around the famous sources HESS J1825-137 and HESS J1826-130 based on a very simple sky model. We will consider two pointings of a few hours wobbling around the sources' position.
Step4: Then we use the csobsdef script to convert the list of pointings into an observation definition XML file.
Step5: Finally we use ctobssim to perform the observation simulation.
Step6: Skymap inspection and preliminary likelihood fit
Step7: Below we inspect the skymap by using matpltolib.
Step8: A large blob of emission appears at the center of the map. It coincides with the source HESS J1825-137. Past observations have indicated that HESS J1826-130 has a harder spectrum than HESS J1825-137. Let's peek at a skymap above 10 TeV.
Step9: Indeed, emission above 10 TeV is concentrated on a spot North of the low-energy blob coincident with the position of HESS J1826-130. How can we disentangle the morphology of the two sources based on our observations?
Step10: We copy the simulated observations and append to them our initial sky model.
Step11: We are going to start with a stacked analysis. We bin the events, and then attach to the stacked observations the stacked response. Note that if the dataset is small it may be convenient to use an unbinned analysis in lieu of the stacked analysis for this step.
Step12: Now we can run the preliminary likelihood analysis in which the spectral parameters for the two sources are fit to the data.
Step13: Let's check that the fit was successful.
Step14: We also check the fitted models.
Step15: As guessed from the skymaps HESS J1826-130 is fainter than HESS J1825-137, but its spectrum is rather harder. We will use the values of the spectral indices obtained from this likelihood analysis to derive the source morphology below.
Step16: We can use an example script to display the residuals.
Step17: The model reproduces reasonably well the data spectrally (although not perfectly). We will also check the spatial residuals using csresmap.
Step18: We inspect the map to check the spatial residuals.
Step19: The spatial residuals are small, indicating that the model is close enough to the data. However, the structures in the residuals indicate that the morphological models adopted may not accurately represent the data.
Step20: The essential information to be provided to csscs is
Step21: Now we can inspect the maps of the fluxes from the two sources. We will set a minimum flux for display to avoid being confused by noisy bins. In fact the script has calculated also the flux uncertainty in each bin (accessible through the flux_error method) and the detection significance (accessible through the ts method), that you can use to filter more intelligently the maps. We did not request this, but one could also have flux upper limits computed (calc_ulimit parameter).
Step22: As you can see the hard emission is confined in the North. The soft emission blob seems to have an elongated shape.
Step23: Let's inspect the exclusion map.
Step24: To use csscs in On/Off mode we need to go back to using the event lists. We modify the associated models to the latest version obtained from the global likelihood fit.
Step25: Note that in On/Off mode if there are multiple sources csscs will use only the spectral models for the sources, and their emission within each ROI for component separation will be assumed by default to be isotropic. We will not use the background model, we'll just assume that the background rate of the reflected background regions is the same as in the ROI.
Step26: We visualize below the flux maps, which are quite consistent with those obtained using the stacked analysis.
|
14,390 | <ASSISTANT_TASK:>
Python Code:
#Plotting the relationships between variables
sns.set_style("white")
dfcont = df.drop(['carname','cylinders','modelyear','origin'], axis=1)
# Declare that you want to make a scatterplot matrix.
g = sns.PairGrid(dfcont, diag_sharey=False)
# Scatterplot.
g.map_upper(plt.scatter, alpha=.5)
# Fit line summarizing the linear relationship of the two variables.
g.map_lower(sns.regplot, scatter_kws=dict(alpha=0))
# Give information about the univariate distributions of the variables.
g.map_diag(sns.kdeplot, lw=3)
plt.show()
#Some warnings will show up below because the plot does not include a legend.
# Make the correlation matrix.
corrmat = dfcont.corr()
print(corrmat)
# Set up the matplotlib figure.
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn.
sns.heatmap(corrmat, vmax=.8, square=True)
plt.show()
df1 = df.drop(['carname'], axis=1)
df1.head()
# Plot all the variables with boxplots
dfb = df1.drop(['origin','modelyear'], axis=1)
df_long = dfb
df_long = pd.melt(df_long, id_vars=['cylinders'])
g = sns.FacetGrid(df_long, col="variable",size=10, aspect=.5)
g = g.map(sns.boxplot, "cylinders", "value")
g.fig.get_axes()[0].set_yscale('log')
sns.despine(left=True)
plt.show()
# Descriptive statistics by group.
df1.groupby('cylinders').describe().transpose()
df1['cylinders'] = df1["cylinders"].astype(float)
df1 = df1.drop( df[(df.cylinders == 3.0)].index )
df1 = df1.drop( df[(df.cylinders == 5.0)].index )
df1['cylinders'] = df1['cylinders'].astype(str)
dffinal1 = df1[['cylinders','modelyear','origin','mpg','displacement','horsepower','weight','acceleration']]
dffinal1.head()
dffinal1['cylinders'].unique()
for col in dffinal1.loc[:,'mpg':'acceleration'].columns:
print(col)
print(stats.ttest_ind(
dffinal1[dffinal1['cylinders'] == '4.0'][col],
dffinal1[dffinal1['cylinders'] == '6.0'][col]
))
for col in dffinal1.loc[:,'mpg':'acceleration'].columns:
print(col)
print(stats.ttest_ind(
dffinal1[dffinal1['cylinders'] == '4.0'][col],
dffinal1[dffinal1['cylinders'] == '8.0'][col]
))
for col in dffinal1.loc[:,'mpg':'acceleration'].columns:
print(col)
print(stats.ttest_ind(
dffinal1[dffinal1['cylinders'] == '6.0'][col],
dffinal1[dffinal1['cylinders'] == '8.0'][col]
))
plt.figure(figsize=(20,5))
ax = sns.countplot(x="modelyear", hue='cylinders', data=dffinal1, palette="Set3")
plt.show()
# Table of counts
counttable = pd.crosstab(dffinal1['modelyear'], dffinal1['cylinders'])
print(counttable)
print(stats.chisquare(counttable, axis=None))
#Feature 1: Standard number of cylinders vs high end number of cylinders
features = pd.get_dummies(dffinal1['cylinders'])
features['High_end'] = np.where((dffinal1['cylinders'].isin(['6.0', '8.0'])), 1, 0)
#print(pd.crosstab(features['High_end'], dffinal1['cylinders']))
#Feature 2: # Cars from the 70s and cars from the 80s.
features = pd.get_dummies(dffinal1['modelyear'])
features['decade'] = np.where((dffinal1['modelyear'].isin(range(70,80))), 1, 0)
#print(pd.crosstab(features['decade'], dffinal1['modelyear']))
# Feature 3: National cars vs imported cars
features = pd.get_dummies(dffinal1['origin'])
features['national'] = np.where((dffinal1['origin'].isin(['1'])), 1, 0)
#print(pd.crosstab(features['national'], dffinal1['origin']))
# Feature 4: Nacceleration: Normalized acceleration
# Making a four-panel plot.
fig = plt.figure()
fig.add_subplot(221)
plt.hist(dffinal1['acceleration'].dropna())
plt.title('Raw')
fig.add_subplot(222)
plt.hist(np.log(dffinal1['acceleration'].dropna()))
plt.title('Log')
fig.add_subplot(223)
plt.hist(np.sqrt(dffinal1['acceleration'].dropna()))
plt.title('Square root')
ax3=fig.add_subplot(224)
plt.hist(1/df['acceleration'].dropna())
plt.title('Inverse')
plt.show()
features['nacceleration'] = np.sqrt(dffinal1['acceleration'])
# Feature 5: CAR DHW. Composite of highly correlated variables
corrmat = dffinal1.corr()
# Set up the matplotlib figure.
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=.8, square=True)
plt.show()
means = dffinal1[['displacement','horsepower','weight']].mean(axis=0)
stds = dffinal1[['displacement','horsepower','weight']].std(axis=0)
features['car_dhw'] = ((dffinal1[['displacement','horsepower','weight']] - means) / stds).mean(axis=1)
# Check how well the composite correlates with each of the individual variables.
plotdffinal1= dffinal1.loc[:, ['displacement','horsepower','weight']]
plotdffinal1['dhw'] = features['car_dhw']
corrmat2 = plotdffinal1.corr()
print(corrmat2)
# Feature 6: Carperformance. Relationship between car_dhw & nacceleration
features['carperformance'] = features['car_dhw'] * features['nacceleration']
# A plot of an interaction.
# Add the 'tvtot' feature to the features data frame for plotting.
features['mpg'] = dffinal1['mpg']
sns.lmplot(
x='carperformance',
y='mpg',
data=features,
scatter=False
)
plt.show()
# Feature 7: Carperformance (squared).
sns.regplot(
features['carperformance'],
y=dffinal1['mpg'],
y_jitter=.49,
order=2,
scatter_kws={'alpha':0.3},
line_kws={'color':'black'},
ci=None
)
plt.show()
features['carperformance_sq'] = features['carperformance'] * features['carperformance']
# Feature 7: standardised carperformance (squared).
means = features[['carperformance_sq']].mean(axis=0)
stds = features[['carperformance_sq']].std(axis=0)
features['standcarperformance_sq'] = ((features[['carperformance_sq']] - means) / stds).mean(axis=1)
# Feature 8: Acceleration (squared).
sns.regplot(
dffinal1['acceleration'],
y=dffinal1['mpg'],
y_jitter=.49,
order=2,
scatter_kws={'alpha':0.3},
line_kws={'color':'black'},
ci=None
)
plt.show()
features['acceleration_sq'] = dffinal1['acceleration'] * dffinal1['acceleration']
# Feature 9: Dhw composite value abs.
sns.regplot(
dffinal1['acceleration'],
y=features['car_dhw'],
y_jitter=.49,
order=2,
scatter_kws={'alpha':0.3},
line_kws={'color':'black'},
ci=None
)
plt.show()
features['dhw_abs'] = features['car_dhw'].abs()
# Select only numeric variables to scale.
df_num = features.select_dtypes(include=[np.number]).dropna()
# Save the column names.
names=df_num.columns
# Scale, then turn the resulting numpy array back into a data frame with the correct column names.
df_scaled = pd.DataFrame(preprocessing.scale(df_num), columns=names)
# The new features contain all the information of the old ones, but on a new scale.
plt.scatter(df_num['car_dhw'], df_scaled['car_dhw'])
plt.show()
# Lookit all those matching means and standard deviations!
print(df_scaled.describe())
# Normalize the data so that all variables have a mean of 0 and standard deviation
# of 1.
X = StandardScaler().fit_transform(df_scaled)
# The NumPy covariance function assumes that variables are represented by rows,
# not columns, so we transpose X.
Xt = X.T
Cx = np.cov(Xt)
print('Covariance Matrix:\n', Cx)
# Calculating eigenvalues and eigenvectors.
eig_val_cov, eig_vec_cov = np.linalg.eig(Cx)
# Inspecting the eigenvalues and eigenvectors.
for i in range(len(eig_val_cov)):
eigvec_cov = eig_vec_cov[:, i].reshape(1, 12).T
print('Eigenvector {}: \n{}'.format(i + 1, eigvec_cov))
print('Eigenvalue {}: {}'.format(i + 1, eig_val_cov[i]))
print(40 * '-')
print(
'The percentage of total variance in the dataset explained by each',
'component calculated by hand.\n',
eig_val_cov / sum(eig_val_cov)
)
#From the Scree plot we should use onle the first 2 components that will explain 46% and 23% each
plt.plot(eig_val_cov)
plt.show()
# Create P, which we will use to transform Cx into Cy to get Y, the
# dimensionally-reduced representation of X.
P = eig_vec_cov[:, 0]
# Transform X into Y.
Y = P.T.dot(Xt)
# Combine X and Y for plotting purposes.
data_to_plot = df_scaled[['nacceleration','car_dhw','carperformance','carperformance_sq','standcarperformance_sq','acceleration_sq','dhw_abs']]
data_to_plot['Component'] = Y
data_to_plot = pd.melt(data_to_plot, id_vars='Component')
g = sns.FacetGrid(data_to_plot, col="variable", size=4, aspect=.5)
g = g.map(
sns.regplot,
"Component",
"value",
x_jitter=.49,
y_jitter=.49,
fit_reg=False
)
plt.show()
sklearn_pca = PCA(n_components=5)
Y_sklearn = sklearn_pca.fit_transform(X)
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
# Compare the sklearn solution to ours – a perfect match.
plt.plot(Y_sklearn[:, 0], Y, 'o')
plt.title('Comparing solutions')
plt.ylabel('Sklearn Component 1')
plt.xlabel('By-hand Component 1')
plt.show()
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From the correlation matrix it seems that displacement, horsepower and weight are strongly correlated. Acceleration is less correlated with the rest thus providing more information.
Step2: For cylinders = 6 & 8
Step3: The number of counts for cylinders = 3 and 5 is very small so they are discarded considering only 4, 6 & 8
Step4: The difference for all variables for each cylinders value is significant (except for acceleration when comparing 4 & 6)
Step5: Modelyear on average is equivalent regarding the population per year. There are differences regarding the cylinders values.
|
14,391 | <ASSISTANT_TASK:>
Python Code:
# grab the entire Penn World Tables data from the web...
pwt = pypwt.load_pwt_data()
#...this gives us a panel (i.e., two dimensional) data set
pwt
def labor_supply(data, year="1950-01-01"):
Labor supply in a given year is the product of number of employed
persons, 'emp', and the average number of hours worked, 'avh'.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
year : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
Returns
-------
L : pandas.Series
Effective labor supply in units of employed person-years.
L = data.minor_xs(year)["emp"] * data.minor_xs(year)["avh"]
return L
def real_gdp_per_unit_labor(data, year="1950-01-01"):
Real gross domestic product (GDP) per unit labor is the ratio of
some measure of real output and some measure of labor supply.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
year : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
Returns
-------
rgdppul : pandas.Series
Real gdp per unit labor supply.
rgdppul = data.minor_xs(year)["rgdpo"] / labor_supply(data, year)
return rgdppul
def growth_rate_real_gdp_per_unit_labor(data, start="1950-01-01", end="2011-01-01"):
Plot the growth rate of real GDP per unit labor over some time period
against the level of real GDP per unit labor at the start of the time
period.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
start : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
end : str (default="2011-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
gr = (np.log(real_gdp_per_unit_labor(data, end)) -
np.log(real_gdp_per_unit_labor(data, start)))
return gr
def some_interesting_plot(data, start="1950-01-01", end="2011-01-01"):
Plot the growth rate of real GDP per unit labor over some time period
against the level of real GDP per unit labor at the start of the time
period.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
start : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
end : str (default="2011-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
# create the scatter plot
fig, ax = plt.subplots(1, 1, figsize=(12,9))
xs = np.log(real_gdp_per_unit_labor(data, start))
ys = growth_rate_real_gdp_per_unit_labor(data, start, end)
ax.scatter(xs, ys, color='k')
# axis labels, title, etc
ax.set_xlabel('Log income (per unit labor) in 1960', fontsize=25)
ax.set_xlim(0.95 * xs.min(), 1.05 * xs.max())
ax.set_ylabel('Income (per unit labor) growth\n({}-{})'.format(start[:4], end[:4]),
fontsize=25)
ax.set_ylim(1.05 * ys.min(), 1.05 * ys.max())
ax.set_title('Do poor countries grow faster than rich countries?',
fontsize=25, family="serif")
some_interesting_plot(data=pwt, start="1960-01-01", end="2010-01-01")
def another_interesting_plot(data, start, end, intercept=2.5, slope=-0.5):
Plot the growth rate of real GDP per unit labor over some time period
against the level of real GDP per unit labor at the start of the time
period. Then add a regression line associated with given values for the
intercept and slope.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
start : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
end : str (default="2011-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
intercept: float (defalut=2.5)
Intercept for the regression line.
slope : float (default=-0.5)
Slope for the regression line.
# create the scatter plot
fig, ax = plt.subplots(1, 1, figsize=(12,9))
xs = np.log(real_gdp_per_unit_labor(data, start))
ys = growth_rate_real_gdp_per_unit_labor(data, start, end)
ax.scatter(xs, ys, color='k')
# compute the regression line given params
grid = np.linspace(0.95 * xs.min(), 1.05 * xs.max(), 1000)
predicted = lambda x: intercept + slope * x
yhat, = ax.plot(grid, predicted(grid) , color='b',
label=r"$\hat{y}_i=%.2f + %.2fx_i$" %(intercept, slope))
# axis labels, title, etc
ax.set_xlabel('Log income (per unit labor) in 1960', fontsize=25)
ax.set_xlim(0.95 * xs.min(), 1.05 * xs.max())
ax.set_ylabel('Income (per unit labor) growth\n({}-{})'.format(start[:4], end[:4]),
fontsize=25)
ax.set_ylim(1.05 * ys.min(), 1.05 * ys.max())
ax.legend(bbox_to_anchor=(1.0, 0.95), prop={'size': 25})
another_interesting_plot(pwt, start="1960-01-01", end="2010-01-01")
def another_static_plot(data, start, end, intercept=2.5, slope=-0.5):
Plot the growth rate of real GDP per unit labor over some time period
against the level of real GDP per unit labor at the start of the time
period. Then add a regression line associated with given values for the
intercept and slope.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
start : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
end : str (default="2011-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
intercept: float (default=2.5)
Intercept for the regression line.
slope : float (default=-0.5)
Slope for the regression line.
# create the scatter plot
fig, ax = plt.subplots(1, 1, figsize=(12,9))
xs = np.log(real_gdp_per_unit_labor(data, start))
ys = growth_rate_real_gdp_per_unit_labor(data, start, end)
ax.scatter(xs, ys, color='k')
# compute and plot the regression line given current params
predicted = lambda x: intercept + slope * x
grid = np.linspace(0.95 * xs.min(), 1.05 * xs.max(), 1000)
yhat, = ax.plot(grid, predicted(grid) , color='b')
ssr = np.sum((predicted(xs) - ys)**2)
# add the residuals to the plot
lines = zip(zip(xs, ys), zip(xs, predicted(xs)))
lc = mc.LineCollection(lines, colors='r', linewidths=2)
ax.add_collection(lc)
# axis labels, title, etc
ax.set_xlabel('Log income (per unit labor) in 1960', fontsize=25)
ax.set_xlim(0.95 * xs.min(), 1.05 * xs.max())
ax.set_ylabel('Income (per unit labor) growth\n({}-{})'.format(start[:4], end[:4]),
fontsize=25)
fig.suptitle(r'Sum of squared errors (SSE) $\equiv \sum_{i=1}^N (y_i - (\beta_0 + \beta_1x_i))^2$',
x=0.5, y=1.025, fontsize=25, family="serif")
fig.legend([yhat, lc], [r"$\hat{y}_i=\hat{\beta}_0 + \hat{\beta}_1x_i$", r"$SSE={0:.4f}$".format(ssr)],
bbox_to_anchor=(0.8, 0.85), prop={'size': 25})
# create the interactive widget
intercept_widget = widgets.FloatSlider(value=2.5e0, min=0.0, max=5.0, step=5e-2, description=r"$\hat{\beta}_0$")
slope_widget = widgets.FloatSlider(value=-0.5, min=-1.0, max=1.0, step=5e-2, description=r"$\hat{\beta}_1$")
some_interactive_plot = widgets.interactive(another_static_plot,
data=widgets.fixed(pwt),
start=widgets.fixed("1960-01-01"),
end=widgets.fixed("2010-01-01"),
intercept=intercept_widget,
slope=slope_widget,
)
display(some_interactive_plot)
# compute the optimal parameter estimates "by hand"
estimated_slope = xs.cov(ys) / xs.var()
estimated_intercept = ys.mean() - estimated_slope * xs.mean()
print "Estimated intercept: {}".format(estimated_intercept)
print "Estimated slope: {}".format(estimated_slope)
# define the variables
xs = np.log(real_gdp_per_unit_labor(pwt, '1960-01-01'))
ys = growth_rate_real_gdp_per_unit_labor(pwt, '1960-01-01', '2010-01-01')
# compute and plot the regression line given current params
data = pd.DataFrame.from_dict({'x': xs, 'y': ys})
lm = smf.ols(formula="y ~ x", data=data).fit()
lm.summary()
def plot_fitted_model(data, start="1950-01-01", end="2011-01-01"):
Plot the growth rate of real GDP per unit labor over some time period
against the level of real GDP per unit labor at the start of the time
period. Then add a regression line associated with given values for the
intercept and slope.
Parameters
----------
data : pandas.Panel
The Penn World Tables (PWT) data set.
start : str (default="1950-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
end : str (default="2011-01-01")
Some year between 1950 and 2011. Format should be "YYYY-01-01".
# create the scatter plot
fig, ax = plt.subplots(1, 1, figsize=(12,9))
xs = np.log(real_gdp_per_unit_labor(data, start))
ys = (np.log(real_gdp_per_unit_labor(data, end)) -
np.log(real_gdp_per_unit_labor(data, start)))
ax.scatter(xs, ys, color='k')
# compute and plot the regression line given current params
data = pd.DataFrame.from_dict({'x': xs, 'y': ys})
lm = smf.ols(formula="y ~ x", data=data).fit()
predicted = lambda x: lm.params[0] + lm.params[1] * x
grid = np.linspace(0.95 * xs.min(), 1.05 * xs.max(), 1000)
yhat, = ax.plot(grid, predicted(grid) , color='b')
ssr = np.sum((predicted(xs) - ys)**2)
# add the residuals to the plot
lines = zip(zip(xs, ys), zip(xs, predicted(xs)))
lc = mc.LineCollection(lines, colors='r', linewidths=2)
ax.add_collection(lc)
# axis labels, title, etc
ax.set_xlabel('Log income (per unit labor) in 1960', fontsize=25)
ax.set_xlim(0.95 * xs.min(), 1.05 * xs.max())
ax.set_ylabel('Income (per unit labor) growth\n({}-{})'.format(start[:4], end[:4]),
fontsize=25)
ax.set_ylim(1.05 * ys.min(), 1.05 * ys.max())
fig.legend([yhat, lc], [r"$\hat{y}_i=%.4f + %.4f x_i$" % (lm.params[0], lm.params[1]),
r"$SSR={0:.4f}$".format(ssr)],
bbox_to_anchor=(0.85, 0.85), prop={'size': 25})
plot_fitted_model(pwt, '1960-01-01', '2010-01-01')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: <h1 class="title-slide"> (Linear) Regression </h1>
Step5: Let's focus on a relation between...
Step7: <h2 class="section-header">Step 3
Step8: Since we are drawing straight lines...
Step10: <h2 class="section-header">Step 4
Step11: <h3>Informal parameter estimation...</h3>
Step13: <h3>Formal parameter estimation...</h3>
Step14: <h3>Plotting the final result...</h3>
|
14,392 | <ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
import numpy as np
from scipy.special import beta
# Two variable drichlet ditribution with alpha = (1,2)
def drichlet_pdf(x, y):
return (np.power(x, 1)*np.power(y, 2))/beta(x, y)
from pgmpy.factors.continuous import ContinuousFactor
drichlet_factor = ContinuousFactor(['x', 'y'], drichlet_pdf)
drichlet_factor.scope(), drichlet_factor.assignment(5,6)
def custom_pdf(x, y, z):
return z*(np.power(x, 1)*np.power(y, 2))/beta(x, y)
custom_factor = ContinuousFactor(['x', 'y', 'z'], custom_pdf)
custom_factor.scope(), custom_factor.assignment(1, 2, 3)
custom_factor.reduce([('y', 2)])
custom_factor.scope(), custom_factor.assignment(1, 3)
from scipy.stats import multivariate_normal
std_normal_pdf = lambda *x: multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
std_normal.scope(), std_normal.assignment([1, 1])
std_normal.marginalize(['x2'])
std_normal.scope(), std_normal.assignment(1)
sn_pdf1 = lambda x: multivariate_normal.pdf([x], [0], [[1]])
sn_pdf2 = lambda x1,x2: multivariate_normal.pdf([x1, x2], [0, 0], [[1, 0], [0, 1]])
sn1 = ContinuousFactor(['x2'], sn_pdf1)
sn2 = ContinuousFactor(['x1', 'x2'], sn_pdf2)
sn3 = sn1 * sn2
sn4 = sn2 / sn1
sn3.assignment(0, 0), sn4.assignment(0, 0)
from pgmpy.factors.distributions import GaussianDistribution as JGD
dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
dis.variables
dis.mean
dis.covariance
dis.pdf([0,0,0])
dis1 = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
dis2 = JGD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
dis3 = dis1 * dis2
dis3.variables
dis3.mean
dis3.covariance
from pgmpy.factors.continuous import CanonicalDistribution
phi1 = CanonicalDistribution(['x1', 'x2', 'x3'],
np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
np.array([[1], [4], [-1]]), -2)
phi2 = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
np.array([[5], [-1]]), 1)
phi3 = phi1 * phi2
phi3.variables
phi3.h
phi3.K
phi3.g
phi = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
np.array([[5], [-1]]), 1)
jgd = phi.to_joint_gaussian()
jgd.variables
jgd.covariance
jgd.mean
# For P(Y| X1, X2, X3) = N(-2x1 + 3x2 + 7x3 + 0.2; 9.6)
from pgmpy.factors.continuous import LinearGaussianCPD
cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
print(cpd)
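# Sketch: conditional mean implied by this CPD for one parent assignment
# (coefficients copied from the cell above; the assignment is an arbitrary example)
import numpy as np
beta = np.array([0.2, -2, 3, 7])
x_parents = np.array([1.0, 1.0, 1.0])
print(beta[0] + beta[1:].dot(x_parents))  # mean of Y given X1 = X2 = X3 = 1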
from pgmpy.models import LinearGaussianBayesianNetwork
model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
cpd1 = LinearGaussianCPD('x1', [1], 4)
cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
# This is a hack due to a bug in pgmpy (LinearGaussianCPD
# doesn't have `variables` attribute but `add_cpds` function
# wants to check that...)
cpd1.variables = [*cpd1.evidence, cpd1.variable]
cpd2.variables = [*cpd2.evidence, cpd2.variable]
cpd3.variables = [*cpd3.evidence, cpd3.variable]
model.add_cpds(cpd1, cpd2, cpd3)
jgd = model.to_joint_gaussian()
jgd.variables
jgd.mean
jgd.covariance
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Continuous Factors
Step2: This class supports methods like marginalize, reduce, product and divide just like what we have with discrete classes. One caveat is that when there are a number of variables involved, these methods prove to be inefficient and hence we resort to certain Gaussian or some other approximations which are discussed later.
Step3: The ContinuousFactor class also has a method discretize that takes a pgmpy Discretizer class as input. It will output a list of discrete probability masses or a Factor or TabularCPD object depending upon the discretization method used. Although, we do not have inbuilt discretization algorithms for multivariate distributions for now, the users can always define their own Discretizer class by subclassing the pgmpy.BaseDiscretizer class.
Step4: This class overrides the basic operation methods (marginalize, reduce, normalize, product and divide) as these operations here are more efficient than the ones in its parent class. Most of these operation involve a matrix inversion which is $\mathcal{O}(n^3)$ with repect to the number of variables.
Step5: The others methods can also be used in a similar fashion.
Step6: This class also has a method, to_joint_gaussian to convert the canoncial representation back into the joint gaussian distribution.
Step7: Linear Gaussian CPD
Step8: A Gaussian Bayesian is defined as a network all of whose variables are continuous, and where all of the CPDs are linear Gaussians. These networks are of particular interest as these are an alternate form of representaion of the Joint Gaussian distribution.
|
14,393 | <ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import time
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from sklearn.manifold import TSNE
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
Download a file if not present, and make sure it's the right size.
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified %s' % filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
def read_data(filename):
Extract the first file enclosed in a zip file as a list of words
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size %d' % len(words))
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
del words # Hint to reduce memory.
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
print('data:', [reverse_dictionary[di] for di in data[:8]])
for num_skips, skip_window in [(2, 1), (4, 2)]:
data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Define info to be used by the SummaryWriter. This will let TensorBoard
# plot loss values during the training process.
loss_summary = tf.scalar_summary("loss", loss)
train_summary_op = tf.merge_summary([loss_summary])
# Add variable initializer.
init = tf.initialize_all_variables()
print("finished building graph.")
# Begin training.
num_steps = 100001
session = tf.InteractiveSession(graph=graph)
# We must initialize all variables before we use them.
init.run()
print("Initialized")
# Directory in which to write summary information.
# You can point TensorBoard to this directory via:
# $ tensorboard --logdir=/tmp/word2vec_basic/summaries
# Tensorflow assumes this directory already exists, so we need to create it.
timestamp = str(int(time.time()))
if not os.path.exists(os.path.join("/tmp/word2vec_basic",
"summaries", timestamp)):
os.makedirs(os.path.join("/tmp/word2vec_basic", "summaries", timestamp))
# Create the SummaryWriter
train_summary_writer = tf.train.SummaryWriter(
os.path.join(
"/tmp/word2vec_basic", "summaries", timestamp), session.graph)
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also evaluate the training summary op.
_, loss_val, tsummary = session.run(
[optimizer, loss, train_summary_op],
feed_dict=feed_dict)
average_loss += loss_val
# Write the evaluated summary info to the SummaryWriter. This info will
# then show up in the TensorBoard events.
train_summary_writer.add_summary(tsummary, step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
print("finished training.")
# Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
test_word = 'six'
test_word_idx = dictionary[test_word]
print("Found word {} at index {}".format(test_word, test_word_idx))
test_embeddings = tf.nn.embedding_lookup(normalized_embeddings, [test_word_idx])
test_similarity = tf.matmul(test_embeddings, normalized_embeddings, transpose_b=True)
top_k = 8 # number of nearest neighbors
# Extra: eval the 'test word' similarity
sim = test_similarity.eval()
nearest = (-sim[0, :]).argsort()[1:top_k + 1]
print("Nearest to {}:".format(test_word))
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
print (close_word)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Download the text corpus.
Step4: Read the data into a string.
Step5: Build the dictionary and replace rare words with UNK token.
Step6: Function to generate a training batch for the skip-gram model.
Step7: Build and train a skip-gram model.
Step8: Start TensorBoard
Step9: How-to find the 'nearby' words for a specific given word
|
14,394 | <ASSISTANT_TASK:>
Python Code:
import numpy as np
import datetime
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from numpy import nan
import keras
print keras.__version__
now = datetime.datetime.now
now = datetime.datetime.now
batch_size = 128
nb_classes = 5
nb_epoch = 5
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = 2
# convolution kernel size
kernel_size = 3
if K.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
def train_model(model, train, test, nb_classes):
X_train = train[0].reshape((train[0].shape[0],) + input_shape)
X_test = test[0].reshape((test[0].shape[0],) + input_shape)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(train[1], nb_classes)
Y_test = np_utils.to_categorical(test[1], nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
t = now()
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_test, Y_test))
print('Training time: %s' % (now() - t))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# create two datasets one with digits below 5 and one with 5 and above
X_train_lt5 = X_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
X_test_lt5 = X_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
X_train_gte5 = X_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5 # make classes start at 0 for
X_test_gte5 = X_test[y_test >= 5] # np_utils.to_categorical
y_test_gte5 = y_test[y_test >= 5] - 5
# define two groups of layers: feature (convolutions) and classification (dense)
feature_layers = [
Convolution2D(nb_filters, kernel_size, kernel_size,
border_mode='valid',
input_shape=input_shape),
Activation('relu'),
Convolution2D(nb_filters, kernel_size, kernel_size),
Activation('relu'),
MaxPooling2D(pool_size=(pool_size, pool_size)),
Dropout(0.25),
Flatten(),
]
classification_layers = [
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes),
Activation('softmax')
]
# create complete model
model = Sequential(feature_layers + classification_layers)
# train model for 5-digit classification [0..4]
train_model(model,
(X_train_lt5, y_train_lt5),
(X_test_lt5, y_test_lt5), nb_classes)
# freeze feature layers and rebuild model
for l in feature_layers:
l.trainable = False
# transfer: train dense layers for new classification task [5..9]
train_model(model,
(X_train_gte5, y_train_gte5),
(X_test_gte5, y_test_gte5), nb_classes)
from keras.applications import VGG16
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.layers import Input, Flatten, Dense
from keras.models import Model
import numpy as np
#Get back the convolutional part of a VGG network trained on ImageNet
model_vgg16_conv = VGG16(weights='imagenet', include_top=False)
model_vgg16_conv.summary()
#Create your own input format (here 3x200x200)
inp = Input(shape=(48,48,3),name = 'image_input')
#Use the generated model
output_vgg16_conv = model_vgg16_conv(inp)
#Add the fully-connected layers
x = Flatten(name='flatten')(output_vgg16_conv)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(5, activation='softmax', name='predictions')(x)
#Create your own model
my_model = Model(input=inp, output=x)
#In the summary, weights and layers from VGG part will be hidden, but they will be fit during the training
my_model.summary()
import scipy
new_shape = (48,48)
X_train_new = np.empty(shape=(X_train_gte5.shape[0],)+(48,48,3))
for idx in xrange(X_train_gte5.shape[0]):
X_train_new[idx] = np.resize(scipy.misc.imresize(X_train_gte5[idx], (new_shape)), (48, 48, 3))
X_train_new[idx] = np.resize(X_train_new[idx], (48, 48, 3))
#X_train_new = np.expand_dims(X_train_new, axis=-1)
print X_train_new.shape
X_train_new = X_train_new.astype('float32')
X_train_new /= 255
print('X_train shape:', X_train_new.shape)
print(X_train_new.shape[0], 'train samples')
print(X_train_new.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train_gte5, nb_classes)
Y_test = np_utils.to_categorical(y_test_gte5, nb_classes)
print y_train.shape
my_model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
my_model.fit(X_train_new, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1)
#print('Training time: %s' % (now() - t))
#score = my_model.evaluate(X_test, Y_test, verbose=0)
#print('Test score:', score[0])
#print('Test accuracy:', score[1])
#train_model(my_model,
# (X_train_new, y_train_gte5),
# (X_test_gte5, y_test_gte5), nb_classes)
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Settings
Step2: Dataset Preparation
Step3: Your Turn
Step4: ```python
|
14,395 | <ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
Download file from <url>
:param url: URL to file
:param file: Local file path
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
arr = np.array(image_data)
arr = (arr - arr.min())/(arr.max() - arr.min())
arr = arr * 0.8 + 0.1
return arr.tolist()
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
print(train_features)
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
results
# Change if you have memory restrictions
batch_size = 128
# Find the best parameters for each configuration
#results = []
#for epochs in [1,2,3,4,5]:
# for learning_rate in [0.8, 0.5, 0.1, 0.05, 0.01]:
epochs = 4
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
#print('LR: {}\t Epochs: {}\t Validation accuracy at {}'.format(learning_rate, epochs, validation_accuracy))
#results.append((learning_rate, epochs, validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png" style="height
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png" style="height
Step9: Test
|
14,396 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def well2d(x, y, nx, ny, L=1.0):
Compute the 2d quantum well wave function.
answer = np.array(2/L * np.sin(nx*np.pi*x/L)*np.sin(ny*np.pi*y/L))
return answer
psi = well2d(np.linspace(0,1,10), np.linspace(0,1,10), 1, 1)
assert len(psi)==10
assert psi.shape==(10,)
#Worked with Orion Zorich
x = np.linspace(0,1,100)
y = np.linspace(0,1,100)
psi_1 = well2d(x,y,3,2,1.0)
psi_2 = well2d(x,y,3,2,1.0)
X, Y = np.meshgrid(psi_1, psi_2)
plt.contour(Y)
assert True # use this cell for grading the contour plot
plt.pcolor(Y)
assert True # use this cell for grading the pcolor plot
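# Hedged sketch (editorial, not part of the graded solution): the more common route is to
# build coordinate grids with np.meshgrid and evaluate/plot the 2D wavefunction directly.
# `xg`, `yg`, `psi_grid` are illustrative names.
xg, yg = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
psi_grid = well2d(xg, yg, 3, 2, 1.0)
plt.contourf(xg, yg, psi_grid, cmap='viridis')
plt.colorbar()
plt.show()
plt.pcolormesh(xg, yg, psi_grid, cmap='viridis')
plt.colorbar()
plt.show()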
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Contour plots of 2d wavefunctions
Step3: The contour, contourf, pcolor and pcolormesh functions of Matplotlib can be used for effective visualizations of 2d scalar fields. Use the Matplotlib documentation to learn how to use these functions along with the numpy.meshgrid function to visualize the above wavefunction
Step4: Next make a visualization using one of the pcolor functions
|
14,397 | <ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='inputs_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='inputs_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out: generator output after tanh
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha*h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
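# Editorial note (hedged): tf.maximum(alpha*h1, h1) is the leaky ReLU, and the tanh output
# keeps generated pixels in [-1, 1], matching the real images after the `batch_images*2 - 1`
# rescaling done in the training loop below.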
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha*h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, d_hidden_size, reuse=True, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real)*(1-smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
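# Editorial note (hedged): the (1 - smooth) factor applies one-sided label smoothing to the
# real labels, and g_loss uses ones as targets for the fake logits -- the usual
# non-saturating generator objective, which gives stronger gradients early in training.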
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator') ]
d_vars = [var for var in t_vars if var.name.startswith('discriminator') ]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
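# Editorial note: passing var_list restricts each optimizer to its own sub-network, so the
# discriminator update never touches generator weights and vice versa.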
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
14,398 | <ASSISTANT_TASK:>
Python Code:
sample_rate = 4096
nyquist = sample_rate/2
time_length_seconds = 512
# Make the data twice as long so we can cut off the wrap-around
num_noise_samples=2*time_length_seconds*sample_rate
white_noise_fd=rfft(np.random.normal(size=num_noise_samples))
sim_freqs=np.arange(len(white_noise_fd))/(2.*time_length_seconds)
psd=(sim_freqs/40.)**-10+(sim_freqs/70.)**-4+0.5+1.397e-6*(sim_freqs)**2
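# Editorial note (hedged): this synthetic PSD mimics a ground-based detector noise curve --
# a steep low-frequency wall (the f^-10 term dominating below ~40 Hz), an intermediate f^-4
# slope, a flat mid-band floor of 0.5, and an f^2 rise at high frequency.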
# Put in a fake highpass around 8 Hz, so we don't have too much low frequency
to_bin=2*time_length_seconds
f_pass, f_min = 8., 10.
idx1=int(to_bin*f_pass)
idx2=int(to_bin*f_min)
psd[:idx1]=psd[idx2]*(sim_freqs[:idx1]/f_pass)**2
psd[idx1:idx2]=psd[idx2]
colored_noise_td = np.sqrt(float(nyquist))*irfft(np.sqrt(psd)*white_noise_fd)
colored_noise_td = colored_noise_td[len(colored_noise_td)//4:-len(colored_noise_td)//4]
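# Editorial note (hedged): multiplying white noise by sqrt(PSD) in the frequency domain and
# inverse-FFTing yields Gaussian noise with the desired spectrum; the sqrt(nyquist) factor
# sets the overall normalization, and only the middle half of the stretch is kept so that
# the circular wrap-around from the FFT is discarded.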
def welch_asd(data, fft_len_sec, overlap=0.5, window='hanning'):
"""Measure the ASD using the Welch method of averaging
estimates on shorter overlapping segments of the data."""
assert 0. <= overlap < 1.
ff, tmp = sig.welch(data, fs=sample_rate, window=window,
nperseg=fft_len_sec*sample_rate,
noverlap=overlap*fft_len_sec*sample_rate)
return ff, np.sqrt(tmp)
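# Hedged usage sketch (editorial): longer FFT segments give finer frequency resolution but
# fewer averages, hence a noisier estimate; e.g. a 16 s segment resolves 1/16 Hz bins.
# ff_long, asd_long = welch_asd(colored_noise_td, fft_len_sec=16)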
ff, measured_asd_bc = welch_asd(colored_noise_td, window='boxcar', fft_len_sec=4)
ff, measured_asd = welch_asd(colored_noise_td, window='hanning', fft_len_sec=4)
plt.loglog(ff, measured_asd_bc, label='no window')
plt.loglog(ff, measured_asd, label='hann')
plt.loglog(sim_freqs, np.sqrt(psd), c='k', ls=':', label='true')
plt.xlim(4,nyquist)
plt.ylim(0.5,2e3)
plt.legend(loc='upper right')
def line_at(f0, data):
"""Make a sinusoid at f0 Hz with the same length as data."""
return np.sin(2.*np.pi*f0*np.arange(len(data))/sample_rate)
noise_with_lines = colored_noise_td\
+ 100.*line_at(35.9, colored_noise_td)\
+ 100.*line_at(36.7, colored_noise_td)
ff16, measured_asd_lines_bc = welch_asd(noise_with_lines, window='boxcar', fft_len_sec=16)
ff16, measured_asd_lines = welch_asd(noise_with_lines, window='hanning', fft_len_sec=16)
plt.loglog(ff16, measured_asd_lines_bc, label='no window')
plt.loglog(ff16, measured_asd_lines, label='hann window')
plt.loglog(sim_freqs, np.sqrt(psd), c='k', ls=':', label='true')
plt.xlim(30,40)
plt.ylim(0.5,2e3)
plt.legend(loc='upper right')
plt.loglog(ff16, measured_asd_lines_bc, label='no window')
plt.loglog(ff16, measured_asd_lines, label='hann window')
plt.loglog(sim_freqs, np.sqrt(psd), c='k', ls=':', label='true')
plt.xlim(10,2000)
plt.ylim(0.5,2e3)
plt.legend(loc='upper right')
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Generate fake Gaussian noise
Step4: The estimate using the Hann window recovers the true spectrum very well. However, forgetting to use a window causes something called 'spectral leakage'. The large power from low frequencies leaks into the higher frequencies and the spectral estimate is wrong. This has a characteristic smooth, slowly falling shape. You can see that in the code above we use a 'boxcar' window; this is a rectangular window that goes instantly from one to zero, and is the same as not using a window.
|
14,399 | <ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.__version__
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("tmp/data/")
batch_size = 3
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch.shape
for image_data in X_batch:
plt.imshow(image_data.reshape([28, 28]), cmap="binary", interpolation="nearest")
plt.show()
y_batch
n_inputs = 28 * 28
n_hidden1 = 100
n_outputs = 10
graph = tf.Graph()
with graph.as_default():
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
y = tf.placeholder(tf.int32, shape=[None], name="y")
with tf.name_scope("hidden1"):
b1 = tf.Variable(tf.zeros([n_hidden1]), name="b1")
W1 = tf.Variable(tf.random_uniform([n_inputs, n_hidden1], -1.0, 1.0), name="W1")
hidden1 = tf.nn.relu(tf.matmul(X, W1) + b1)
with tf.name_scope("output"):
b2 = tf.Variable(tf.zeros([n_outputs]), name="b2")
W2 = tf.Variable(tf.random_uniform([n_hidden1, n_outputs], -1.0, 1.0), name="W2")
logits = tf.matmul(hidden1, W2) + b2
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.name_scope("init_and_save"):
init = tf.global_variables_initializer()
saver = tf.train.Saver()
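# Editorial note: the single-hidden-layer graph above is a first pass; the graph built
# next replaces it with two hidden layers, and only that deeper graph is trained below.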
n_inputs = 28 * 28
n_hidden1 = 100
n_hidden2 = 100
n_outputs = 10
graph = tf.Graph()
with graph.as_default():
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
y = tf.placeholder(tf.int32, shape=[None], name="y")
with tf.name_scope("hidden1"):
b1 = tf.Variable(tf.zeros([n_hidden1]), name="b1")
W1 = tf.Variable(tf.random_uniform([n_inputs, n_hidden1], -1.0, 1.0), name="W1")
hidden1 = tf.nn.relu(tf.matmul(X, W1) + b1)
with tf.name_scope("hidden2"):
b2 = tf.Variable(tf.zeros([n_hidden2]), name="b2")
W2 = tf.Variable(tf.random_uniform([n_hidden1, n_hidden2], -1.0, 1.0), name="W2")
hidden2 = tf.nn.relu(tf.matmul(hidden1, W2) + b2)
with tf.name_scope("output"):
b3 = tf.Variable(tf.zeros([n_outputs]), name="b3")
W3 = tf.Variable(tf.random_uniform([n_hidden2, n_outputs], -1.0, 1.0), name="W3")
logits = tf.matmul(hidden2, W3) + b3
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.name_scope("init_and_save"):
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 20
batch_size = 50
with tf.Session(graph=graph) as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_val = accuracy.eval(feed_dict={X: mnist.validation.images, y: mnist.validation.labels})
print(epoch, "Train accuracy:", acc_train, "Validation accuracy:", acc_val)
save_path = saver.save(sess, "./my_mnist_model")
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
saver = tf.train.import_meta_graph("./my_mnist_model.meta")
saver.restore(sess, "./my_mnist_model")
X = graph.get_tensor_by_name("inputs/X:0")
Y_proba = graph.get_tensor_by_name("output/Y_proba:0")
Y_proba_val = Y_proba.eval(feed_dict={X: mnist.test.images})
for example_index in range(200):
y_proba = Y_proba_val[example_index]
y_pred = np.argmax(y_proba)
y_label = mnist.test.labels[example_index]
if y_pred != y_label:
print("Actual class:{}, Predicted class: {}, Main probabilities: {}".format(
y_label,
y_pred,
", ".join(["{}:{:.1f}%".format(n, 100*p)
for n, p in enumerate(y_proba) if p > 0.01])))
plt.imshow(mnist.test.images[example_index].reshape([28, 28]), cmap="binary", interpolation="nearest")
plt.show()
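# Hedged sketch (editorial addition): overall test accuracy computed from the restored
# model's probabilities; `y_pred_all` and `test_acc` are illustrative names.
y_pred_all = np.argmax(Y_proba_val, axis=1)
test_acc = np.mean(y_pred_all == mnist.test.labels)
print("Test accuracy: {:.4f}".format(test_acc))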
<END_TASK> | <SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Artificial Neural Networks
Step2: Exercise 8
Step3: 8.2) Write the training code, and train the model for about 20 epochs (i.e. enough training iterations to go through the training set 20 times). Evaluate it on the test set
Step4: 8.2)
Step5: 8.3)
|