{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Jupyternote Cheatsheet.ipynb","provenance":[],"mount_file_id":"1rMSETYdooFC6fVgT0PaOovnBrB4ZWoys","authorship_tag":"ABX9TyN4O59ZYPVT0rGiUB3bfznT"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["# Models"],"metadata":{"id":"ODx9TIOB4tCe"}},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"BelRHeLw4qyQ","executionInfo":{"status":"ok","timestamp":1654537166220,"user_tz":-60,"elapsed":22,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"60695f20-3957-4958-aabd-c2ecff870977"},"outputs":[{"output_type":"stream","name":"stdout","text":["Writing models.py\n"]}],"source":["%%writefile models.py\n","from __future__ import division\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.autograd import Variable\n","import numpy as np\n","\n","from PIL import Image\n","\n","from utils.parse_config import *\n","from utils.utils import build_targets\n","from collections import defaultdict\n","\n","##import matplotlib.pyplot as plt\n","##import matplotlib.patches as patches\n","\n","\n","def create_modules(module_defs):\n","    \"\"\"\n","    Constructs module list of layer blocks from module configuration in module_defs\n","    \"\"\"\n","    hyperparams = module_defs.pop(0)\n","    output_filters = [int(hyperparams[\"channels\"])]\n","    module_list = nn.ModuleList()\n","    for i, module_def in enumerate(module_defs):\n","        modules = nn.Sequential()\n","\n","        if module_def[\"type\"] == \"convolutional\":\n","            bn = int(module_def[\"batch_normalize\"])\n","            filters = int(module_def[\"filters\"])\n","            kernel_size = int(module_def[\"size\"])\n","            pad = (kernel_size - 1) // 2 if int(module_def[\"pad\"]) else 0\n","            modules.add_module(\n","                \"conv_%d\" % i,\n","                nn.Conv2d(\n","                    in_channels=output_filters[-1],\n","                    out_channels=filters,\n","                    kernel_size=kernel_size,\n","                    stride=int(module_def[\"stride\"]),\n","                    padding=pad,\n","                    bias=not bn,\n","                ),\n","            )\n","            if bn:\n","                modules.add_module(\"batch_norm_%d\" % i, nn.BatchNorm2d(filters))\n","            if module_def[\"activation\"] == \"leaky\":\n","                modules.add_module(\"leaky_%d\" % i, nn.LeakyReLU(0.1))\n","\n","        elif module_def[\"type\"] == \"maxpool\":\n","            kernel_size = int(module_def[\"size\"])\n","            stride = int(module_def[\"stride\"])\n","            if kernel_size == 2 and stride == 1:\n","                padding = nn.ZeroPad2d((0, 1, 0, 1))\n","                modules.add_module(\"_debug_padding_%d\" % i, padding)\n","            maxpool = nn.MaxPool2d(\n","                kernel_size=int(module_def[\"size\"]),\n","                stride=int(module_def[\"stride\"]),\n","                padding=int((kernel_size - 1) // 2),\n","            )\n","            modules.add_module(\"maxpool_%d\" % i, maxpool)\n","\n","        elif module_def[\"type\"] == \"upsample\":\n","            upsample = nn.Upsample(scale_factor=int(module_def[\"stride\"]), mode=\"nearest\")\n","            modules.add_module(\"upsample_%d\" % i, upsample)\n","\n","        elif module_def[\"type\"] 
== \"route\":\n","            layers = [int(x) for x in module_def[\"layers\"].split(\",\")]\n","            filters = sum([output_filters[layer_i] for layer_i in layers])\n","            modules.add_module(\"route_%d\" % i, EmptyLayer())\n","\n","        elif module_def[\"type\"] == \"shortcut\":\n","            filters = output_filters[int(module_def[\"from\"])]\n","            modules.add_module(\"shortcut_%d\" % i, EmptyLayer())\n","\n","        elif module_def[\"type\"] == \"yolo\":\n","            anchor_idxs = [int(x) for x in module_def[\"mask\"].split(\",\")]\n","            # Extract anchors\n","            anchors = [int(x) for x in module_def[\"anchors\"].split(\",\")]\n","            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\n","            anchors = [anchors[i] for i in anchor_idxs]\n","            num_classes = int(module_def[\"classes\"])\n","            img_height = int(hyperparams[\"height\"])\n","            # Define detection layer\n","            yolo_layer = YOLOLayer(anchors, num_classes, img_height)\n","            modules.add_module(\"yolo_%d\" % i, yolo_layer)\n","        # Register module list and number of output filters\n","        module_list.append(modules)\n","        output_filters.append(filters)\n","\n","    return hyperparams, module_list\n","\n","\n","class EmptyLayer(nn.Module):\n","    \"\"\"Placeholder for 'route' and 'shortcut' layers\"\"\"\n","\n","    def __init__(self):\n","        super(EmptyLayer, self).__init__()\n","\n","\n","class YOLOLayer(nn.Module):\n","    \"\"\"Detection layer\"\"\"\n","\n","    def __init__(self, anchors, num_classes, img_dim):\n","        super(YOLOLayer, self).__init__()\n","        self.anchors = anchors\n","        self.num_anchors = len(anchors)\n","        self.num_classes = num_classes\n","        self.bbox_attrs = 5 + num_classes\n","        self.image_dim = img_dim\n","        self.ignore_thres = 0.5\n","        self.lambda_coord = 1\n","\n","        self.mse_loss = nn.MSELoss(size_average=True)  # Coordinate loss\n","        self.bce_loss = nn.BCELoss(size_average=True)  # Confidence loss\n","        self.ce_loss = nn.CrossEntropyLoss()  # Class loss\n","\n","    def forward(self, x, targets=None):\n","        nA = self.num_anchors\n","        nB = x.size(0)\n","        nG = x.size(2)\n","        stride = self.image_dim / nG\n","\n","        # Tensors for cuda support\n","        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n","        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n","        ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor\n","\n","        prediction = x.view(nB, nA, self.bbox_attrs, nG, nG).permute(0, 1, 3, 4, 2).contiguous()\n","\n","        # Get outputs\n","        x = torch.sigmoid(prediction[..., 0])  # Center x\n","        y = torch.sigmoid(prediction[..., 1])  # Center y\n","        w = prediction[..., 2]  # Width\n","        h = prediction[..., 3]  # Height\n","        pred_conf = torch.sigmoid(prediction[..., 4])  # Conf\n","        pred_cls = torch.sigmoid(prediction[..., 5:])  # Cls pred.\n","\n","        # Calculate offsets for each grid\n","        grid_x = torch.arange(nG).repeat(nG, 1).view([1, 1, nG, nG]).type(FloatTensor)\n","        grid_y = torch.arange(nG).repeat(nG, 1).t().view([1, 1, nG, nG]).type(FloatTensor)\n","        scaled_anchors = FloatTensor([(a_w / stride, a_h / stride) for a_w, a_h in self.anchors])\n","        anchor_w = scaled_anchors[:, 0:1].view((1, 
nA, 1, 1))\n","        anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))\n","\n","        # Add offset and scale with anchors\n","        pred_boxes = FloatTensor(prediction[..., :4].shape)\n","        pred_boxes[..., 0] = x.data + grid_x\n","        pred_boxes[..., 1] = y.data + grid_y\n","        pred_boxes[..., 2] = torch.exp(w.data) * anchor_w\n","        pred_boxes[..., 3] = torch.exp(h.data) * anchor_h\n","\n","        # Training\n","        if targets is not None:\n","\n","            if x.is_cuda:\n","                self.mse_loss = self.mse_loss.cuda()\n","                self.bce_loss = self.bce_loss.cuda()\n","                self.ce_loss = self.ce_loss.cuda()\n","\n","            nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls = build_targets(\n","                pred_boxes=pred_boxes.cpu().data,\n","                pred_conf=pred_conf.cpu().data,\n","                pred_cls=pred_cls.cpu().data,\n","                target=targets.cpu().data,\n","                anchors=scaled_anchors.cpu().data,\n","                num_anchors=nA,\n","                num_classes=self.num_classes,\n","                grid_size=nG,\n","                ignore_thres=self.ignore_thres,\n","                img_dim=self.image_dim,\n","            )\n","\n","            nProposals = int((pred_conf > 0.5).sum().item())\n","            recall = float(nCorrect / nGT) if nGT else 1\n","            precision = float(nCorrect / nProposals)\n","\n","            # Handle masks\n","            mask = Variable(mask.type(ByteTensor))\n","            conf_mask = Variable(conf_mask.type(ByteTensor))\n","\n","            # Handle target variables\n","            tx = Variable(tx.type(FloatTensor), requires_grad=False)\n","            ty = Variable(ty.type(FloatTensor), requires_grad=False)\n","            tw = Variable(tw.type(FloatTensor), requires_grad=False)\n","            th = Variable(th.type(FloatTensor), requires_grad=False)\n","            tconf = Variable(tconf.type(FloatTensor), requires_grad=False)\n","            tcls = Variable(tcls.type(LongTensor), requires_grad=False)\n","\n","            # Get conf mask where gt and where there is no gt\n","            conf_mask_true = mask\n","            conf_mask_false = conf_mask - mask\n","\n","            # Mask outputs to ignore non-existing objects\n","            loss_x = self.mse_loss(x[mask], tx[mask])\n","            loss_y = self.mse_loss(y[mask], ty[mask])\n","            loss_w = self.mse_loss(w[mask], tw[mask])\n","            loss_h = self.mse_loss(h[mask], th[mask])\n","            loss_conf = self.bce_loss(pred_conf[conf_mask_false], tconf[conf_mask_false]) + self.bce_loss(\n","                pred_conf[conf_mask_true], tconf[conf_mask_true]\n","            )\n","            loss_cls = (1 / nB) * self.ce_loss(pred_cls[mask], torch.argmax(tcls[mask], 1))\n","            loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls\n","\n","            return (\n","                loss,\n","                loss_x.item(),\n","                loss_y.item(),\n","                loss_w.item(),\n","                loss_h.item(),\n","                loss_conf.item(),\n","                loss_cls.item(),\n","                recall,\n","                precision,\n","            )\n","\n","        else:\n","            # If not in training phase return predictions\n","            output = torch.cat(\n","                (\n","                    pred_boxes.view(nB, -1, 4) * stride,\n","                    pred_conf.view(nB, -1, 1),\n","   
                 pred_cls.view(nB, -1, self.num_classes),\n","                ),\n","                -1,\n","            )\n","            return output\n","\n","\n","class Darknet(nn.Module):\n","    \"\"\"YOLOv3 object detection model\"\"\"\n","\n","    def __init__(self, config_path, img_size=416):\n","        super(Darknet, self).__init__()\n","        self.module_defs = parse_model_config(config_path)\n","        self.hyperparams, self.module_list = create_modules(self.module_defs)\n","        self.img_size = img_size\n","        self.seen = 0\n","        self.header_info = np.array([0, 0, 0, self.seen, 0])\n","        self.loss_names = [\"x\", \"y\", \"w\", \"h\", \"conf\", \"cls\", \"recall\", \"precision\"]\n","\n","    def forward(self, x, targets=None):\n","        is_training = targets is not None\n","        output = []\n","        self.losses = defaultdict(float)\n","        layer_outputs = []\n","        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n","            if module_def[\"type\"] in [\"convolutional\", \"upsample\", \"maxpool\"]:\n","                x = module(x)\n","            elif module_def[\"type\"] == \"route\":\n","                layer_i = [int(x) for x in module_def[\"layers\"].split(\",\")]\n","                x = torch.cat([layer_outputs[i] for i in layer_i], 1)\n","            elif module_def[\"type\"] == \"shortcut\":\n","                layer_i = int(module_def[\"from\"])\n","                x = layer_outputs[-1] + layer_outputs[layer_i]\n","            elif module_def[\"type\"] == \"yolo\":\n","                # Train phase: get loss\n","                if is_training:\n","                    x, *losses = module[0](x, targets)\n","                    for name, loss in zip(self.loss_names, losses):\n","                        self.losses[name] += loss\n","                # Test phase: Get detections\n","                else:\n","                    x = module(x)\n","                output.append(x)\n","            layer_outputs.append(x)\n","\n","        self.losses[\"recall\"] /= 3\n","        self.losses[\"precision\"] /= 3\n","        return sum(output) if is_training else torch.cat(output, 1)\n","\n","    def load_weights(self, weights_path):\n","        \"\"\"Parses and loads the weights stored in 'weights_path'\"\"\"\n","\n","        # Open the weights file\n","        fp = open(weights_path, \"rb\")\n","        header = np.fromfile(fp, dtype=np.int32, count=5)  # First five are header values\n","\n","        # Needed to write header when saving weights\n","        self.header_info = header\n","\n","        self.seen = header[3]\n","        weights = np.fromfile(fp, dtype=np.float32)  # The rest are weights\n","        fp.close()\n","\n","        ptr = 0\n","        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n","            if module_def[\"type\"] == \"convolutional\":\n","                conv_layer = module[0]\n","                if module_def[\"batch_normalize\"]:\n","                    # Load BN bias, weights, running mean and running variance\n","                    bn_layer = module[1]\n","                    num_b = bn_layer.bias.numel()  # Number of biases\n","                    # Bias\n","                    bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)\n","                    bn_layer.bias.data.copy_(bn_b)\n","                    ptr += num_b\n","                    # Weight\n","                    bn_w = 
torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)\n","                    bn_layer.weight.data.copy_(bn_w)\n","                    ptr += num_b\n","                    # Running Mean\n","                    bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)\n","                    bn_layer.running_mean.data.copy_(bn_rm)\n","                    ptr += num_b\n","                    # Running Var\n","                    bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)\n","                    bn_layer.running_var.data.copy_(bn_rv)\n","                    ptr += num_b\n","                else:\n","                    # Load conv. bias\n","                    num_b = conv_layer.bias.numel()\n","                    conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)\n","                    conv_layer.bias.data.copy_(conv_b)\n","                    ptr += num_b\n","                # Load conv. weights\n","                num_w = conv_layer.weight.numel()\n","                conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)\n","                conv_layer.weight.data.copy_(conv_w)\n","                ptr += num_w\n","\n","    \"\"\"\n","        @:param path    - path of the new weights file\n","        @:param cutoff  - save layers between 0 and cutoff (cutoff = -1 -> all are saved)\n","    \"\"\"\n","\n","    def save_weights(self, path, cutoff=-1):\n","\n","        fp = open(path, \"wb\")\n","        self.header_info[3] = self.seen\n","        self.header_info.tofile(fp)\n","\n","        # Iterate through layers\n","        for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\n","            if module_def[\"type\"] == \"convolutional\":\n","                conv_layer = module[0]\n","                # If batch norm, load bn first\n","                if module_def[\"batch_normalize\"]:\n","                    bn_layer = module[1]\n","                    bn_layer.bias.data.cpu().numpy().tofile(fp)\n","                    bn_layer.weight.data.cpu().numpy().tofile(fp)\n","                    bn_layer.running_mean.data.cpu().numpy().tofile(fp)\n","                    bn_layer.running_var.data.cpu().numpy().tofile(fp)\n","                # Load conv bias\n","                else:\n","                    conv_layer.bias.data.cpu().numpy().tofile(fp)\n","                # Load conv weights\n","                conv_layer.weight.data.cpu().numpy().tofile(fp)\n","\n","        fp.close()"]},{"cell_type":"code","source":["!ls"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"ar8FuY3z43Fk","executionInfo":{"status":"ok","timestamp":1654537174809,"user_tz":-60,"elapsed":16,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"ce227d02-75a3-477d-becf-e1c2702c7001"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["models.py  sample_data\n"]}]},{"cell_type":"code","source":["!pwd"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"hRxa6vyoGbla","executionInfo":{"status":"ok","timestamp":1654537258168,"user_tz":-60,"elapsed":26,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"ccaaf1dc-6769-4093-8769-c8aa3b809bdf"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]}]},{"cell_type":"code","source":["%%writefile Readme.md\n","Are you for 
real!!"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"cVKDwgGtGv7g","executionInfo":{"status":"ok","timestamp":1654537404197,"user_tz":-60,"elapsed":21,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"41cdc392-059d-42be-b267-2a7f66d0a1f6"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["Overwriting Readme.md\n"]}]},{"cell_type":"code","source":["%cd Computer Vision"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"780vJiykHTmT","executionInfo":{"status":"ok","timestamp":1654537643123,"user_tz":-60,"elapsed":16,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"159eb128-2a7a-41b3-b84c-7d517ff92454"},"execution_count":14,"outputs":[{"output_type":"stream","name":"stdout","text":["/content/drive/MyDrive/Python/Machine Learning/Computer Vision\n"]}]},{"cell_type":"code","source":["!pwd"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"WeA417NzHe0W","executionInfo":{"status":"ok","timestamp":1654537646111,"user_tz":-60,"elapsed":408,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"036a3c8e-b106-46a8-b5de-b7adf66938ab"},"execution_count":15,"outputs":[{"output_type":"stream","name":"stdout","text":["/content/drive/MyDrive/Python/Machine Learning/Computer Vision\n"]}]},{"cell_type":"code","source":["%%writefile test.and\n","\n","Really I can now write to my drive!"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"hrSVQd-fHzai","executionInfo":{"status":"ok","timestamp":1654537570112,"user_tz":-60,"elapsed":24,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"c58a5849-aaba-4fe3-c596-681a5e7df731"},"execution_count":10,"outputs":[{"output_type":"stream","name":"stdout","text":["Writing test.and\n"]}]},{"cell_type":"code","source":["!ls"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"jRtg6b1IH8KV","executionInfo":{"status":"ok","timestamp":1654537654214,"user_tz":-60,"elapsed":24,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"dd49447d-6924-4176-f5a9-ca184b671be8"},"execution_count":16,"outputs":[{"output_type":"stream","name":"stdout","text":["cnn-resnet-CIFAR10  darknet-COCO-object_detection  feedforward-cnn-MNIST\n"]}]},{"cell_type":"code","source":["%%bash\n","\n","ls -la\n","python --version"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"iUpVW1oZIQnl","executionInfo":{"status":"ok","timestamp":1654537857269,"user_tz":-60,"elapsed":14,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"ff54c93a-9f2c-4453-d82f-c6c1683f61b8"},"execution_count":19,"outputs":[{"output_type":"stream","name":"stdout","text":["total 12\n","drwx------ 2 root root 4096 May 17 21:02 cnn-resnet-CIFAR10\n","drwx------ 2 root root 4096 Jun  6 16:38 darknet-COCO-object_detection\n","drwx------ 2 root root 4096 May 17 21:01 feedforward-cnn-MNIST\n","Python 3.7.13\n"]}]},{"cell_type":"code","source":["%cd ../"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"NJ7riTtCI2-V","executionInfo":{"status":"ok","timestamp":1654537984381,"user_tz":-60,"elapsed":14,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"713f2de8-ae10-46b9-d5e9-bbfa779de2c8"},"execution_count":21,"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]}]},{"cell_type":"code","source":["%%bash\n","\n","cd \"drive/MyDrive/Python/Machine 
Learning\"\n","ls"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"ZAOqxQzPJc1k","executionInfo":{"status":"ok","timestamp":1654538084191,"user_tz":-60,"elapsed":14,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"7b82c13f-3e14-47b5-bc12-25bdf0dee540"},"execution_count":25,"outputs":[{"output_type":"stream","name":"stdout","text":["Articles\n","Computer Vision\n","Datasets\n","Deep-Learning-with-PyTorch-Jovian\n","Deep RL\n","FastAI Course\n","Generative Models\n","HuggingFace-Deep-RL\n","PyTorch\n","ZeroToGANS_Revision\n"]}]},{"cell_type":"code","source":["%run models.py"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":235},"id":"HvI6SRX8JsS7","executionInfo":{"status":"ok","timestamp":1654538109961,"user_tz":-60,"elapsed":2355,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"08a28f6a-76c2-4eaa-fa36-36d5a8e145ea"},"execution_count":27,"outputs":[{"output_type":"error","ename":"ModuleNotFoundError","evalue":"ignored","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)","\u001b[0;32m/content/models.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     10\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mPIL\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparse_config\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mbuild_targets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mcollections\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdefaultdict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'utils'"]}]},{"cell_type":"code","source":["%edit"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"JLbktoGWJvft","executionInfo":{"status":"ok","timestamp":1654538391516,"user_tz":-60,"elapsed":21,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}},"outputId":"cce69d99-b879-4600-a9ba-9afb5a58b76a"},"execution_count":29,"outputs":[{"output_type":"stream","name":"stdout","text":["IPython will make a temporary file named: /tmp/ipython_edit_nffqr1eo/ipython_edit_msvbxat4.py\n"]}]},{"cell_type":"code","source":["%load models.py"],"metadata":{"id":"PI_bYsujKQfx","executionInfo":{"status":"ok","timestamp":1654538646656,"user_tz":-60,"elapsed":443,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}}},"execution_count":31,"outputs":[]},{"cell_type":"code","source":["%%writefile\n","%run\n","%cd\n","%cat\n","%load [-r, -s]\n","%edit\n","%time, %%time\n","%timeit, %%timeit\n","%%html\n","%env, ...\n","%%file, alias for writefile\n","%%bash\n","%matplotlib [inline, ...]\n","and more\n","%paste, 
%cpaste\n","%pinfo\n","%who\n","%lsmagic\n","%pwd"],"metadata":{"id":"GdCgR_KCL7MK"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["%quickref\n","%%js\n","%%python[2, 3]\n","%%latex\n","%%shell\n","%%svg"],"metadata":{"id":"B4QAAv64NHRW","executionInfo":{"status":"ok","timestamp":1654539235689,"user_tz":-60,"elapsed":445,"user":{"displayName":"Adejumo Daniel","userId":"02925977078148845759"}}},"execution_count":38,"outputs":[]},{"cell_type":"code","source":["%system, %%system\n","%sx, %%sx"],"metadata":{"id":"psD0AZ7YNJBZ"},"execution_count":null,"outputs":[]},
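{"cell_type":"markdown","source":["The cells above only list the magic commands by name. The code cell below is a small illustrative sketch that was not part of the original Colab session: it runs a few of the listed line magics (`%timeit`, `%who`, `%pinfo`) on a throwaway NumPy array named `arr`. The array and the workload are placeholder assumptions, chosen only to give the magics something to operate on."],"metadata":{}},{"cell_type":"code","source":["# Illustrative sketch (not from the original session): a few of the line magics\n","# listed above, applied to a placeholder NumPy array.\n","import numpy as np\n","\n","arr = np.arange(1_000_000)\n","\n","# %timeit runs the statement repeatedly and reports an average execution time\n","%timeit arr.sum()\n","\n","# %who lists the names currently defined in the interactive namespace\n","%who\n","\n","# %pinfo shows help for an object, equivalent to typing `arr.sum?`\n","%pinfo arr.sum"],"metadata":{},"execution_count":null,"outputs":[]}]}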