{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# DAEDRA: Determining Adverse Event Disposition for Regulatory Affairs\n", "\n", "DAEDRA is a language model intended to predict the disposition (outcome) of an adverse event based on the text of the event report. Intended to be used to classify reports in passive reporting systems, it is trained on the [VAERS](https://vaers.hhs.gov/) dataset, which contains reports of adverse events following vaccination in the United States." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "nteract": { "transient": { "deleting": false } }, "tags": [] }, "outputs": [], "source": [ "# %pip install accelerate -U" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "nteract": { "transient": { "deleting": false } } }, "outputs": [], "source": [ "# %pip install transformers datasets shap watermark wandb" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "datalore": { "hide_input_from_viewers": false, "hide_output_from_viewers": false, "node_id": "caZjjFP0OyQNMVgZDiwswE", "report_properties": { "rowId": "un8W7ez7ZwoGb5Co6nydEV" }, "type": "CODE" }, "gather": { "logged": 1706449625034 }, "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/anaconda/envs/azureml_py38_PT_TF/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n", "2024-01-28 14:18:31.729214: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2024-01-28 14:18:32.746966: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n", "2024-01-28 14:18:32.747096: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n", "2024-01-28 14:18:32.747111: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. 
If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n" ] } ], "source": [ "import pandas as pd\n", "import numpy as np\n", "import torch\n", "import os\n", "from typing import List\n", "from sklearn.metrics import f1_score, accuracy_score, classification_report\n", "from transformers import AutoTokenizer, Trainer, AutoModelForSequenceClassification, TrainingArguments, pipeline\n", "from datasets import load_dataset, Dataset, DatasetDict\n", "from pyarrow import Table\n", "import shap\n", "import wandb\n", "\n", "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n", "\n", "%load_ext watermark" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false, "gather": { "logged": 1706449721319 }, "jupyter": { "outputs_hidden": false } }, "outputs": [], "source": [ "device: str = 'cuda' if torch.cuda.is_available() else 'cpu'\n", "\n", "SEED: int = 42\n", "\n", "BATCH_SIZE: int = 16\n", "EPOCHS: int = 3\n", "model_ckpt: str = \"distilbert-base-uncased\"\n", "\n", "CLASS_NAMES: List[str] = [\"DIED\",\n", " \"ER_VISIT\",\n", " \"HOSPITAL\",\n", " \"OFC_VISIT\",\n", " #\"X_STAY\", # pruned\n", " #\"DISABLE\", # pruned\n", " #\"D_PRESENTED\" # pruned\n", " ]\n", "\n", "\n", "\n", "\n", "# WandB configuration\n", "os.environ[\"WANDB_PROJECT\"] = \"DAEDRA model training\" # name your W&B project\n", "os.environ[\"WANDB_LOG_MODEL\"] = \"checkpoint\" # log all model checkpoints\n", "os.environ[\"WANDB_NOTEBOOK_NAME\"] = \"DAEDRA.ipynb\"" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false, "jupyter": { "outputs_hidden": false } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch : 1.12.0\n", "pandas : 2.0.2\n", "numpy : 1.23.5\n", "shap : 0.44.1\n", "re : 2.2.1\n", "wandb : 0.16.2\n", "logging: 0.5.1.2\n", "\n" ] } ], "source": [ "%watermark --iversion" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "UU2oOJhwbIualogG1YyCMd", "type": "CODE" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sun Jan 28 14:18:35 2024 \n", "+---------------------------------------------------------------------------------------+\n", "| NVIDIA-SMI 535.129.03 Driver Version: 535.129.03 CUDA Version: 12.2 |\n", "|-----------------------------------------+----------------------+----------------------+\n", "| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n", "| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n", "| | | MIG M. 
|\n", "|=========================================+======================+======================|\n", "| 0 Tesla V100-PCIE-16GB Off | 00000001:00:00.0 Off | Off |\n", "| N/A 29C P0 27W / 250W | 4MiB / 16384MiB | 0% Default |\n", "| | | N/A |\n", "+-----------------------------------------+----------------------+----------------------+\n", "| 1 Tesla V100-PCIE-16GB Off | 00000002:00:00.0 Off | Off |\n", "| N/A 29C P0 24W / 250W | 4MiB / 16384MiB | 0% Default |\n", "| | | N/A |\n", "+-----------------------------------------+----------------------+----------------------+\n", " \n", "+---------------------------------------------------------------------------------------+\n", "| Processes: |\n", "| GPU GI CI PID Type Process name GPU Memory |\n", "| ID ID Usage |\n", "|=======================================================================================|\n", "| No running processes found |\n", "+---------------------------------------------------------------------------------------+\n" ] } ], "source": [ "!nvidia-smi" ] }, { "cell_type": "markdown", "metadata": { "datalore": { "hide_input_from_viewers": false, "hide_output_from_viewers": false, "node_id": "t45KHugmcPVaO0nuk8tGJ9", "report_properties": { "rowId": "40nN9Hvgi1clHNV5RAemI5" }, "type": "MD" } }, "source": [ "## Loading the data set" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false, "gather": { "logged": 1706449040507 }, "jupyter": { "outputs_hidden": false } }, "outputs": [], "source": [ "dataset = load_dataset(\"chrisvoncsefalvay/vaers-outcomes\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false, "gather": { "logged": 1706449044205 }, "jupyter": { "outputs_hidden": false, "source_hidden": false }, "nteract": { "transient": { "deleting": false } } }, "outputs": [ { "data": { "text/plain": [ "DatasetDict({\n", " train: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 1270444\n", " })\n", " test: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 272238\n", " })\n", " val: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 272238\n", " })\n", "})" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "SUBSAMPLING: float = 0.1" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false, "gather": { "logged": 1706449378281 }, "jupyter": { "outputs_hidden": false, "source_hidden": false }, "nteract": { "transient": { "deleting": false } } }, "outputs": [], "source": [ "def minisample(ds: DatasetDict, fraction: float) -> DatasetDict:\n", " res = DatasetDict()\n", "\n", " res[\"train\"] = Dataset.from_dict(ds[\"train\"].shuffle()[:round(len(ds[\"train\"]) * fraction)])\n", " res[\"test\"] = Dataset.from_dict(ds[\"test\"].shuffle()[:round(len(ds[\"test\"]) * fraction)])\n", " res[\"val\"] = Dataset.from_dict(ds[\"val\"].shuffle()[:round(len(ds[\"val\"]) * fraction)])\n", " \n", " return res" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "collapsed": false, "gather": { "logged": 1706449384162 }, "jupyter": { "outputs_hidden": false, "source_hidden": false }, "nteract": { "transient": { "deleting": false } } }, "outputs": [], "source": [ "dataset = minisample(dataset, SUBSAMPLING)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "collapsed": false, "gather": { "logged": 1706449387981 }, "jupyter": { "outputs_hidden": 
false, "source_hidden": false }, "nteract": { "transient": { "deleting": false } } }, "outputs": [ { "data": { "text/plain": [ "DatasetDict({\n", " train: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 127044\n", " })\n", " test: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 27224\n", " })\n", " val: Dataset({\n", " features: ['id', 'text', 'labels'],\n", " num_rows: 27224\n", " })\n", "})" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset" ] }, { "cell_type": "markdown", "metadata": { "nteract": { "transient": { "deleting": false } } }, "source": [ "We prune things down to the first four keys: `DIED`, `ER_VISIT`, `HOSPITAL`, `OFC_VISIT`." ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "collapsed": false, "gather": { "logged": 1706449443055 }, "jupyter": { "outputs_hidden": false, "source_hidden": false }, "nteract": { "transient": { "deleting": false } } }, "outputs": [], "source": [ "ds = DatasetDict()\n", "\n", "for i in [\"test\", \"train\", \"val\"]:\n", " tab = Table.from_arrays([dataset[i][\"id\"], dataset[i][\"text\"], [i[:4] for i in dataset[i][\"labels\"]]], names=[\"id\", \"text\", \"labels\"])\n", " ds[i] = Dataset(tab)\n", "\n", "dataset = ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Tokenisation and encoding" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "I7n646PIscsUZRoHu6m7zm", "type": "CODE" }, "gather": { "logged": 1706449638377 } }, "outputs": [], "source": [ "tokenizer = AutoTokenizer.from_pretrained(model_ckpt)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "QBLOSI0yVIslV7v7qX9ZC3", "type": "CODE" }, "gather": { "logged": 1706449642580 } }, "outputs": [], "source": [ "def tokenize_and_encode(examples):\n", " return tokenizer(examples[\"text\"], truncation=True)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "slHeNysZOX9uWS9PB7jFDb", "type": "CODE" }, "gather": { "logged": 1706449721161 } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Map: 100%|██████████| 27224/27224 [00:10<00:00, 2638.52 examples/s]\n", "Map: 100%|██████████| 127044/127044 [00:48<00:00, 2633.40 examples/s]\n", "Map: 100%|██████████| 27224/27224 [00:10<00:00, 2613.19 examples/s]\n" ] } ], "source": [ "cols = dataset[\"train\"].column_names\n", "cols.remove(\"labels\")\n", "ds_enc = dataset.map(tokenize_and_encode, batched=True, remove_columns=cols)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Training" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "itXWkbDw9sqbkMuDP84QoT", "type": "CODE" }, "gather": { "logged": 1706449743072 } }, "outputs": [], "source": [ "class MultiLabelTrainer(Trainer):\n", " def compute_loss(self, model, inputs, return_outputs=False):\n", " labels = inputs.pop(\"labels\")\n", " outputs = model(**inputs)\n", " logits = outputs.logits\n", " loss_fct = torch.nn.BCEWithLogitsLoss()\n", " loss = loss_fct(logits.view(-1, self.model.config.num_labels),\n", " labels.float().view(-1, self.model.config.num_labels))\n", " return (loss, outputs) if return_outputs else loss" ] 
}, { "cell_type": "code", "execution_count": 18, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "ZQU7aW6TV45VmhHOQRzcnF", "type": "CODE" }, "gather": { "logged": 1706449761205 } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.bias', 'pre_classifier.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "model = AutoModelForSequenceClassification.from_pretrained(model_ckpt, num_labels=len(CLASS_NAMES)).to(\"cuda\")" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "swhgyyyxoGL8HjnXJtMuSW", "type": "CODE" }, "gather": { "logged": 1706449761541 } }, "outputs": [], "source": [ "def accuracy_threshold(y_pred, y_true, threshold=.5, sigmoid=True):\n", " y_pred = torch.from_numpy(y_pred)\n", " y_true = torch.from_numpy(y_true)\n", "\n", " if sigmoid:\n", " y_pred = y_pred.sigmoid()\n", "\n", " return ((y_pred > threshold) == y_true.bool()).float().mean().item()" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "1Uq3HtkaBxtHNAnSwit5cI", "type": "CODE" }, "gather": { "logged": 1706449761720 } }, "outputs": [], "source": [ "def compute_metrics(eval_pred):\n", " predictions, labels = eval_pred\n", " return {'accuracy_thresh': accuracy_threshold(predictions, labels)}" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "1iPZOTKPwSkTgX5dORqT89", "type": "CODE" }, "gather": { "logged": 1706449761893 } }, "outputs": [], "source": [ "args = TrainingArguments(\n", " output_dir=\"vaers\",\n", " evaluation_strategy=\"epoch\",\n", " learning_rate=2e-5,\n", " per_device_train_batch_size=BATCH_SIZE,\n", " per_device_eval_batch_size=BATCH_SIZE,\n", " num_train_epochs=EPOCHS,\n", " weight_decay=.01,\n", " logging_steps=1,\n", " run_name=f\"daedra-training\",\n", " report_to=[\"wandb\"]\n", ")" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "bnRkNvRYltLun6gCEgL7v0", "type": "CODE" }, "gather": { "logged": 1706449769103 } }, "outputs": [], "source": [ "multi_label_trainer = MultiLabelTrainer(\n", " model, \n", " args, \n", " train_dataset=ds_enc[\"train\"], \n", " eval_dataset=ds_enc[\"test\"], \n", " compute_metrics=compute_metrics, \n", " tokenizer=tokenizer\n", ")" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "LO54PlDkWQdFrzV25FvduB", "type": "CODE" }, "gather": { "logged": 1706449880674 } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mchrisvoncsefalvay\u001b[0m. 
Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n", "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m wandb.init() arguments ignored because wandb magic has already been initialized\n" ] }, { "data": { "text/html": [ "Tracking run with wandb version 0.16.2" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Run data is saved locally in /mnt/batch/tasks/shared/LS_root/mounts/clusters/cvc-vaers-bert-dnsd/code/Users/kristof.csefalvay/daedra/notebooks/wandb/run-20240128_141956-9lniqjvz" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Syncing run init_evaluation_run to Weights & Biases (docs)
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View project at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training/runs/9lniqjvz" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Finishing last run (ID:9lniqjvz) before initializing another..." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run init_evaluation_run at: https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training/runs/9lniqjvz
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Find logs at: ./wandb/run-20240128_141956-9lniqjvz/logs" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Successfully finished last run (ID:9lniqjvz). Initializing new run:
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Tracking run with wandb version 0.16.2" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Run data is saved locally in /mnt/batch/tasks/shared/LS_root/mounts/clusters/cvc-vaers-bert-dnsd/code/Users/kristof.csefalvay/daedra/notebooks/wandb/run-20240128_141958-5idmkcie" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Syncing run init_evaluation_run to Weights & Biases (docs)
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View project at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training/runs/5idmkcie" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [851/851 26:26]\n", "
\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", "

Run history: eval/accuracy_thresh, eval/loss, eval/runtime, eval/samples_per_second, eval/steps_per_second, train/global_step. Run summary: eval/accuracy_thresh 0.55198, eval/loss 0.68442, eval/runtime 105.0436, eval/samples_per_second 259.168, eval/steps_per_second 8.101, train/global_step 0.

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run init_evaluation_run at: https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training/runs/5idmkcie
Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Find logs at: ./wandb/run-20240128_141958-5idmkcie/logs" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "if SUBSAMPLING != 1.0:\n", " wandb_tag: List[str] = [f\"subsample-{SUBSAMPLING}\"]\n", "else:\n", " wandb_tag: List[str] = [f\"full_sample\"]\n", " \n", "wandb.init(name=\"init_evaluation_run\", tags=wandb_tag, magic=True)\n", "\n", "multi_label_trainer.evaluate()\n", "wandb.finish()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "hf0Ei1QXEYDmBv1VNLZ4Zw", "type": "CODE" }, "gather": { "logged": 1706449934637 } }, "outputs": [ { "data": { "text/html": [ "Tracking run with wandb version 0.16.2" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Run data is saved locally in /mnt/batch/tasks/shared/LS_root/mounts/clusters/cvc-vaers-bert-dnsd/code/Users/kristof.csefalvay/daedra/notebooks/wandb/run-20240128_142151-2mcc0ibc" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Syncing run daedra_training_run to Weights & Biases (docs)
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View project at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run at https://wandb.ai/chrisvoncsefalvay/DAEDRA%20model%20training/runs/2mcc0ibc" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [ 3972/11913 24:20 < 48:40, 2.72 it/s, Epoch 1/3]\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EpochTraining LossValidation Loss

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-500)... Done. 15.6s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-1000)... Done. 22.7s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-1500)... Done. 14.0s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-2000)... Done. 15.2s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-2500)... Done. 14.0s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-3000)... Done. 12.4s\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./vaers/checkpoint-3500)... Done. 13.4s\n" ] } ], "source": [ "if SUBSAMPLING != 1.0:\n", " wandb_tag: List[str] = [f\"subsample-{SUBSAMPLING}\"]\n", "else:\n", " wandb_tag: List[str] = [f\"full_sample\"]\n", " \n", "wandb.init(name=\"daedra_training_run\", tags=wandb_tag, magic=True)\n", "\n", "multi_label_trainer.train()\n", "wandb.finish()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Evaluation" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We instantiate a classifier `pipeline` and push it to CUDA." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "kHoUdBeqcyVXDSGv54C4aE", "type": "CODE" }, "gather": { "logged": 1706411459928 } }, "outputs": [], "source": [ "classifier = pipeline(\"text-classification\", \n", " model, \n", " tokenizer=tokenizer, \n", " device=\"cuda:0\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We use the same tokenizer used for training to tokenize/encode the validation set." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "Dr5WCWA6jL51NR1fSrQu6Z", "type": "CODE" }, "gather": { "logged": 1706411523285 } }, "outputs": [], "source": [ "test_encodings = tokenizer.batch_encode_plus(dataset[\"val\"][\"text\"], \n", " max_length=None, \n", " padding='max_length', \n", " return_token_type_ids=True, \n", " truncation=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Once we've made the data loadable by putting it into a `DataLoader`, we " ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "MWfGq2tTkJNzFiDoUPq2X7", "type": "CODE" }, "gather": { "logged": 1706411543379 } }, "outputs": [], "source": [ "test_data = torch.utils.data.TensorDataset(torch.tensor(test_encodings['input_ids']), \n", " torch.tensor(test_encodings['attention_mask']), \n", " torch.tensor(ds_enc[\"val\"][\"labels\"]), \n", " torch.tensor(test_encodings['token_type_ids']))\n", "test_dataloader = torch.utils.data.DataLoader(test_data, \n", " sampler=torch.utils.data.SequentialSampler(test_data), \n", " batch_size=BATCH_SIZE)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "1SJCSrQTRCexFCNCIyRrzL", "type": "CODE" }, "gather": { "logged": 1706411587843 } }, "outputs": [], "source": [ "model.eval()\n", "\n", "logit_preds, true_labels, pred_labels, tokenized_texts = [], [], [], []\n", "\n", "for i, batch in enumerate(test_dataloader):\n", " batch = tuple(t.to(device) for t in batch)\n", " \n", " # Unpack the inputs from our dataloader\n", " b_input_ids, b_input_mask, b_labels, b_token_types = batch\n", " \n", " with torch.no_grad():\n", " outs = model(b_input_ids, attention_mask=b_input_mask)\n", " b_logit_pred = outs[0]\n", " pred_label = torch.sigmoid(b_logit_pred)\n", "\n", " b_logit_pred = b_logit_pred.detach().cpu().numpy()\n", " pred_label = pred_label.to('cpu').numpy()\n", " b_labels = b_labels.to('cpu').numpy()\n", "\n", " tokenized_texts.append(b_input_ids)\n", " logit_preds.append(b_logit_pred)\n", " true_labels.append(b_labels)\n", " pred_labels.append(pred_label)\n", "\n", "# Flatten outputs\n", "tokenized_texts = [item for sublist in tokenized_texts for item in sublist]\n", "pred_labels = [item for sublist in pred_labels for item in sublist]\n", "true_labels = [item for sublist in true_labels for item in sublist]\n", "\n", "# Converting flattened binary values to boolean values\n", "true_bools = [tl == 1 for tl in true_labels]\n", "pred_bools = [pl > 0.50 for pl in pred_labels] " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We create a classification report:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "eBprrgF086mznPbPVBpOLS", "type": "CODE" }, "gather": { "logged": 1706411588249 } }, "outputs": [], "source": [ "print('Test F1 Accuracy: ', f1_score(true_bools, pred_bools, average='micro'))\n", "print('Test Flat Accuracy: ', accuracy_score(true_bools, pred_bools), '\\n')\n", "clf_report = classification_report(true_bools, pred_bools, target_names=CLASS_NAMES)\n", "print(clf_report)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, we render a 'head to head' comparison table that maps each text 
prediction to actual and predicted labels." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "yELHY0IEwMlMw3x6e7hoD1", "type": "CODE" }, "gather": { "logged": 1706411588638 } }, "outputs": [], "source": [ "# Creating a map of class names from class numbers\n", "idx2label = dict(zip(range(len(CLASS_NAMES)), CLASS_NAMES))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "jH0S35dDteUch01sa6me6e", "type": "CODE" }, "gather": { "logged": 1706411589004 } }, "outputs": [], "source": [ "true_label_idxs, pred_label_idxs = [], []\n", "\n", "for vals in true_bools:\n", " true_label_idxs.append(np.where(vals)[0].flatten().tolist())\n", "for vals in pred_bools:\n", " pred_label_idxs.append(np.where(vals)[0].flatten().tolist())" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "h4vHL8XdGpayZ6xLGJUF6F", "type": "CODE" }, "gather": { "logged": 1706411589301 } }, "outputs": [], "source": [ "true_label_texts, pred_label_texts = [], []\n", "\n", "for vals in true_label_idxs:\n", " if vals:\n", " true_label_texts.append([idx2label[val] for val in vals])\n", " else:\n", " true_label_texts.append(vals)\n", "\n", "for vals in pred_label_idxs:\n", " if vals:\n", " pred_label_texts.append([idx2label[val] for val in vals])\n", " else:\n", " pred_label_texts.append(vals)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "SxUmVHfQISEeptg1SawOmB", "type": "CODE" }, "gather": { "logged": 1706411591952 } }, "outputs": [], "source": [ "symptom_texts = [tokenizer.decode(text,\n", " skip_special_tokens=True,\n", " clean_up_tokenization_spaces=False) for text in tokenized_texts]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "datalore": { "hide_input_from_viewers": true, "hide_output_from_viewers": true, "node_id": "BxFNigNGRLTOqraI55BPSH", "type": "CODE" }, "gather": { "logged": 1706411592512 } }, "outputs": [], "source": [ "comparisons_df = pd.DataFrame({'symptom_text': symptom_texts, \n", " 'true_labels': true_label_texts, \n", " 'pred_labels':pred_label_texts})\n", "comparisons_df.to_csv('comparisons.csv')\n", "comparisons_df" ] } ], "metadata": { "datalore": { "base_environment": "default", "computation_mode": "JUPYTER", "package_manager": "pip", "packages": [ { "name": "datasets", "source": "PIP", "version": "2.16.1" }, { "name": "torch", "source": "PIP", "version": "2.1.2" }, { "name": "accelerate", "source": "PIP", "version": "0.26.1" } ], "report_row_ids": [ "un8W7ez7ZwoGb5Co6nydEV", "40nN9Hvgi1clHNV5RAemI5", "TgRD90H5NSPpKS41OeXI1w", "ZOm5BfUs3h1EGLaUkBGeEB", "kOP0CZWNSk6vqE3wkPp7Vc", "W4PWcOu2O2pRaZyoE2W80h", "RolbOnQLIftk0vy9mIcz5M", "8OPhUgbaNJmOdiq5D3a6vK", "5Qrt3jSvSrpK6Ne1hS6shL", "hTq7nFUrovN5Ao4u6dIYWZ", "I8WNZLpJ1DVP2wiCW7YBIB", "SawhU3I9BewSE1XBPstpNJ", "80EtLEl2FIE4FqbWnUD3nT" ], "version": 3 }, "kernelspec": { "display_name": "Python 3.8 - Pytorch and Tensorflow", "language": "python", "name": "python38-azureml-pt-tf" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" }, 
"microsoft": { "host": { "AzureML": { "notebookHasBeenCompleted": true } }, "ms_spell_check": { "ms_spell_check_language": "en" } }, "nteract": { "version": "nteract-front-end@1.0.0" } }, "nbformat": 4, "nbformat_minor": 4 }