diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..3185e0ad7ba1842ecd18d0570f1451c373a246c6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,136 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +.idea/ +wandb/* +save/* +!save/.gitkeep +logs/* +!logs/.gitkeep +test +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ +.vscode/ diff --git a/README.md b/README.md index 4dcfc176fe99fe8b7bbb3d16b8e9e2c2a5797247..f7ef5f085b84b18183de0d945121eb74a815c95e 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,262 @@ + + +--- + +

+ + version + + + Status-building + + + + PRs-Welcome + + + stars + + + FORK + + + Issues + +
+

+
+## Motivation
+
+Spoken Language Understanding (SLU) is one of the core components of a task-oriented dialogue system. It aims to extract the semantic meaning of user queries (e.g., intents and slots).
+
+In this work, we introduce __OpenSLU__, an open-source toolkit that provides a unified, modularized, and extensible framework for spoken language understanding. Specifically, OpenSLU unifies 10 SLU baselines for both single-intent and multi-intent scenarios, supporting non-pretrained and pretrained models simultaneously. Additionally, OpenSLU is highly modularized and extensible: it decomposes the model architecture, inference, and learning process into reusable modules, which allows researchers to quickly set up SLU experiments with highly flexible configurations. We hope OpenSLU can help researchers quickly initiate experiments and spur more breakthroughs in SLU.
+
+## Changelog
+- 2023-02-09
+  - We built the first version and released it.
+
+## Installation
+### System requirements
+OpenSLU requires `Python>=3.8` and `torch>=1.12.0`.
+### Install from git
+```bash
+git clone https://github.com/LightChen2333/OpenSLU.git && cd OpenSLU/
+pip install -r requirements.txt
+```
+
+
+## File Structure
+
+```yaml
+root
+├── common
+│   ├── config.py           # load configuration and auto-preprocess ignored configs
+│   ├── loader.py           # load data from hugging face
+│   ├── logger.py           # log predicted results, supports [fitlog], [wandb], [local logging]
+│   ├── metric.py           # evaluation metrics, supports [intent acc], [slot F1], [EMA]
+│   ├── model_manager.py    # helps to prepare data and prebuild the training process
+│   ├── tokenizer.py        # tokenizer, also supports non-pretrained models via the word tokenizer
+│   └── utils.py            # canonical model communication data structures and other common tool functions
+├── config
+│   ├── reproduction        # configurations for reproduced SLU models
+│   └── **.yaml             # configurations for SLU models
+├── logs                    # local log storage dir path
+├── model
+│   ├── encoder
+│   │   ├── base_encoder.py           # base encoder model; all implemented encoder models need to inherit the BaseEncoder class
+│   │   ├── auto_encoder.py           # auto-encoder to autoload the provided encoder model
+│   │   ├── non_pretrained_encoder.py # all commonly used non-pretrained encoders, e.g. lstm, lstm+self-attention
+│   │   └── pretrained_encoder.py     # all commonly used pretrained encoders, implemented via the hugging-face [AutoModel]
+│   ├── decoder
+│   │   ├── interaction
+│   │   │   ├── base_interaction.py   # base interaction model; all implemented interaction modules need to inherit the BaseInteraction class
+│   │   │   └── *_interaction.py      # some SOTA SLU interaction modules; you can easily reuse or rewrite them to implement your own idea
+│   │   ├── base_decoder.py  # decoder class; [BaseDecoder] supports classification after interaction, and you can also rewrite it for your own interaction order
+│   │   └── classifier.py    # classifier class; supports linear and LSTM classification, as well as token-level intent
+│   └── open_slu_model.py    # the general model class; can automatically build the model through configuration
+├── save                    # model checkpoint storage dir path, also used to automatically save glove embeddings
+└── run.py                  # run script for all functions
+```
+
+## Quick Start
+
+### 1. Reproducing Existing Models
+Example for reproducing the `slot-gated` model:
+
+```bash
+python run.py --dataset atis --model slot-gated
+```
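+
+Under the hood (see the change to `common/config.py` in this diff), the two flags simply resolve to a reproduction config file, so the call above should be equivalent to:
+
+```bash
+python run.py -cp config/reproduction/atis/slot-gated.yaml
+```
+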
+### 2. Customizable Combination of Existing Components
+1. First, you can freely combine and build your own model through config files. For details, see [Configuration](config/README.md).
+2. Then, you can assign the configuration path to train your own model.
+
+Example for `stack-propagation` fine-tuning:
+
+```bash
+python run.py -cp config/stack-propagation.yaml
+```
+
+Example for multi-GPU fine-tuning:
+
+```bash
+accelerate config
+accelerate launch run.py -cp config/stack-propagation.yaml
+```
+
+Alternatively, you can assign an `accelerate` yaml configuration:
+
+```bash
+accelerate launch [--config_file ./accelerate/config.yaml] run.py -cp config/stack-propagation.yaml
+```
+
+### 3. Implementing a New SLU Model
+In OpenSLU, you only need to rewrite the required components and assign them in the configuration, instead of rewriting all components.
+
+In most cases, rewriting the Interaction module is enough for building a new SLU model.
+This module accepts [HiddenData](./common/utils.py) as input and returns `HiddenData`, which contains the `hidden_states` for `intent` and `slot`, along with other helpful information. The example is as follows:
+```python
+class NewInteraction(BaseInteraction):
+    def __init__(self, **config):
+        super().__init__(**config)
+        ...
+
+    def forward(self, hiddens: HiddenData):
+        ...
+        intent, slot = self.func(hiddens)
+        hiddens.update_slot_hidden_state(slot)
+        hiddens.update_intent_hidden_state(intent)
+        return hiddens
+```
+
+To further meet the needs of complex exploration, we provide the
+[BaseDecoder](./model/decoder/base_decoder.py) class. The user can simply override its `forward()` function, which accepts `HiddenData` as input and returns `OutputData`. The example is as follows:
+```python
+class NewDecoder(BaseDecoder):
+    def __init__(self,
+                 intent_classifier,
+                 slot_classifier,
+                 interaction=None):
+        ...
+        self.int_cls = intent_classifier
+        self.slot_cls = slot_classifier
+        self.interaction = interaction
+
+    def forward(self, hiddens: HiddenData):
+        ...
+        interact = self.interaction(hiddens)
+        slot = self.slot_cls(interact.slot)
+        intent = self.int_cls(interact.intent)
+        return OutputData(intent, slot)
+```
+
+
+## Modules
+
+### 1. Encoder Modules
+
+- **No Pretrained Encoder**
+  - GloVe Embedding
+  - BiLSTM Encoder
+  - BiLSTM + Self-Attention Encoder
+  - Bi-Encoder (supports two encoders, for intent and slot respectively)
+- **Pretrained Encoder**
+  - `bert-base-uncased`
+  - `roberta-base`
+  - `microsoft/deberta-v3-base`
+  - other encoder models supported by hugging-face...
+
+### 2. Decoder Modules
+
+#### 2.1 Interaction Modules
+
+- DCA Net Interaction
+- Stack Propagation Interaction
+- Bi-Model Interaction (with/without decoder)
+- Slot Gated Interaction
+
+#### 2.2 Classification Modules
+All classifiers support `Token-level Intent` and `Sentence-level Intent`. Moreover, our decode function supports both `Single-Intent` and `Multi-Intent` (a combined configuration sketch is given below).
+- LinearClassifier
+- AutoregressiveLSTMClassifier
+- MLPClassifier
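+
+For orientation, the sketch below shows how these modules are typically combined in a config file. It is a minimal, illustrative excerpt following the conventions in [Configuration](config/README.md), with the required hyperparameters of each module omitted; swap `encoder_name` or a classifier's `_model_target_` to change components:
+
+```yaml
+model:
+  _model_target_: model.OpenSLUModel
+  encoder:
+    _model_target_: model.encoder.AutoEncoder
+    encoder_name: self-attention-lstm   # or e.g. bert-base-uncased
+  decoder:
+    _model_target_: model.decoder.StackPropagationDecoder
+    interaction:
+      _model_target_: model.decoder.interaction.StackInteraction
+    intent_classifier:
+      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
+    slot_classifier:
+      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
+```
+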
+### 3. Supported Models
+We implement 10 commonly used SLU baselines:
+
 ---
-title: OpenSLU
-emoji: 🐠
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: mit
+**Single-Intent Models**
+- Bi-Model \[ [Wang et al., 2018](https://aclanthology.org/N18-2050/) \] :
+  - `bi-model.yaml`
+- Slot-Gated \[ [Goo et al., 2018](https://www.csie.ntu.edu.tw/~yvchen/doc/NAACL18_SlotGated.pdf) \] :
+  - `slot-gated.yaml`
+- Stack-Propagation \[ [Qin et al., 2019](https://www.aclweb.org/anthology/D19-1214/) \] :
+  - `stack-propagation.yaml`
+- Joint BERT \[ [Chen et al., 2019](https://arxiv.org/abs/1902.10909) \] :
+  - `joint-bert.yaml`
+- RoBERTa \[ [Liu et al., 2019](https://arxiv.org/abs/1907.11692) \] :
+  - `roberta.yaml`
+- ELECTRA \[ [Clark et al., 2020](https://arxiv.org/abs/2003.10555) \] :
+  - `electra.yaml`
+- DCA-Net \[ [Qin et al., 2021](https://arxiv.org/abs/2010.03880) \] :
+  - `dca_net.yaml`
+- DeBERTa \[ [He et al., 2021](https://arxiv.org/abs/2111.09543) \] :
+  - `deberta.yaml`
+
 ---
+**Multi-Intent Models**
+- AGIF \[ [Qin et al., 2020](https://arxiv.org/pdf/2004.10087.pdf) \] :
+  - `agif.yaml`
+- GL-GIN \[ [Qin et al., 2021](https://arxiv.org/abs/2106.01925) \] :
+  - `gl-gin.yaml`
+
+
+## Application
+### 1. Visualization Tools
+Metric scores alone no longer adequately reflect a model's performance. To help researchers further improve their models, we provide a tool for visual error analysis.
+
+We provide an analysis interface with three main parts:
+- (a) error distribution analysis;
+- (b) label transfer analysis;
+- (c) instance analysis.
+
+
+
+```bash
+python tools/visualization.py \
+    --config_path config/visual.yaml \
+    --output_path {ckpt_dir}/outputs.jsonl
+```
+The visualization configuration can be set as below:
+```yaml
+host: 127.0.0.1
+port: 7861
+is_push_to_public: true # whether to push to the gradio platform (public network)
+output_path: save/stack/outputs.jsonl # output prediction file path
+page-size: 2 # the number of instances on each page in instance analysis
+```
+### 2. Deployment
+
+We provide a script to deploy your model automatically. You only need to run the command below to deploy your own model:
+
+```bash
+python app.py --config_path config/reproduction/atis/bi-model.yaml
+```
+
+
+
+### 3. Publish your model to hugging face
+
+We also offer a script to automatically convert models trained with OpenSLU to the hugging face format, so that you can upload them to your `Model` space:
+
+```shell
+python tools/parse_to_hugging_face.py -cp config/reproduction/atis/bi-model.yaml -op save/temp
+```
+
+It will generate 5 files, of which you should only need to upload `config.json`, `pytorch_model.bin` and `tokenizer.pkl`.
+After that, others can reproduce your model just by adjusting the `_from_pretrained_` parameters in the configuration (see [Examples](config/examples/README.md)).
+
+## Contact
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Please create GitHub issues here or email [Libo Qin](mailto:lbqin@ir.hit.edu.cn) or [Qiguang Chen](mailto:charleschen2333@gmail.com) if you have any questions or suggestions.
\ No newline at end of file diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ + diff --git a/accelerate/config-old.yaml b/accelerate/config-old.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96c0db2acd15ccc84d88ae8e375303dcb31655ba --- /dev/null +++ b/accelerate/config-old.yaml @@ -0,0 +1,16 @@ +compute_environment: LOCAL_MACHINE +deepspeed_config: {} +distributed_type: MULTI_GPU +downcast_bf16: 'no' +fsdp_config: {} +gpu_ids: all +machine_rank: 0 +main_process_ip: null +main_process_port: 9001 +main_training_function: main +mixed_precision: 'no' +num_machines: 0 +num_processes: 2 +rdzv_backend: static +same_network: true +use_cpu: false diff --git a/accelerate/config.yaml b/accelerate/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4da53f316a9da379d601a447d54ea4644a3a9b8 --- /dev/null +++ b/accelerate/config.yaml @@ -0,0 +1,22 @@ +command_file: null +commands: null +compute_environment: LOCAL_MACHINE +deepspeed_config: {} +distributed_type: 'NO' +downcast_bf16: 'no' +dynamo_backend: 'NO' +fsdp_config: {} +gpu_ids: all +machine_rank: 0 +main_process_ip: null +main_process_port: null +main_training_function: main +megatron_lm_config: {} +mixed_precision: 'no' +num_machines: 1 +num_processes: 2 +rdzv_backend: static +same_network: true +tpu_name: null +tpu_zone: null +use_cpu: false diff --git a/app.py b/app.py index 36c321c64bfa9ae29ff1beb3c5bdffcffc51db7a..9392b7af72e5fb26f466f0e8ae47775778d4f705 100644 --- a/app.py +++ b/app.py @@ -1,11 +1,31 @@ +''' +Author: Qiguang Chen +LastEditors: Qiguang Chen +Date: 2023-02-07 15:42:32 +LastEditTime: 2023-02-19 21:04:03 +Description: + +''' +import argparse import gradio as gr -import os + from common.config import Config from common.model_manager import ModelManager +from common.utils import str2bool + + +parser = argparse.ArgumentParser() +parser.add_argument('--config_path', '-cp', type=str, default="config/examples/from_pretrained.yaml") +parser.add_argument('--push_to_public', '-p', type=str2bool, nargs='?', + const=True, default=False, + help="Push to public network.") +args = parser.parse_args() +config = Config.load_from_yaml(args.config_path) +config.base["train"] = False +config.base["test"] = False -config = Config.load_from_yaml("config/app.yaml") model_manager = ModelManager(config) -model_manager.load() +model_manager.init_model() def text_analysis(text): @@ -34,9 +54,10 @@ demo = gr.Interface( gr.Textbox(placeholder="Enter sentence here..."), ["html"], examples=[ - ["What a beautiful morning for a walk!"], - ["It was the best of times, it was the worst of times."], + ["i would like to find a flight from charlotte to las vegas that makes a stop in st louis"], ], ) - -demo.launch() +if args.push_to_public: + demo.launch(share=True) +else: + demo.launch() diff --git a/common/config.py b/common/config.py index bd4a7b8ad6d39cd408b3fd7b8be2c9cd19076992..9563ea6ffa6a75095e61a872db5b4fcd6f2e9d65 100644 --- a/common/config.py +++ b/common/config.py @@ -2,7 +2,7 @@ Author: Qiguang Chen Date: 2023-01-11 10:39:26 LastEditors: Qiguang Chen -LastEditTime: 2023-01-26 10:55:43 +LastEditTime: 2023-02-15 17:58:53 Description: Configuration class to manage all process in OpenSLU like model construction, learning processing and so on. 
'''
 
@@ -18,7 +18,8 @@ class Config(dict):
         dict.__init__(self, *args, **kwargs)
         self.__dict__ = self
         self.start_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
-        self.__autowired()
+        if not self.model.get("_from_pretrained_"):
+            self.__autowired()
 
     @staticmethod
     def load_from_yaml(file_path:str)->"Config":
@@ -46,8 +47,8 @@ class Config(dict):
         Returns:
             Config: _description_
         """
-        if args.model is not None:
-            args.config_path = "config/" + args.model + ".yaml"
+        if args.model is not None and args.dataset is not None:
+            args.config_path = f"config/reproduction/{args.dataset}/{args.model}.yaml"
         config = Config.load_from_yaml(args.config_path)
         if args.dataset is not None:
             config.__update_dataset(args.dataset)
diff --git a/common/global_pool.py b/common/global_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1f6e0db50fd1d1c6fbd4ae10658cbdb97de5494
--- /dev/null
+++ b/common/global_pool.py
@@ -0,0 +1,26 @@
+'''
+Author: Qiguang Chen
+LastEditors: Qiguang Chen
+Date: 2023-02-12 14:35:37
+LastEditTime: 2023-02-12 14:37:40
+Description: a simple global object pool to share values (e.g. the logger) across modules.
+
+'''
+def _init():
+    global _global_dict
+    _global_dict = {}
+
+
+def set_value(key, value):
+    # set global value in the object pool
+    _global_dict[key] = value
+
+
+def get_value(key):
+    # get global value from the object pool
+    try:
+        return _global_dict[key]
+    except KeyError:
+        print('Failed to read ' + key + '\r\n')
+
+    
\ No newline at end of file
diff --git a/common/loader.py b/common/loader.py
index 618e943542734061cea47b759df931b25785729a..5b97680ae3ef9b5523cce591e30c520059ff36a8 100644
--- a/common/loader.py
+++ b/common/loader.py
@@ -2,7 +2,7 @@
 Author: Qiguang Chen
 Date: 2023-01-11 10:39:26
 LastEditors: Qiguang Chen
-LastEditTime: 2023-02-07 19:26:06
+LastEditTime: 2023-02-19 15:39:48
 Description: all class for load data.
''' @@ -36,14 +36,13 @@ class DataFactory(object): return dataset_name.lower() in ["atis", "snips", "mix-atis", "mix-atis"] def load_dataset(self, dataset_config, split="train"): - # TODO: 关闭use_auth_token dataset_name = None if split not in dataset_config: dataset_name = dataset_config.get("dataset_name") elif self.__is_supported_datasets(dataset_config[split]): dataset_name = dataset_config[split].lower() if dataset_name is not None: - return load_dataset("LightChen2333/OpenSLU", dataset_name, split=split, use_auth_token=True) + return load_dataset("LightChen2333/OpenSLU", dataset_name, split=split) else: data_file = dataset_config[split] data_dict = {"text": [], "slot": [], "intent":[]} diff --git a/common/logger.py b/common/logger.py index 264e8e3943df08833ef0f113c4f2f5869df13650..5523a9fe98ab25b9fc1ab386049e3a340474c7a3 100644 --- a/common/logger.py +++ b/common/logger.py @@ -2,14 +2,17 @@ Author: Qiguang Chen Date: 2023-01-11 10:39:26 LastEditors: Qiguang Chen -LastEditTime: 2023-02-02 16:29:13 +LastEditTime: 2023-02-17 20:38:38 Description: log manager ''' +import datetime import json import os import time from common.config import Config +import logging +import colorlog def mkdirs(dir_names): for dir_name in dir_names: @@ -71,7 +74,7 @@ class Logger(): self.other_log_file = os.path.join(self.output_dir, "/other_log.jsonl") with open(self.other_log_file, "w", encoding="utf8") as f: print(f"Other Log Result will be written to {self.other_log_file}") - import logging + LOGGING_LEVEL_MAP = { "CRITICAL": logging.CRITICAL, "FATAL": logging.FATAL, @@ -82,10 +85,47 @@ class Logger(): "DEBUG": logging.DEBUG, "NOTSET": logging.NOTSET, } - logging.basicConfig(format='[%(levelname)s - %(asctime)s]\t%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', - filename=os.path.join(self.output_dir, "log.log"), level=LOGGING_LEVEL_MAP[logging_level]) - self.logging = logging - + # logging.basicConfig(format='[%(levelname)s - %(asctime)s]\t%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', + # filename=os.path.join(self.output_dir, "log.log"), level=LOGGING_LEVEL_MAP[logging_level]) + + # logger = logging.getLogger() + # KZT = logging.StreamHandler() + # KZT.setLevel(logging.DEBUG) + # logger.addHandler(KZT) + + self.logging = self._get_logging_logger(logging_level) + + def _get_logging_logger(self, level="INFO"): + log_colors_config = { + 'DEBUG': 'cyan', + 'INFO': 'blue', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'red,bg_white', + } + + logger = logging.getLogger() + logger.setLevel(level) + + log_path = os.path.join(self.output_dir, "log.log") + + if not logger.handlers: + sh = logging.StreamHandler() + fh = logging.FileHandler(filename=log_path, mode='a', encoding="utf-8") + fmt = logging.Formatter( + fmt='[%(levelname)s - %(asctime)s]\t%(message)s', + datefmt='%m/%d/%Y %I:%M:%S %p') + + sh_fmt = colorlog.ColoredFormatter( + fmt='%(log_color)s[%(levelname)s - %(asctime)s]\t%(message)s', + datefmt='%m/%d/%Y %I:%M:%S %p', + log_colors=log_colors_config) + sh.setFormatter(fmt=sh_fmt) + fh.setFormatter(fmt=fmt) + logger.addHandler(sh) + logger.addHandler(fh) + return logger + def set_config(self, config: Config): """save config diff --git a/common/metric.py b/common/metric.py index 8cd117905d345e2f394bee6e1ff5817e4301ad38..f84fd67830f2b2da9e09a4f7cb67644a51f139dc 100644 --- a/common/metric.py +++ b/common/metric.py @@ -2,7 +2,7 @@ Author: Qiguang Chen Date: 2023-01-11 10:39:26 LastEditors: Qiguang Chen -LastEditTime: 2023-01-26 12:12:55 +LastEditTime: 2023-02-17 19:39:22 Description: Metric calculation 
class ''' @@ -198,6 +198,8 @@ class Evaluator(object): lastPredTag = 'O' lastPredType = '' for c, p in zip(correct_slot, pred_slot): + c = str(c) + p = str(p) correctTag, correctType = Evaluator.__splitTagType(c) predTag, predType = Evaluator.__splitTagType(p) @@ -317,6 +319,7 @@ class Evaluator(object): use_intent = output.intent_ids is not None and len( output.intent_ids) > 0 if use_slot and "slot_f1" in metric_list: + res_dict["slot_f1"] = Evaluator.computeF1Score( output.slot_ids, inps.slot) if use_intent and "intent_acc" in metric_list: diff --git a/common/model_manager.py b/common/model_manager.py index 79f5b57a5b3f76a5c6630076ce59a5a433fe6309..c2969529bbccf0ee37ab24aff851e002ea5ba3bf 100644 --- a/common/model_manager.py +++ b/common/model_manager.py @@ -2,11 +2,13 @@ Author: Qiguang Chen Date: 2023-01-11 10:39:26 LastEditors: Qiguang Chen -LastEditTime: 2023-02-08 00:57:09 +LastEditTime: 2023-02-19 18:50:11 Description: manage all process of model training and prediction. ''' +import math import os +import queue import random import numpy as np @@ -18,11 +20,15 @@ from common import utils from common.loader import DataFactory from common.logger import Logger from common.metric import Evaluator +from common.saver import Saver from common.tokenizer import get_tokenizer, get_tokenizer_class, load_embedding from common.utils import InputData, instantiate from common.utils import OutputData from common.config import Config import dill +from common import global_pool +from tools.load_from_hugging_face import PreTrainedTokenizerForSLU, PretrainedModelForSLU +# from tools.hugging_face_parser import load_model, load_tokenizer class ModelManager(object): @@ -33,45 +39,101 @@ class ModelManager(object): config (Config): configuration to manage all process in OpenSLU """ # init config + global_pool._init() self.config = config self.__set_seed(self.config.base.get("seed")) self.device = self.config.base.get("device") - + self.load_dir = self.config.model_manager.get("load_dir") + if self.config.get("logger") and self.config["logger"].get("logger_type"): + logger_type = self.config["logger"].get("logger_type") + else: + logger_type = "wandb" # enable accelerator if "accelerator" in self.config and self.config["accelerator"].get("use_accelerator"): from accelerate import Accelerator - self.accelerator = Accelerator(log_with="wandb") + self.accelerator = Accelerator(log_with=logger_type) else: self.accelerator = None + self.tokenizer = None + self.saver = Saver(self.config.model_manager, start_time=self.config.start_time) if self.config.base.get("train"): + self.model = None + self.optimizer = None + self.total_step = None + self.lr_scheduler = None + self.init_step = 0 + self.best_metric = 0 + self.logger = Logger(logger_type=logger_type, + logger_name=self.config.base["name"], + start_time=self.config.start_time, + accelerator=self.accelerator) + global_pool.set_value("logger", self.logger) + + def init_model(self): + """init model, optimizer, lr_scheduler + + Args: + model (Any): pytorch model + """ + self.prepared = False + if self.load_dir is not None: + self.load() + self.config.set_vocab_size(self.tokenizer.vocab_size) + self.init_data() + if self.config.base.get("train") and self.config.model_manager.get("load_train_state"): + train_state = torch.load(os.path.join( + self.load_dir, "train_state.pkl"), pickle_module=dill) + self.optimizer = instantiate( + self.config["optimizer"])(self.model.parameters()) + self.lr_scheduler = instantiate(self.config["scheduler"])( + 
optimizer=self.optimizer, + num_training_steps=self.total_step + ) + self.optimizer.load_state_dict(train_state["optimizer"]) + self.optimizer.zero_grad() + self.lr_scheduler.load_state_dict(train_state["lr_scheduler"]) + self.init_step = train_state["step"] + self.best_metric = train_state["best_metric"] + elif self.config.model.get("_from_pretrained_") and self.config.tokenizer.get("_from_pretrained_"): + self.from_pretrained() + self.config.set_vocab_size(self.tokenizer.vocab_size) + self.init_data() + else: self.tokenizer = get_tokenizer( self.config.tokenizer.get("_tokenizer_name_")) - self.logger = Logger( - "wandb", self.config.base["name"], start_time=config.start_time, accelerator=self.accelerator) + self.init_data() + self.model = instantiate(self.config.model) + self.model.to(self.device) + if self.config.base.get("train"): + self.optimizer = instantiate( + self.config["optimizer"])(self.model.parameters()) + self.lr_scheduler = instantiate(self.config["scheduler"])( + optimizer=self.optimizer, + num_training_steps=self.total_step + ) + + def init_data(self): + self.data_factory = DataFactory(tokenizer=self.tokenizer, + use_multi_intent=self.config.base.get("multi_intent"), + to_lower_case=self.config.tokenizer.get("_to_lower_case_")) + batch_size = self.config.base["batch_size"] + # init tokenizer config and dataloaders + tokenizer_config = {key: self.config.tokenizer[key] + for key in self.config.tokenizer if key[0] != "_" and key[-1] != "_"} + + if self.config.base.get("train"): # init dataloader & load data - if self.config.base.get("save_dir"): - self.model_save_dir = self.config.base["save_dir"] - else: - if not os.path.exists("save/"): - os.mkdir("save/") - self.model_save_dir = "save/" + config.start_time - if not os.path.exists(self.model_save_dir): - os.mkdir(self.model_save_dir) - batch_size = self.config.base["batch_size"] - df = DataFactory(tokenizer=self.tokenizer, - use_multi_intent=self.config.base.get("multi_intent"), - to_lower_case=self.config.base.get("_to_lower_case_")) - train_dataset = df.load_dataset(self.config.dataset, split="train") + + + train_dataset = self.data_factory.load_dataset(self.config.dataset, split="train") - # update label and vocabulary - df.update_label_names(train_dataset) - df.update_vocabulary(train_dataset) + # update label and vocabulary (ONLY SUPPORT FOR "word_tokenizer") + self.data_factory.update_label_names(train_dataset) + self.data_factory.update_vocabulary(train_dataset) - # init tokenizer config and dataloaders - tokenizer_config = {key: self.config.tokenizer[key] - for key in self.config.tokenizer if key[0] != "_" and key[-1] != "_"} - self.train_dataloader = df.get_data_loader(train_dataset, + + self.train_dataloader = self.data_factory.get_data_loader(train_dataset, batch_size, shuffle=True, device=self.device, @@ -80,9 +142,9 @@ class ModelManager(object): "_align_mode_"), label2tensor=True, **tokenizer_config) - dev_dataset = df.load_dataset( - self.config.dataset, split="validation") - self.dev_dataloader = df.get_data_loader(dev_dataset, + self.total_step = int(self.config.base.get("epoch_num")) * len(self.train_dataloader) + dev_dataset = self.data_factory.load_dataset(self.config.dataset, split="validation") + self.dev_dataloader = self.data_factory.get_data_loader(dev_dataset, batch_size, shuffle=False, device=self.device, @@ -91,16 +153,22 @@ class ModelManager(object): "_align_mode_"), label2tensor=False, **tokenizer_config) - df.update_vocabulary(dev_dataset) + self.data_factory.update_vocabulary(dev_dataset) + 
self.intent_list = None + self.intent_dict = None + self.slot_list = None + self.slot_dict = None # add intent label num and slot label num to config - if int(self.config.get_intent_label_num()) == 0 or int(self.config.get_slot_label_num()) == 0: - self.intent_list = df.intent_label_list - self.intent_dict = df.intent_label_dict + if self.config.model["decoder"].get("intent_classifier") and int(self.config.get_intent_label_num()) == 0: + self.intent_list = self.data_factory.intent_label_list + self.intent_dict = self.data_factory.intent_label_dict self.config.set_intent_label_num(len(self.intent_list)) - self.slot_list = df.slot_label_list - self.slot_dict = df.slot_label_dict + if self.config.model["decoder"].get("slot_classifier") and int(self.config.get_slot_label_num()) == 0: + self.slot_list = self.data_factory.slot_label_list + self.slot_dict = self.data_factory.slot_label_dict self.config.set_slot_label_num(len(self.slot_list)) - self.config.set_vocab_size(self.tokenizer.vocab_size) + + # autoload embedding for non-pretrained encoder if self.config["model"]["encoder"].get("embedding") and self.config["model"]["encoder"]["embedding"].get( @@ -114,19 +182,13 @@ class ModelManager(object): self.config.autoload_template() # save config self.logger.set_config(self.config) - - self.model = None - self.optimizer = None - self.total_step = None - self.lr_scheduler = None - if self.config.tokenizer.get("_tokenizer_name_") == "word_tokenizer": - self.tokenizer.save(os.path.join(self.model_save_dir, "tokenizer.json")) - utils.save_json(os.path.join( - self.model_save_dir, "label.json"), {"intent": self.intent_list,"slot": self.slot_list}) + self.saver.save_tokenizer(self.tokenizer) + self.saver.save_label(self.intent_list, self.slot_list) + self.config.set_vocab_size(self.tokenizer.vocab_size) + if self.config.base.get("test"): - self.test_dataset = df.load_dataset( - self.config.dataset, split="test") - self.test_dataloader = df.get_data_loader(self.test_dataset, + self.test_dataset = self.data_factory.load_dataset(self.config.dataset, split="test") + self.test_dataloader = self.data_factory.get_data_loader(self.test_dataset, batch_size, shuffle=False, device=self.device, @@ -136,30 +198,6 @@ class ModelManager(object): label2tensor=False, **tokenizer_config) - def init_model(self, model): - """init model, optimizer, lr_scheduler - - Args: - model (Any): pytorch model - """ - self.model = model - self.model.to(self.device) - if self.config.base.get("train"): - self.optimizer = instantiate( - self.config["optimizer"])(self.model.parameters()) - self.total_step = int(self.config.base.get( - "epoch_num")) * len(self.train_dataloader) - self.lr_scheduler = instantiate(self.config["scheduler"])( - optimizer=self.optimizer, - num_training_steps=self.total_step - ) - if self.accelerator is not None: - self.model, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare( - self.model, self.optimizer, self.train_dataloader, self.lr_scheduler) - if self.config.base.get("load_dir_path"): - self.accelerator.load_state(self.config.base.get("load_dir_path")) - # self.dev_dataloader = self.accelerator.prepare(self.dev_dataloader) - def eval(self, step: int, best_metric: float) -> float: """ evaluation models. 
@@ -171,31 +209,21 @@ class ModelManager(object): float: updated best metric value """ # TODO: save dev - _, res = self.__evaluate(self.model, self.dev_dataloader) + _, res = self.__evaluate(self.model, self.dev_dataloader, mode="dev") self.logger.log_metric(res, metric_split="dev", step=step) - if res[self.config.base.get("best_key")] > best_metric: - best_metric = res[self.config.base.get("best_key")] - outputs, test_res = self.__evaluate( - self.model, self.test_dataloader) - if not os.path.exists(self.model_save_dir): - os.mkdir(self.model_save_dir) - if self.accelerator is None: - torch.save(self.model, os.path.join( - self.model_save_dir, "model.pkl")) - torch.save(self.optimizer, os.path.join( - self.model_save_dir, "optimizer.pkl")) - torch.save(self.lr_scheduler, os.path.join( - self.model_save_dir, "lr_scheduler.pkl"), pickle_module=dill) - torch.save(step, os.path.join( - self.model_save_dir, "step.pkl")) - else: - self.accelerator.wait_for_everyone() - unwrapped_model = self.accelerator.unwrap_model(self.model) - self.accelerator.save(unwrapped_model.state_dict( - ), os.path.join(self.model_save_dir, "model.pkl")) - self.accelerator.save_state(output_dir=self.model_save_dir) - outputs.save(self.model_save_dir, self.test_dataset) - self.logger.log_metric(test_res, metric_split="test", step=step) + if res[self.config.evaluator.get("best_key")] > best_metric: + best_metric = res[self.config.evaluator.get("best_key")] + train_state = { + "step": step, + "best_metric": best_metric, + "optimizer": self.optimizer.state_dict(), + "lr_scheduler": self.lr_scheduler.state_dict() + } + self.saver.save_model(self.model, train_state, self.accelerator) + if self.config.base.get("test"): + outputs, test_res = self.__evaluate(self.model, self.test_dataloader, mode="test") + self.saver.save_output(outputs, self.test_dataset) + self.logger.log_metric(test_res, metric_split="test", step=step) return best_metric def train(self) -> float: @@ -204,9 +232,23 @@ class ModelManager(object): Returns: float: updated best metric value """ - step = 0 - best_metric = 0 + self.model.train() + if self.accelerator is not None: + self.total_step = math.ceil(self.total_step / self.accelerator.num_processes) + if self.optimizer is None: + self.optimizer = instantiate(self.config["optimizer"])(self.model.parameters()) + if self.lr_scheduler is None: + self.lr_scheduler = instantiate(self.config["scheduler"])( + optimizer=self.optimizer, + num_training_steps=self.total_step + ) + if not self.prepared and self.accelerator is not None: + self.model, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare( + self.model, self.optimizer, self.train_dataloader, self.lr_scheduler) + step = self.init_step progress_bar = tqdm(range(self.total_step)) + progress_bar.update(self.init_step) + self.optimizer.zero_grad() for _ in range(int(self.config.base.get("epoch_num"))): for data in self.train_dataloader: if step == 0: @@ -230,16 +272,25 @@ class ModelManager(object): loss.backward() self.optimizer.step() self.lr_scheduler.step() - if not self.config.base.get("eval_by_epoch") and step % self.config.base.get( - "eval_step") == 0 and step != 0: - best_metric = self.eval(step, best_metric) + train_state = { + "step": step, + "best_metric": self.best_metric, + "optimizer": self.optimizer.state_dict(), + "lr_scheduler": self.lr_scheduler.state_dict() + } + if not self.saver.auto_save_step(self.model, train_state, self.accelerator): + if not self.config.evaluator.get("eval_by_epoch") and step % 
self.config.evaluator.get("eval_step") == 0 and step != 0: + self.best_metric = self.eval(step, self.best_metric) step += 1 progress_bar.update(1) - if self.config.base.get("eval_by_epoch"): - best_metric = self.eval(step, best_metric) + if self.config.evaluator.get("eval_by_epoch"): + self.best_metric = self.eval(step, self.best_metric) self.logger.finish() - return best_metric + return self.best_metric + def test(self): + return self.__evaluate(self.model, self.test_dataloader, mode="test") + def __set_seed(self, seed_value: int): """Manually set random seeds. @@ -258,7 +309,7 @@ class ModelManager(object): torch.backends.cudnn.benchmark = True return - def __evaluate(self, model, dataloader): + def __evaluate(self, model, dataloader, mode="dev"): model.eval() inps = InputData() outputs = OutputData() @@ -272,52 +323,97 @@ class ModelManager(object): decode_output.map_output(slot_map=self.slot_list, intent_map=self.intent_list) - data, decode_output = utils.remove_slot_ignore_index( - data, decode_output, ignore_index="#") + if self.config.model["decoder"].get("slot_classifier"): + data, decode_output = utils.remove_slot_ignore_index( + data, decode_output, ignore_index="#") inps.merge_input_data(data) outputs.merge_output_data(decode_output) - if "metric" in self.config: + if "metric" in self.config.evaluator: res = Evaluator.compute_all_metric( - inps, outputs, intent_label_map=self.intent_dict, metric_list=self.config.metric) + inps, outputs, intent_label_map=self.intent_dict, metric_list=self.config.evaluator["metric"]) else: res = Evaluator.compute_all_metric( inps, outputs, intent_label_map=self.intent_dict) + self.logger.info(f"Best {mode} metric: "+str(res)) model.train() return outputs, res def load(self): - self.model = torch.load(os.path.join(self.config.base["model_dir"], "model.pkl"), map_location=torch.device(self.device)) - if self.config.tokenizer["_tokenizer_name_"] == "word_tokenizer": - self.tokenizer = get_tokenizer_class(self.config.tokenizer["_tokenizer_name_"]).from_file( - os.path.join(self.config.base["model_dir"], "tokenizer.json")) + + if self.tokenizer is None: + with open(os.path.join(self.load_dir, "tokenizer.pkl"), 'rb') as f: + self.tokenizer = dill.load(f) + label = utils.load_json(os.path.join(self.load_dir, "label.json")) + if label["intent"] is None: + self.intent_list = None + self.intent_dict = None + else: + self.intent_list = label["intent"] + self.intent_dict = {x: i for i, x in enumerate(label["intent"])} + self.config.set_intent_label_num(len(self.intent_list)) + if label["slot"] is None: + self.slot_list = None + self.slot_dict = None + else: + self.slot_list = label["slot"] + self.slot_dict = {x: i for i, x in enumerate(label["slot"])} + self.config.set_slot_label_num(len(self.slot_list)) + self.config.set_vocab_size(self.tokenizer.vocab_size) + if self.accelerator is not None and self.load_dir is not None: + self.model = torch.load(os.path.join(self.load_dir, "model.pkl"), map_location=torch.device(self.device)) + self.prepared = True + self.accelerator.load_state(self.load_dir) + self.accelerator.prepare_model(self.model) else: - self.tokenizer = get_tokenizer(self.config.tokenizer["_tokenizer_name_"]) + self.model = torch.load(os.path.join( + self.load_dir, "model.pkl"), map_location=torch.device(self.device)) + # if self.config.tokenizer["_tokenizer_name_"] == "word_tokenizer": + # self.tokenizer = get_tokenizer_class(self.config.tokenizer["_tokenizer_name_"]).from_file(os.path.join(self.load_dir, "tokenizer.json")) + # else: + # 
self.tokenizer = get_tokenizer(self.config.tokenizer["_tokenizer_name_"]) + self.model.to(self.device) + + + def from_pretrained(self): + self.config.autoload_template() + model = PretrainedModelForSLU.from_pretrained(self.config.model["_from_pretrained_"]) + # model = load_model(self.config.model["_from_pretrained_"]) + self.model = model.model + if self.tokenizer is None: + self.tokenizer = PreTrainedTokenizerForSLU.from_pretrained( + self.config.tokenizer["_from_pretrained_"]) + self.config.tokenizer = model.config.tokenizer + # self.tokenizer = load_tokenizer(self.config.tokenizer["_from_pretrained_"]) + self.model.to(self.device) - label = utils.load_json(os.path.join(self.config.base["model_dir"], "label.json")) + label = model.config._id2label + self.config.model = model.config.model self.intent_list = label["intent"] self.slot_list = label["slot"] - self.data_factory=DataFactory(tokenizer=self.tokenizer, - use_multi_intent=self.config.base.get("multi_intent"), - to_lower_case=self.config.tokenizer.get("_to_lower_case_")) + self.intent_dict = {x: i for i, x in enumerate(label["intent"])} + self.slot_dict = {x: i for i, x in enumerate(label["slot"])} def predict(self, text_data): self.model.eval() tokenizer_config = {key: self.config.tokenizer[key] - for key in self.config.tokenizer if key[0] != "_" and key[-1] != "_"} + for key in self.config.tokenizer if key[0] != "_" and key[-1] != "_"} align_mode = self.config.tokenizer.get("_align_mode_") inputs = self.data_factory.batch_fn(batch=[{"text": text_data.split(" ")}], - device=self.device, - config=tokenizer_config, - enable_label=False, - align_mode= align_mode if align_mode is not None else "general", - label2tensor=False) + device=self.device, + config=tokenizer_config, + enable_label=False, + align_mode=align_mode if align_mode is not None else "general", + label2tensor=False) output = self.model(inputs) decode_output = self.model.decode(output, inputs) decode_output.map_output(slot_map=self.slot_list, - intent_map=self.intent_list) + intent_map=self.intent_list) if self.config.base.get("multi_intent"): intent = decode_output.intent_ids[0] else: intent = [decode_output.intent_ids[0]] - return {"intent": intent, "slot": decode_output.slot_ids[0], "text": self.tokenizer.decode(inputs.input_ids[0])} \ No newline at end of file + input_ids = inputs.input_ids[0].tolist() + tokens = [self.tokenizer.decode(ids) for ids in input_ids] + slots = decode_output.slot_ids[0] + return {"intent": intent, "slot": slots, "text": tokens} diff --git a/common/saver.py b/common/saver.py new file mode 100644 index 0000000000000000000000000000000000000000..82536faeb764ce50bd28db893f693633d35c6310 --- /dev/null +++ b/common/saver.py @@ -0,0 +1,80 @@ +''' +Author: Qiguang Chen +LastEditors: Qiguang Chen +Date: 2023-02-12 22:23:58 +LastEditTime: 2023-02-19 14:14:56 +Description: + +''' +import json +import os +import queue +import shutil +import torch +import dill +from common import utils + + +class Saver(): + def __init__(self, config, start_time=None) -> None: + self.config = config + if self.config.get("save_dir"): + self.model_save_dir = self.config["save_dir"] + else: + if not os.path.exists("save/"): + os.mkdir("save/") + self.model_save_dir = "save/" + start_time + if not os.path.exists(self.model_save_dir): + os.mkdir(self.model_save_dir) + save_mode = config.get("save_mode") + self.save_mode = save_mode if save_mode is not None else "save-by-eval" + + max_save_num = self.config.get("max_save_num") + self.max_save_num = max_save_num if 
max_save_num is not None else 1
+        # use the defaulted attribute so that maxsize is always an int
+        self.save_pool = queue.Queue(maxsize=self.max_save_num)
+
+    def save_tokenizer(self, tokenizer):
+        with open(os.path.join(self.model_save_dir, "tokenizer.pkl"), 'wb') as f:
+            dill.dump(tokenizer, f)
+
+    def save_label(self, intent_list, slot_list):
+        utils.save_json(os.path.join(self.model_save_dir, "label.json"), {"intent": intent_list, "slot": slot_list})
+
+    def save_model(self, model, train_state, accelerator=None):
+        step = train_state["step"]
+        if self.max_save_num != 1:
+            model_save_dir = os.path.join(self.model_save_dir, str(step))
+            # rotate the checkpoint pool: drop the oldest checkpoint when full
+            if self.save_pool.full():
+                delete_dir = self.save_pool.get()
+                shutil.rmtree(delete_dir)
+            self.save_pool.put(model_save_dir)
+            if not os.path.exists(model_save_dir):
+                os.mkdir(model_save_dir)
+        else:
+            model_save_dir = self.model_save_dir
+            if not os.path.exists(model_save_dir):
+                os.mkdir(model_save_dir)
+        if accelerator is None:
+            torch.save(model, os.path.join(model_save_dir, "model.pkl"))
+            torch.save(train_state, os.path.join(model_save_dir, "train_state.pkl"), pickle_module=dill)
+        else:
+            accelerator.wait_for_everyone()
+            unwrapped_model = accelerator.unwrap_model(model)
+            accelerator.save(unwrapped_model, os.path.join(model_save_dir, "model.pkl"))
+            accelerator.save_state(output_dir=model_save_dir)
+
+    def auto_save_step(self, model, train_state, accelerator=None):
+        step = train_state["step"]
+        if self.save_mode == "save-by-step" and step % self.config.get("save_step") == 0 and step != 0:
+            self.save_model(model, train_state, accelerator)
+            return True
+        else:
+            return False
+
+    def save_output(self, outputs, dataset):
+        outputs.save(self.model_save_dir, dataset)
\ No newline at end of file
diff --git a/common/tokenizer.py b/common/tokenizer.py
index 082c6297ad96b0bc98cea0804082552fc1c0a69d..8b97e721b6da3cb94b18edaf5778c2c149412ff7 100644
--- a/common/tokenizer.py
+++ b/common/tokenizer.py
@@ -103,7 +103,7 @@ class WordTokenizer(object):
         assert isinstance(instance, str)
 
         # count the frequency of instances.
-        self.counter[instance] += 1
+        # self.counter[instance] += 1
 
         if instance not in self.index2instance:
             self.instance2index[instance] = len(self.index2instance)
@@ -190,6 +190,18 @@ class WordTokenizer(object):
             index = index.tolist()
             return self.decode(index)
         return self.index2instance[index]
+
+    def decode_batch(self, index, **kargs):
+        """ Batched version of decode(): get the corresponding instances of a batch of query indexes.
+
+        Throws an exception if any index is invalid.
+
+        Args:
+            index: iterable of query indexes.
+        Returns:
+            the corresponding instances.
+        """
+        return self.decode(index)
 
     def save(self, path):
         """ Save the content of alphabet to files.
@@ -214,7 +226,7 @@ class WordTokenizer(object):
             obj = json.load(fw)
             tokenizer = WordTokenizer(obj["name"])
             tokenizer.instance2index = OrderedDict(obj["token_map"])
-            tokenizer.counter = len(tokenizer.instance2index)
+            # tokenizer.counter = len(tokenizer.instance2index)
             tokenizer.index2instance = OrderedSet(tokenizer.instance2index.keys())
             return tokenizer
 
diff --git a/common/utils.py b/common/utils.py
index 048af75cbc46e6015cf2cdcf581a013085125370..8569f3b06d1054623990fe9431826ed7cedc98e4 100644
--- a/common/utils.py
+++ b/common/utils.py
@@ -12,7 +12,7 @@ import torch
 from torch.nn.utils.rnn import pad_sequence
 from tqdm import tqdm
 from torch import Tensor
-
+import argparse
 
 class InputData():
     """input datas class
@@ -486,4 +486,14 @@ def save_json(file_path, obj):
 def load_json(file_path):
     with open(file_path, 'r', encoding="utf8") as fw:
         res =json.load(fw)
-    return res
\ No newline at end of file
+    return res
+
+def str2bool(v):
+    if isinstance(v, bool):
+        return v
+    if v.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
\ No newline at end of file
diff --git a/config/README.md b/config/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..995c429a5628b1da9aeb6ad6519a4f7cc91d29ab
--- /dev/null
+++ b/config/README.md
@@ -0,0 +1,348 @@
+# Configuration
+
+## 1. Introduction
+
+Configuration is divided into fine-grained reusable modules:
+
+- `base`: basic configuration
+- `logger`: logger setting
+- `model_manager`: loading and saving model parameters
+- `accelerator`: whether to enable multi-GPU
+- `dataset`: dataset management
+- `evaluator`: evaluation and metrics setting
+- `tokenizer`: tokenizer initiation and tokenizing setting
+- `optimizer`: optimizer initiation setting
+- `scheduler`: scheduler initiation setting
+- `model`: model construction setting
+
+From Sec. 2 to Sec. 11, we describe each configuration module in detail. Alternatively, you can see [Examples](examples/README.md) for a quick start.
+
+NOTE: `_*_` configs are reserved fields in OpenSLU.
+
+## Configuration Item Script
+In the OpenSLU configuration, we support a simple calculation script in each configuration item. For example, we can get `dataset_name` by using `{dataset.dataset_name}`, and fill its value into the python script `'LightChen2333/agif-slu-' + '*'`. (Without quotes, the `{dataset.dataset_name}` value will be treated as a variable.)
+
+NOTE: each item wrapped in `{}` will be treated as a python script.
+```yaml
+tokenizer:
+  _from_pretrained_: "'LightChen2333/agif-slu-' + '{dataset.dataset_name}'" # Support simple calculation script
+
+```
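+
+For instance, assuming `dataset.dataset_name` is set to `atis` (the value here is purely illustrative), the item above would resolve to:
+
+```yaml
+tokenizer:
+  _from_pretrained_: LightChen2333/agif-slu-atis
+```
+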
+## `base` Config
+```yaml
+# `start_time` will be generated automatically when starting any config script; it need not be assigned.
+# start_time: xxxxxxxx
+base:
+  name: "OpenSLU" # project/logger name
+  multi_intent: false # whether to enable the multi-intent setting
+  train: True # enable training, else enable zero-shot
+  test: True # enable testing during training
+  device: cuda # device, cuda/cpu
+  seed: 42 # random seed
+  best_key: EMA # the metric by which to save the model [intent_acc/slot_f1/EMA]
+  tokenizer_name: word_tokenizer # use word_tokenizer for non-pretrained models, else an [AutoTokenizer] tokenizer name
+  add_special_tokens: false # whether to add [CLS], [SEP] special tokens
+  epoch_num: 300 # train epoch num
+#  eval_step: 280 # if eval_by_epoch = false and eval_step > 0, the model will be evaluated every {eval_step} steps
+  eval_by_epoch: true # evaluate the model every epoch
+  batch_size: 16 # batch size
+```
+## `logger` Config
+```yaml
+logger:
+  # `wandb` is supported in both single- and multi-GPU settings,
+  # `tensorboard` is only supported in multi-GPU,
+  # and `fitlog` is only supported in single-GPU
+  logger_type: wandb
+```
+## `model_manager` Config
+```yaml
+model_manager:
+  # if load_dir != `null`, OpenSLU will try to load the checkpoint to continue training,
+  # if load_dir == `null`, OpenSLU will restart training.
+  load_dir: null
+  # The dir path to save the model and training state.
+  # if save_dir == `null`, the model will be saved to `save/{start_time}`
+  save_dir: save/stack
+  # save_mode can be selected in [save-by-step, save-by-eval]
+  # `save-by-step` means saving the model every {save_step} steps without evaluation.
+  # `save-by-eval` means saving the model with the best validation performance
+  save_mode: save-by-eval
+  # save_step: 100 # only enabled when save_mode == `save-by-step`
+  max_save_num: 1 # the number of best models to keep
+```
+## `accelerator` Config
+```yaml
+accelerator:
+  use_accelerator: false # `accelerator` will be enabled if use_accelerator is `true`
+```
+## `dataset` Config
+```yaml
+dataset:
+  # supports loading datasets from hugging-face.
+  # dataset_name can be selected in [atis, snips, mix-atis, mix-snips]
+  dataset_name: atis
+  # you may also assign a path for any single split; the remaining splits follow `dataset_name`
+  # train: atis # load from hugging-face or an assigned local data path.
+  # validation: {root}/ATIS/dev.jsonl
+  # test: {root}/ATIS/test.jsonl
+```
+## `evaluator` Config
+```yaml
+evaluator:
+  best_key: EMA # the metric by which to judge the best model
+  eval_by_epoch: true # Evaluate after every epoch if `true`.
+  # Evaluate after {eval_step} steps if eval_by_epoch == `false`.
+  # eval_step: 1800
+  # the supported metrics are listed below:
+  # - intent_acc
+  # - slot_f1
+  # - EMA
+  # - intent_f1
+  # - macro_intent_f1
+  # - micro_intent_f1
+  # NOTE: [intent_f1, macro_intent_f1, micro_intent_f1] are only supported in the multi-intent setting. intent_f1 and macro_intent_f1 are the same metric.
+  metric:
+    - intent_acc
+    - slot_f1
+    - EMA
+```
+## `tokenizer` Config
+```yaml
+tokenizer:
+  # Init tokenizer. Supports `word_tokenizer` and other tokenizers in huggingface.
+  _tokenizer_name_: word_tokenizer
+  # if `_tokenizer_name_` is not assigned, you can load a pretrained tokenizer from hugging-face.
+  # _from_pretrained_: LightChen2333/stack-propagation-slu-atis
+  _padding_side_: right # the padding side of the tokenizer, supports [left/ right]
+  # Align mode between text and slots, supports [fast/ general];
+  # `general` works with most tokenizers, while `fast` works with only a small portion of tokenizers.
+ _align_mode_: fast + _to_lower_case_: true + add_special_tokens: false # other tokenizer args, you can add other args to tokenizer initialization except `_*_` format args + max_length: 512 + +``` +## `optimizer` Config +```yaml +optimizer: + _model_target_: torch.optim.Adam # Optimizer class/ function return Optimizer object + _model_partial_: true # partial load configuration. Here will add model.parameters() to complete all Optimizer parameters + lr: 0.001 # learning rate + weight_decay: 1e-6 # weight decay +``` +## `scheduler` Config +```yaml +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true # partial load configuration. Here will add optimizer, num_training_steps to complete all Optimizer parameters + name : "linear" + num_warmup_steps: 0 +``` +## `model` Config +```yaml +model: + # _from_pretrained_: LightChen2333/stack-propagation-slu-atis # load model from hugging-face and is not need to assigned any parameters below. + _model_target_: model.OpenSLUModel # the general model class, can automatically build the model through configuration. + + encoder: + _model_target_: model.encoder.AutoEncoder # auto-encoder to autoload provided encoder model + encoder_name: self-attention-lstm # support [lstm/ self-attention-lstm] and other pretrained models those hugging-face supported + + embedding: # word embedding layer +# load_embedding_name: glove.6B.300d.txt # support autoload glove embedding. + embedding_dim: 256 # embedding dim + dropout_rate: 0.5 # dropout ratio after embedding + + lstm: + layer_num: 1 # lstm configuration + bidirectional: true + output_dim: 256 # module should set output_dim for autoload input_dim in next module. You can also set input_dim manually. + dropout_rate: 0.5 + + attention: # self-attention configuration + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.5 + + return_with_input: true # add inputs information, like attention_mask, to decoder module. + return_sentence_level_hidden: false # if return sentence representation to decoder module + + decoder: + _model_target_: model.decoder.StackPropagationDecoder # decoder name + interaction: + _model_target_: model.decoder.interaction.StackInteraction # interaction module name + differentiable: false # interaction module config + + intent_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier # intent classifier module name + layer_num: 1 + bidirectional: false + hidden_dim: 64 + force_ratio: 0.9 # teacher-force ratio + embedding_dim: 8 # intent embedding dim + ignore_index: -100 # ignore index to compute loss and metric + dropout_rate: 0.5 + mode: "token-level-intent" # decode mode, support [token-level-intent, intent, slot] + use_multi: "{base.multi_intent}" + return_sentence_level: true # whether to return sentence level prediction as decoded input + + slot_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: 64 + embedding_dim: 32 + ignore_index: -100 + dropout_rate: 0.5 + mode: "slot" + use_multi: false + return_sentence_level: false +``` + +## Implementing a New Model + +### 1. 
Interaction Re-Implementation
+Here we take `DCA-Net` as an example:
+
+In most cases, you just need to rewrite the `Interaction` module:
+
+```python
+from common.utils import HiddenData
+from model.decoder.interaction import BaseInteraction
+class DCANetInteraction(BaseInteraction):
+    def __init__(self, **config):
+        super().__init__(**config)
+        self.T_block1 = I_S_Block(self.config["output_dim"], self.config["attention_dropout"], self.config["num_attention_heads"])
+        ...
+
+    def forward(self, encode_hidden: HiddenData, **kwargs):
+        ...
+```
+
+Then you should configure your module:
+```yaml
+base:
+  ...
+
+optimizer:
+  ...
+
+scheduler:
+  ...
+
+model:
+  _model_target_: model.OpenSLUModel
+  encoder:
+    _model_target_: model.encoder.AutoEncoder
+    encoder_name: lstm
+
+    embedding:
+      load_embedding_name: glove.6B.300d.txt
+      embedding_dim: 300
+      dropout_rate: 0.5
+
+    lstm:
+      dropout_rate: 0.5
+      output_dim: 128
+      layer_num: 2
+      bidirectional: true
+    output_dim: "{model.encoder.lstm.output_dim}"
+    return_with_input: true
+    return_sentence_level_hidden: false
+
+  decoder:
+    _model_target_: model.decoder.DCANetDecoder
+    interaction:
+      _model_target_: model.decoder.interaction.DCANetInteraction
+      output_dim: "{model.encoder.output_dim}"
+      attention_dropout: 0.5
+      num_attention_heads: 8
+
+    intent_classifier:
+      _model_target_: model.decoder.classifier.LinearClassifier
+      mode: "intent"
+      input_dim: "{model.decoder.output_dim.output_dim}"
+      ignore_index: -100
+
+    slot_classifier:
+      _model_target_: model.decoder.classifier.LinearClassifier
+      mode: "slot"
+      input_dim: "{model.decoder.output_dim.output_dim}"
+      ignore_index: -100
+```
+
+With that, you have finished the model construction. You can run the following script to train the model:
+```shell
+python run.py -cp config/dca_net.yaml [-ds atis]
+```
+### 2. Decoder Re-Implementation
+Sometimes, the `interaction then classification` order cannot meet your needs. In that case, you can simply rewrite the decoder for a flexible interaction order.
+
+Here, we take `stack-propagation` as an example:
+1. We should rewrite the interaction module for `stack-propagation`:
+```python
+from common.utils import ClassifierOutputData, HiddenData
+from model.decoder.interaction.base_interaction import BaseInteraction
+class StackInteraction(BaseInteraction):
+    def __init__(self, **config):
+        super().__init__(**config)
+        ...
+
+    def forward(self, intent_output: ClassifierOutputData, encode_hidden: HiddenData):
+        ...
+```
+2. We should rewrite `StackPropagationDecoder` for the stack-propagation interaction order:
+```python
+from common.utils import HiddenData, OutputData
+class StackPropagationDecoder(BaseDecoder):
+
+    def forward(self, hidden: HiddenData):
+        # classify the intent first, then feed the prediction to the
+        # interaction module before slot classification (stack-propagation order)
+        pred_intent = self.intent_classifier(hidden)
+        hidden = self.interaction(pred_intent, hidden)
+        pred_slot = self.slot_classifier(hidden)
+        return OutputData(pred_intent, pred_slot)
+```
+
+3. Then we can easily assemble the general model via the `config/stack-propagation.yaml` configuration file:
+```yaml
+base:
+  ...
+
+...
+
+model:
+  _model_target_: model.OpenSLUModel
+
+  encoder:
+    ...
+
+  decoder:
+    _model_target_: model.decoder.StackPropagationDecoder
+    interaction:
+      _model_target_: model.decoder.interaction.StackInteraction
+      differentiable: false
+
+    intent_classifier:
+      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
+      ... # parameters needed by __init__(*)
+      mode: "token-level-intent"
+      use_multi: false
+      return_sentence_level: true
+
+    slot_classifier:
+      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
+      ... # parameters needed by __init__(*)
+
+
+
diff --git a/config/app.yaml b/config/app.yaml
index 5c07dd767e16596eb39fa42859b7b72d2a34f3a4..d9ecb57aa556caa885125c4b11e8aac67ceaa6b4 100644
--- a/config/app.yaml
+++ b/config/app.yaml
@@ -1,109 +1,6 @@
-device: "NVIDIA GeForce RTX 2080 Ti"
-
 host: 127.0.0.1
 port: 7860
 is_push_to_public: false
 save-path: save/stack/outputs.jsonl
-page-size: 2
-
-base:
-  name: "OpenSLUv1"
-  train: false
-  test: false
-  device: cpu
-  ckpt_path: null
-  seed: 42
-  best_key: EMA
-  epoch_num: 300
-  batch_size: 16
-  eval_by_epoch: true
-  model_dir: save/stack
-  template: application.html
-accelerator:
-  use_accelerator: false
-
-dataset:
-  dataset_name: atis
-
-metric:
-  - intent_acc
-  - slot_f1
-  - EMA
-
-tokenizer:
-  _tokenizer_name_: word_tokenizer
-  _padding_side_: right
-  _align_mode_: fast
-  _to_lower_case_: true
-  add_special_tokens: false
-  max_length: 512
-
-optimizer:
-  _model_target_: torch.optim.Adam
-  _model_partial_: true
-  lr: 0.001
-  weight_decay: 1e-6
-
-scheduler:
-  _model_target_: transformers.get_scheduler
-  _model_partial_: true
-  name : "linear"
-  num_warmup_steps: 0
-
-model:
-  _model_target_: model.OpenSLUModel
-
-  encoder:
-    _model_target_: model.encoder.AutoEncoder
-    encoder_name: self-attention-lstm
-
-    embedding:
-      embedding_dim: 256
-      dropout_rate: 0.55
-
-    lstm:
-      layer_num: 1
-      bidirectional: true
-      output_dim: 256
-      dropout_rate: 0.5
-
-    attention:
-      hidden_dim: 1024
-      output_dim: 128
-      dropout_rate: 0.6
-
-    return_with_input: true
-    return_sentence_level_hidden: false
-
-  decoder:
-    _model_target_: model.decoder.StackPropagationDecoder
-    interaction:
-      _model_target_: model.decoder.interaction.StackInteraction
-      differentiable: false
-
-    intent_classifier:
-      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
-      layer_num: 1
-      bidirectional: false
-      force_ratio: 0.9
-      hidden_dim: 64
-      embedding_dim: 8
-      ignore_index: -100
-      dropout_rate: 0.5
-      mode: "token-level-intent"
-      use_multi: false
-      return_sentence_level: true
-
-    slot_classifier:
-      _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier
-      layer_num: 1
-      bidirectional: false
-      force_ratio: 0.9
-      hidden_dim: 64
-      embedding_dim: 32
-      ignore_index: -100
-      dropout_rate: 0.55
-      mode: "slot"
-      use_multi: false
-      return_sentence_level: false
\ No newline at end of file
+page-size: 2
\ No newline at end of file
diff --git a/config/decoder/interaction/stack-propagation.yaml b/config/decoder/interaction/stack-propagation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7e488a13bab125a850bc6220f1780d4a4c569724
--- /dev/null
+++ b/config/decoder/interaction/stack-propagation.yaml
@@ -0,0 +1 @@
+differentiable: false
\ No newline at end of file
diff --git a/config/examples/README.md b/config/examples/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..aec8ce8006690c161333a3100dde4c1b7dab2cb5
--- /dev/null
+++ b/config/examples/README.md
@@ -0,0 +1,38 @@
+# Examples
+
+Here we introduce some usage of our framework through configuration.
+
+## Reload to train
+
+First, you can run this script to train a `joint-bert` model:
+```shell
+python run.py -cp config/examples/normal.yaml
+```
+
+You can then use `kill` or `Ctrl+C` to stop the training process.
+ +Then, to reload model and continue training, you can run `reload_to_train.yaml` to reload checkpoint and training state. +```shell +python run.py -cp config/examples/reload_to_train.yaml +``` + +The main difference in `reload_to_train.yaml` is the `model_manager` configuration item: +```yaml +... +model_manager: + load_train_state: True # set to True + load_dir: save/joint_bert # not null + ... +... +``` + +## Load from Pre-finetuned model. +We upload all models to [LightChen2333](https://huggingface.co/LightChen2333). You can load those model by simple configuration. +In `from_pretrained.yaml` and `from_pretrained_multi.yaml`, we show two example scripts to load from hugging face in single- and multi-intent, respectively. The key configuration items are as below: +```yaml +tokenizer: + _from_pretrained_: "'LightChen2333/agif-slu-' + '{dataset.dataset_name}'" # Support simple calculation script + +model: + _from_pretrained_: "'LightChen2333/agif-slu-' + '{dataset.dataset_name}'" +``` diff --git a/config/examples/from_pretrained.yaml b/config/examples/from_pretrained.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb83129cac0333cf492bfea3a4ea4c0a96ac7c6d --- /dev/null +++ b/config/examples/from_pretrained.yaml @@ -0,0 +1,53 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: false + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +logger: + logger_type: wandb # wandb is supported both in single- multi-GPU, tensorboard is only supported in multi-GPU, and fitlog is only supported in single-GPU + +model_manager: + load_dir: null + save_dir: save/joint_bert + save_mode: save-by-eval # save-by-step + # save_step: 100 + max_save_num: 1 + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _from_pretrained_: "'LightChen2333/joint-bert-slu-' + '{dataset.dataset_name}'" + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _from_pretrained_: "'LightChen2333/joint-bert-slu-' + '{dataset.dataset_name}'" \ No newline at end of file diff --git a/config/examples/from_pretrained_multi.yaml b/config/examples/from_pretrained_multi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dbdb0f1a791193f60f3b60fc92696d2a52ee77dd --- /dev/null +++ b/config/examples/from_pretrained_multi.yaml @@ -0,0 +1,55 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + multi_intent: true + train: false + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + + +logger: + logger_type: wandb # wandb is supported both in single- multi-GPU, tensorboard is only supported in multi-GPU, and fitlog is only supported in single-GPU + +model_manager: + load_dir: null + save_dir: save/joint_bert + save_mode: save-by-eval # save-by-step + # save_step: 100 + max_save_num: 1 + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _from_pretrained_: "'LightChen2333/agif-slu-' + '{dataset.dataset_name}'" + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + 
_model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _from_pretrained_: "'LightChen2333/agif-slu-' + '{dataset.dataset_name}'" \ No newline at end of file diff --git a/config/examples/normal.yaml b/config/examples/normal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27aa91d3683e94d7bb1b9f44787955b6860be185 --- /dev/null +++ b/config/examples/normal.yaml @@ -0,0 +1,70 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLU-test" + train: True + test: True + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 128 + +model_manager: + load_dir: null + save_dir: save/joint_bert + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: bert-base-uncased + _padding_side_: right + _align_mode_: general + add_special_tokens: true + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 4e-6 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: bert-base-uncased + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/examples/reload_to_train.yaml b/config/examples/reload_to_train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52b11f1c9178c8d69927a8557fd053fd81fba617 --- /dev/null +++ b/config/examples/reload_to_train.yaml @@ -0,0 +1,71 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLU-test" + train: True + test: True + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 128 + +model_manager: + load_train_state: True + load_dir: save/joint_bert + save_dir: save/joint_bert + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: bert-base-uncased + _padding_side_: right + _align_mode_: general + add_special_tokens: true + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 4e-6 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: bert-base-uncased + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/bi-model.yaml 
b/config/reproduction/atis/bi-model.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02e0a5cd430695a43c7264b41b3eb3a8cb1d1ecc --- /dev/null +++ b/config/reproduction/atis/bi-model.yaml @@ -0,0 +1,106 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/bi-model-atis + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.BiEncoder + intent_encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.4 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + slot_encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.4 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.BaseDecoder +# teacher_forcing: true + interaction: + _model_target_: model.decoder.interaction.BiModelInteraction + output_dim: 256 + dropout_rate: 0.4 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/dca-net.yaml b/config/reproduction/atis/dca-net.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad58227493734467ba3a34bec44b49b9d0d362ab --- /dev/null +++ b/config/reproduction/atis/dca-net.yaml @@ -0,0 +1,88 @@ +device: "Tesla P100-PCIE-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/dca-net-atis + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + load_embedding_name: glove.6B.300d.txt + embedding_dim: 300 + dropout_rate: 0.5 + + lstm: + dropout_rate: 0.5 + output_dim: 128 + layer_num: 2 + bidirectional: true + output_dim: 
"{model.encoder.lstm.output_dim}" + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.DCANetDecoder + interaction: + _model_target_: model.decoder.interaction.DCANetInteraction + output_dim: "{model.encoder.output_dim}" + attention_dropout: 0.5 + num_attention_heads: 8 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + input_dim: "{model.encoder.output_dim}" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + input_dim: "{model.encoder.output_dim}" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/deberta.yaml b/config/reproduction/atis/deberta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9f6532787ba5e1e4d44e0a7bd2dbe32b0b73a60 --- /dev/null +++ b/config/reproduction/atis/deberta.yaml @@ -0,0 +1,67 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/deberta-atis + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _tokenizer_name_: microsoft/deberta-v3-base + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: microsoft/deberta-v3-base + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/electra.yaml b/config/reproduction/atis/electra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0273298d5c38219bb425d36d13887ad134c16b62 --- /dev/null +++ b/config/reproduction/atis/electra.yaml @@ -0,0 +1,67 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: True + test: True + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/electra-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: google/electra-small-discriminator + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: google/electra-small-discriminator + output_dim: 256 + return_with_input: true + return_sentence_level_hidden: true + + 
decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/joint-bert.yaml b/config/reproduction/atis/joint-bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..87f6b2b5783d68f38e82833395ec1d1fbca54765 --- /dev/null +++ b/config/reproduction/atis/joint-bert.yaml @@ -0,0 +1,70 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: True + test: True + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 128 + +model_manager: + load_dir: null + save_dir: save/joint-bert-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: bert-base-uncased + _padding_side_: right + _align_mode_: general + add_special_tokens: true + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 4e-6 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: bert-base-uncased + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/roberta.yaml b/config/reproduction/atis/roberta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f68f97ad30263daa0d2ac046cef8fe9237c203a9 --- /dev/null +++ b/config/reproduction/atis/roberta.yaml @@ -0,0 +1,70 @@ +device: "Tesla V100-SXM2-16GB" #Useless info + +base: + name: "OpenSLUv1" + train: True + test: True + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/roberta-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: roberta-base + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: roberta-base + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + 
ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/slot-gated.yaml b/config/reproduction/atis/slot-gated.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd58f74119fd9723bbb10e77fd6a070ba6a70552 --- /dev/null +++ b/config/reproduction/atis/slot-gated.yaml @@ -0,0 +1,87 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/slot-gated-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.4 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.BaseDecoder + + interaction: + _model_target_: model.decoder.interaction.SlotGatedInteraction + remove_slot_attn: false + output_dim: 256 + dropout_rate: 0.4 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/atis/stack-propagation.yaml b/config/reproduction/atis/stack-propagation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5041e664215dbbcca1f83fade44aba453dc6333 --- /dev/null +++ b/config/reproduction/atis/stack-propagation.yaml @@ -0,0 +1,109 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/stack-propagation-atis + save_mode: save-by-eval # save-by-step + # save_step: 100 + max_save_num: 1 + +accelerator: + use_accelerator: false + +dataset: + dataset_name: atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + _to_lower_case_: true + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.55 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.5 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.6 + + return_with_input: true + return_sentence_level_hidden: false + 
+ decoder: + _model_target_: model.decoder.StackPropagationDecoder + interaction: + _model_target_: model.decoder.interaction.StackInteraction + differentiable: false + + intent_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: 64 + embedding_dim: 8 + ignore_index: -100 + dropout_rate: 0.5 + mode: "token-level-intent" + use_multi: false + return_sentence_level: true + + slot_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: 64 + embedding_dim: 32 + ignore_index: -100 + dropout_rate: 0.55 + mode: "slot" + use_multi: false + return_sentence_level: false \ No newline at end of file diff --git a/config/reproduction/mix-atis/agif.yaml b/config/reproduction/mix-atis/agif.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8f13e65dc1bb42319aa7120dffbe36b0e9951257 --- /dev/null +++ b/config/reproduction/mix-atis/agif.yaml @@ -0,0 +1,133 @@ +device: "NVIDIA GeForce RTX 3080" + +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 100 + batch_size: 32 + ignore_index: -100 + +model_manager: + load_dir: null + save_dir: save/agif-mix-atis + +accelerator: + use_accelerator: false + +dataset: + dataset_name: mix-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + + unflat_attention: + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.AGIFDecoder +# teacher_forcing: true + interaction: + _model_target_: model.decoder.interaction.AGIFInteraction + intent_embedding_dim: 128 + input_dim: "{model.encoder.output_dim}" + hidden_dim: 128 + output_dim: "{model.decoder.interaction.intent_embedding_dim}" + dropout_rate: 0.4 + alpha: 0.2 + num_heads: 4 + num_layers: 2 + row_normalized: true + + intent_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "intent" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.encoder.output_dim}" + out_features: 256 + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: 256 + out_features: "{base.intent_label_num}" + dropout_rate: 0.4 + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: -100 + weight: 0.3 + + slot_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + mode: "slot" + input_dim: 
"{model.encoder.output_dim}" + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: "{model.decoder.interaction.intent_embedding_dim}" + embedding_dim: 128 +# loss_fn: +# _model_target_: torch.nn.NLLLoss + ignore_index: -100 + dropout_rate: 0.4 + use_multi: false + multi_threshold: 0.5 + return_sentence_level: false + weight: 0.7 \ No newline at end of file diff --git a/config/reproduction/mix-atis/gl-gin.yaml b/config/reproduction/mix-atis/gl-gin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a52a049c98e9dcdb068815d18ba6e3441dec343 --- /dev/null +++ b/config/reproduction/mix-atis/gl-gin.yaml @@ -0,0 +1,128 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + ignore_index: -100 + +model_manager: + load_dir: null + save_dir: save/gl-gin-mix-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +dataset: + dataset_name: mix-atis + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.GLGINDecoder + dropout_rate: 0.4 + interaction: + _model_target_: model.decoder.interaction.GLGINInteraction + intent_embedding_dim: 64 + input_dim: "{model.encoder.output_dim}" + hidden_dim: 256 + output_dim: "{model.decoder.interaction.intent_embedding_dim}" + dropout_rate: 0.4 + alpha: 0.2 + num_heads: 8 + num_layers: 2 + row_normalized: true + slot_graph_window: 1 + intent_label_num: "{base.intent_label_num}" + + intent_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "token-level-intent" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.encoder.output_dim}" + out_features: 256 + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: 256 + out_features: "{base.intent_label_num}" + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + dropout_rate: 0.4 + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: "{base.ignore_index}" + + slot_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "slot" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.decoder.interaction.output_dim}" + out_features: "{model.decoder.interaction.output_dim}" + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: "{model.decoder.interaction.output_dim}" + out_features: "{base.slot_label_num}" + ignore_index: "{base.ignore_index}" + dropout_rate: 0.4 + use_multi: false + multi_threshold: 0.5 + 
return_sentence_level: false \ No newline at end of file diff --git a/config/reproduction/mix-atis/vanilla.yaml b/config/reproduction/mix-atis/vanilla.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36ee8ed2b2b34133e0f4a6273617fade021ef03b --- /dev/null +++ b/config/reproduction/mix-atis/vanilla.yaml @@ -0,0 +1,95 @@ +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 100 + batch_size: 16 + ignore_index: -100 + +model_manager: + load_dir: null + save_dir: save/vanilla-mix-atis + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.BaseDecoder + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + input_dim: "{model.encoder.output_dim}" + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: "{base.ignore_index}" + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + input_dim: "{model.encoder.output_dim}" + use_multi: false + multi_threshold: 0.5 + ignore_index: "{base.ignore_index}" + return_sentence_level: false \ No newline at end of file diff --git a/config/reproduction/mix-snips/agif.yaml b/config/reproduction/mix-snips/agif.yaml new file mode 100644 index 0000000000000000000000000000000000000000..877ad78ec112b088e192b6942fb940bb2f9af988 --- /dev/null +++ b/config/reproduction/mix-snips/agif.yaml @@ -0,0 +1,131 @@ +device: "Tesla P100-PCIE-16GB" + +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 50 + batch_size: 64 + ignore_index: -100 + +model_manager: + load_dir: null + save_dir: save/agif-mix-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: mix-snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + 
embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + + unflat_attention: + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.AGIFDecoder +# teacher_forcing: true + interaction: + _model_target_: model.decoder.interaction.AGIFInteraction + intent_embedding_dim: 128 + input_dim: "{model.encoder.output_dim}" + hidden_dim: 128 + output_dim: "{model.decoder.interaction.intent_embedding_dim}" + dropout_rate: 0.4 + alpha: 0.2 + num_heads: 4 + num_layers: 2 + row_normalized: true + + intent_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "intent" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.encoder.output_dim}" + out_features: 256 + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: 256 + out_features: "{base.intent_label_num}" + dropout_rate: 0.4 + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: -100 + weight: 0.3 + + slot_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + mode: "slot" + input_dim: "{model.encoder.output_dim}" + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: "{model.decoder.interaction.intent_embedding_dim}" + embedding_dim: 128 + ignore_index: -100 + dropout_rate: 0.4 + use_multi: false + multi_threshold: 0.5 + return_sentence_level: false + weight: 0.7 \ No newline at end of file diff --git a/config/reproduction/mix-snips/gl-gin.yaml b/config/reproduction/mix-snips/gl-gin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f90eec35696f1ba54ef2eb44705c5393db65e1f --- /dev/null +++ b/config/reproduction/mix-snips/gl-gin.yaml @@ -0,0 +1,131 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 50 + batch_size: 32 + ignore_index: -100 + + +model_manager: + load_dir: null + save_dir: save/gl-gin-mix-snips + +evaluator: + best_key: EMA + eval_by_epoch: false + eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +dataset: + dataset_name: mix-snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 2 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.GLGINDecoder + dropout_rate: 0.4 + interaction: + _model_target_: model.decoder.interaction.GLGINInteraction + 
intent_embedding_dim: 256 + input_dim: "{model.encoder.output_dim}" + hidden_dim: 256 + output_dim: "{model.decoder.interaction.intent_embedding_dim}" + dropout_rate: 0.4 + alpha: 0.2 + num_heads: 4 + num_layers: 2 + row_normalized: true + slot_graph_window: 1 + intent_label_num: "{base.intent_label_num}" + + intent_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "token-level-intent" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.encoder.output_dim}" + out_features: 256 + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: 256 + out_features: "{base.intent_label_num}" + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + dropout_rate: 0.4 + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: "{base.ignore_index}" + weight: 0.2 + + slot_classifier: + _model_target_: model.decoder.classifier.MLPClassifier + mode: "slot" + mlp: + - _model_target_: torch.nn.Linear + in_features: "{model.decoder.interaction.output_dim}" + out_features: "{model.decoder.interaction.output_dim}" + - _model_target_: torch.nn.LeakyReLU + negative_slope: 0.2 + - _model_target_: torch.nn.Linear + in_features: "{model.decoder.interaction.output_dim}" + out_features: "{base.slot_label_num}" + ignore_index: "{base.ignore_index}" + dropout_rate: 0.4 + use_multi: false + multi_threshold: 0.5 + weight: 0.8 + return_sentence_level: false \ No newline at end of file diff --git a/config/reproduction/mix-snips/vanilla.yaml b/config/reproduction/mix-snips/vanilla.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8a7e8738e6c34b63ed8887796926648a1a45cece --- /dev/null +++ b/config/reproduction/mix-snips/vanilla.yaml @@ -0,0 +1,95 @@ +base: + name: "OpenSLUv1" + multi_intent: true + train: true + test: true + device: cuda + seed: 42 + epoch_num: 100 + batch_size: 16 + ignore_index: -100 + +model_manager: + load_dir: null + save_dir: save/vanilla-mix-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - intent_f1 + - slot_f1 + - EMA + +dataset: + dataset_name: atis + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 128 + dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + output_dim: "{model.encoder.lstm.output_dim} + {model.encoder.attention.output_dim}" + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.BaseDecoder + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + input_dim: "{model.encoder.output_dim}" + loss_fn: + _model_target_: torch.nn.BCEWithLogitsLoss + use_multi: "{base.multi_intent}" + multi_threshold: 0.5 + return_sentence_level: true + ignore_index: "{base.ignore_index}" + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + 
input_dim: "{model.encoder.output_dim}" + use_multi: false + multi_threshold: 0.5 + ignore_index: "{base.ignore_index}" + return_sentence_level: false \ No newline at end of file diff --git a/config/reproduction/snips/bi-model.yaml b/config/reproduction/snips/bi-model.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3eb192aa2063904691d9539d73833a4a005581bf --- /dev/null +++ b/config/reproduction/snips/bi-model.yaml @@ -0,0 +1,104 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/bi-model-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.BiEncoder + intent_encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.5 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + slot_encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.5 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.BaseDecoder + interaction: + _model_target_: model.decoder.interaction.BiModelInteraction + output_dim: 256 + dropout_rate: 0.5 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/dca_net.yaml b/config/reproduction/snips/dca_net.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d42d525ba6bf74e2d457687129a3c518b814843c --- /dev/null +++ b/config/reproduction/snips/dca_net.yaml @@ -0,0 +1,88 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/dca-net-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + 
embedding: + load_embedding_name: glove.6B.300d.txt + embedding_dim: 300 + dropout_rate: 0.4 + + lstm: + dropout_rate: 0.4 + output_dim: 128 + layer_num: 2 + bidirectional: true + output_dim: "{model.encoder.lstm.output_dim}" + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.DCANetDecoder + interaction: + _model_target_: model.decoder.interaction.DCANetInteraction + output_dim: "{model.encoder.output_dim}" + attention_dropout: 0.4 + num_attention_heads: 8 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + input_dim: "{model.encoder.output_dim}" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + input_dim: "{model.encoder.output_dim}" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/deberta.yaml b/config/reproduction/snips/deberta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fd23a2b41754841352b315f205cde4c8588b342 --- /dev/null +++ b/config/reproduction/snips/deberta.yaml @@ -0,0 +1,70 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/deberta-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: microsoft/deberta-v3-base + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: microsoft/deberta-v3-base + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/electra.yaml b/config/reproduction/snips/electra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c923723dfc47c3bfb4dadaf0a7735dd8723b321 --- /dev/null +++ b/config/reproduction/snips/electra.yaml @@ -0,0 +1,69 @@ +device: "Tesla V100-SXM2-16GB" +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/electra-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: google/electra-small-discriminator + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + 
num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: google/electra-small-discriminator + output_dim: 256 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/joint-bert.yaml b/config/reproduction/snips/joint-bert.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb0fbe2698186952e7c78fbde13620d1b7c0391e --- /dev/null +++ b/config/reproduction/snips/joint-bert.yaml @@ -0,0 +1,75 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 128 + +model_manager: + load_dir: null + save_dir: save/joint-bert-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +metric: + - intent_acc + - slot_f1 + - EMA + +tokenizer: + _tokenizer_name_: bert-base-uncased + _padding_side_: right + _align_mode_: general + add_special_tokens: true + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 4e-6 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: bert-base-uncased + output_dim: 768 + return_with_input: true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/roberta.yaml b/config/reproduction/snips/roberta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..965ec41ccffb10c7f8a01289aa1486ba128361e5 --- /dev/null +++ b/config/reproduction/snips/roberta.yaml @@ -0,0 +1,70 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 32 + +model_manager: + load_dir: null + save_dir: save/roberta-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: roberta-base + _padding_side_: right + add_special_tokens: true + max_length: 512 + +optimizer: + _model_target_: torch.optim.AdamW + _model_partial_: true + lr: 2e-5 + weight_decay: 1e-8 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.open_slu_model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: roberta-base + output_dim: 768 + return_with_input: 
true + return_sentence_level_hidden: true + + decoder: + _model_target_: model.decoder.base_decoder.BaseDecoder + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/slot-gated.yaml b/config/reproduction/snips/slot-gated.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52f9a06479821c42d5bf8fd2a2e144064da9dd9e --- /dev/null +++ b/config/reproduction/snips/slot-gated.yaml @@ -0,0 +1,87 @@ +device: "NVIDIA GeForce RTX 2080 Ti" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/slot-gated-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + ignore_index: -100 + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: lstm + + embedding: + embedding_dim: 256 + dropout_rate: 0.4 + + lstm: + dropout_rate: 0.5 + output_dim: 256 + layer_num: 2 + bidirectional: true + + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.BaseDecoder + + interaction: + _model_target_: model.decoder.interaction.SlotGatedInteraction + remove_slot_attn: false + output_dim: 256 + dropout_rate: 0.4 + + intent_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "intent" + ignore_index: -100 + + slot_classifier: + _model_target_: model.decoder.classifier.LinearClassifier + mode: "slot" + ignore_index: -100 \ No newline at end of file diff --git a/config/reproduction/snips/stack-propagation.yaml b/config/reproduction/snips/stack-propagation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0039d929b242724b07121368c65a7b18cf2a7e54 --- /dev/null +++ b/config/reproduction/snips/stack-propagation.yaml @@ -0,0 +1,105 @@ +device: "Tesla V100-SXM2-16GB" + +base: + name: "OpenSLUv1" + train: true + test: true + device: cuda + seed: 42 + epoch_num: 300 + batch_size: 16 + +model_manager: + load_dir: null + save_dir: save/stack-propagation-snips + +evaluator: + best_key: EMA + eval_by_epoch: true + # eval_step: 1800 + metric: + - intent_acc + - slot_f1 + - EMA + +accelerator: + use_accelerator: false + +dataset: + dataset_name: snips + +tokenizer: + _tokenizer_name_: word_tokenizer + _padding_side_: right + _align_mode_: fast + add_special_tokens: false + max_length: 512 + +optimizer: + _model_target_: torch.optim.Adam + _model_partial_: true + lr: 0.001 + weight_decay: 1e-6 + +scheduler: + _model_target_: transformers.get_scheduler + _model_partial_: true + name : "linear" + num_warmup_steps: 0 + +model: + _model_target_: model.OpenSLUModel + + encoder: + _model_target_: model.encoder.AutoEncoder + encoder_name: self-attention-lstm + + embedding: + embedding_dim: 256 + 
dropout_rate: 0.4 + + lstm: + layer_num: 1 + bidirectional: true + output_dim: 256 + dropout_rate: 0.4 + + attention: + hidden_dim: 1024 + output_dim: 128 + dropout_rate: 0.4 + + return_with_input: true + return_sentence_level_hidden: false + + decoder: + _model_target_: model.decoder.StackPropagationDecoder + interaction: + _model_target_: model.decoder.interaction.StackInteraction + differentiable: false + + intent_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: 64 + embedding_dim: 8 + ignore_index: -100 + dropout_rate: 0.4 + mode: "token-level-intent" + use_multi: false + return_sentence_level: true + + slot_classifier: + _model_target_: model.decoder.classifier.AutoregressiveLSTMClassifier + layer_num: 1 + bidirectional: false + force_ratio: 0.9 + hidden_dim: 64 + embedding_dim: 32 + ignore_index: -100 + dropout_rate: 0.4 + mode: "slot" + use_multi: false + return_sentence_level: false \ No newline at end of file diff --git a/config/visual.yaml b/config/visual.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9eb80b81b14be9602ef90c81d2af0f83e56e45f5 --- /dev/null +++ b/config/visual.yaml @@ -0,0 +1,6 @@ +host: 127.0.0.1 +port: 7861 + +is_push_to_public: true +output_path: save/stack/outputs.jsonl +page-size: 2 \ No newline at end of file diff --git a/model/decoder/base_decoder.py b/model/decoder/base_decoder.py index a8a1f8b48b0371ef7f1278194b716d91dcbb82c1..2c8ae3baffea61879c8ca912134cb9ac02c4a82f 100644 --- a/model/decoder/base_decoder.py +++ b/model/decoder/base_decoder.py @@ -16,7 +16,7 @@ class BaseDecoder(nn.Module): Notice: t is often only necessary to change this module and its sub-modules """ - def __init__(self, intent_classifier, slot_classifier, interaction=None): + def __init__(self, intent_classifier=None, slot_classifier=None, interaction=None): super().__init__() self.intent_classifier = intent_classifier self.slot_classifier = slot_classifier @@ -33,7 +33,13 @@ class BaseDecoder(nn.Module): """ if self.interaction is not None: hidden = self.interaction(hidden) - return OutputData(self.intent_classifier(hidden), self.slot_classifier(hidden)) + intent = None + slot = None + if self.intent_classifier is not None: + intent = self.intent_classifier(hidden) + if self.slot_classifier is not None: + slot = self.slot_classifier(hidden) + return OutputData(intent, slot) def decode(self, output: OutputData, target: InputData = None): """decode output logits @@ -45,7 +51,12 @@ class BaseDecoder(nn.Module): Returns: List: decoded sequence ids """ - return OutputData(self.intent_classifier.decode(output, target), self.slot_classifier.decode(output, target)) + intent, slot = None, None + if self.intent_classifier is not None: + intent = self.intent_classifier.decode(output, target) + if self.slot_classifier is not None: + slot = self.slot_classifier.decode(output, target) + return OutputData(intent, slot) def compute_loss(self, pred: OutputData, target: InputData, compute_intent_loss=True, compute_slot_loss=True): """compute loss. @@ -60,16 +71,18 @@ class BaseDecoder(nn.Module): Returns: Tensor: loss result """ - intent_loss = self.intent_classifier.compute_loss(pred, target) if compute_intent_loss else None - slot_loss = self.slot_classifier.compute_loss(pred, target) if compute_slot_loss else None - slot_weight = self.slot_classifier.config.get("weight") - slot_weight = slot_weight if slot_weight is not None else 1. 
-        intent_weight = self.intent_classifier.config.get("weight")
-        intent_weight = intent_weight if intent_weight is not None else 1.
         loss = 0
-        if intent_loss is not None:
+        intent_loss = None
+        slot_loss = None
+        # Guard on both the classifier and the compute flag so a skipped loss
+        # never reaches the accumulation below as None.
+        if self.intent_classifier is not None and compute_intent_loss:
+            intent_loss = self.intent_classifier.compute_loss(pred, target)
+            intent_weight = self.intent_classifier.config.get("weight")
+            intent_weight = intent_weight if intent_weight is not None else 1.
             loss += intent_loss * intent_weight
-        if slot_loss is not None:
+        if self.slot_classifier is not None and compute_slot_loss:
+            slot_loss = self.slot_classifier.compute_loss(pred, target)
+            slot_weight = self.slot_classifier.config.get("weight")
+            slot_weight = slot_weight if slot_weight is not None else 1.
             loss += slot_loss * slot_weight
         return loss, intent_loss, slot_loss
diff --git a/model/encoder/auto_encoder.py b/model/encoder/auto_encoder.py
index 174e9eb7cde8d6845f602bb5b4c3e10bd7890d46..9c15f4c7f341794e68a1882704fe181b0e58f824 100644
--- a/model/encoder/auto_encoder.py
+++ b/model/encoder/auto_encoder.py
@@ -2,7 +2,7 @@
 Author: Qiguang Chen
 Date: 2023-01-11 10:39:26
 LastEditors: Qiguang Chen
-LastEditTime: 2023-01-26 17:46:10
+LastEditTime: 2023-02-18 19:33:34
 Description: 
 '''
diff --git a/model/encoder/non_pretrained_encoder.py b/model/encoder/non_pretrained_encoder.py
index 4370dcc0d8dc01f9e416a7bc87b207026ebc4277..f842b4bd2ca8c5e6eb9001a03e5f46ec98650e37 100644
--- a/model/encoder/non_pretrained_encoder.py
+++ b/model/encoder/non_pretrained_encoder.py
@@ -2,7 +2,7 @@
 Author: Qiguang Chen
 Date: 2023-01-11 10:39:26
 LastEditors: Qiguang Chen
-LastEditTime: 2023-01-30 15:00:29
+LastEditTime: 2023-02-17 21:08:19
 Description: non-pretrained encoder model
 '''
@@ -50,7 +50,7 @@ class NonPretrainedEncoder(BaseEncoder):
         # Embedding Initialization
         embed_config = config["embedding"]
         self.__embedding_dim = embed_config["embedding_dim"]
-        if embed_config.get("load_embedding_name"):
+        if embed_config.get("load_embedding_name") and embed_config.get("embedding_matrix"):
             self.__embedding_layer = nn.Embedding.from_pretrained(embed_config["embedding_matrix"], padding_idx=0)
         else:
             self.__embedding_layer = nn.Embedding(
diff --git a/model/encoder/pretrained_encoder.py b/model/encoder/pretrained_encoder.py
index 448fa31fbfb600b078185c1e386d009de1e5e68c..3003ce20658050f6471ae9c828c1c69c3bf6abca 100644
--- a/model/encoder/pretrained_encoder.py
+++ b/model/encoder/pretrained_encoder.py
@@ -2,11 +2,12 @@
 Author: Qiguang Chen
 Date: 2023-01-11 10:39:26
 LastEditors: Qiguang Chen
-LastEditTime: 2023-01-26 17:18:01
+LastEditTime: 2023-02-18 17:38:30
 Description: pretrained encoder model
 '''
-from transformers import AutoModel
+from transformers import AutoModel, AutoConfig
+from common import utils
 from common.utils import InputData, HiddenData
 from model.encoder.base_encoder import BaseEncoder
@@ -21,7 +22,11 @@ class PretrainedEncoder(BaseEncoder):
             encoder_name (str): pretrained model name in hugging face.
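+            _is_check_point_ (bool, optional): when set in the config, the
+                encoder is instantiated from the saved ``pretrained_model``
+                sub-config via ``utils.instantiate`` instead of
+                ``AutoModel.from_pretrained(encoder_name)``.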
""" super().__init__(**config) - self.encoder = AutoModel.from_pretrained(config["encoder_name"]) + if self.config.get("_is_check_point_"): + self.encoder = utils.instantiate(config["pretrained_model"], target="_pretrained_model_target_") + # print(self.encoder) + else: + self.encoder = AutoModel.from_pretrained(config["encoder_name"]) def forward(self, inputs: InputData): output = self.encoder(**inputs.get_inputs()) diff --git a/requirements.txt b/requirements.txt index 32f9fe1277736dcf29c529419322fc1018cc1b6f..da5624771ffd3f5386c2e5b6d3b32ebd5b0b074d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,10 @@ accelerate==0.13.2 dill==0.3.6 einops==0.6.0 wandb==0.13.8 -scikit-learn +scikit-learn==1.2.0 pytorch-crf==0.7.2 -ordered-set==4.1.0 \ No newline at end of file +ordered-set==4.1.0 +gradio==3.16.2 +flask==2.2.2 +datasets==2.8.0 +colorlog==6.7.0 \ No newline at end of file diff --git a/run.py b/run.py new file mode 100644 index 0000000000000000000000000000000000000000..00c67d4b0e822fbbb41ce89969becb42f2b1bb56 --- /dev/null +++ b/run.py @@ -0,0 +1,34 @@ +''' +Author: Qiguang Chen +Date: 2023-01-11 10:39:26 +LastEditors: Qiguang Chen +LastEditTime: 2023-02-19 19:01:40 +Description: main executive file + +''' +import argparse + +from common.config import Config +from common.model_manager import ModelManager + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config_path', '-cp', type=str, default='config/stack-propagation.yaml') + parser.add_argument('--model', '-m', type=str, default=None) + parser.add_argument('--dataset', '-ds', type=str, default=None) + parser.add_argument('--device', '-dv', type=str, default=None) + parser.add_argument('--learning_rate', '-lr', type=float, default=None) + parser.add_argument('--epoch_num', '-en', type=int, default=None) + args = parser.parse_args() + config = Config.load_from_args(args) + model_manager = ModelManager(config) + model_manager.init_model() + if config.base.get("train"): + model_manager.train() + if not config.base.get("train") and config.base.get("test"): + model_manager.test() + + +if __name__ == "__main__": + main() diff --git a/static/css/style.css b/static/css/style.css new file mode 100644 index 0000000000000000000000000000000000000000..4298d946ab63c672517d16cde93535f0553f520b --- /dev/null +++ b/static/css/style.css @@ -0,0 +1,98 @@ +.card { + --phoenix-card-spacer-y: 1.5rem; + --phoenix-card-spacer-x: 1.5rem; + --phoenix-card-title-spacer-y: 1rem; + --phoenix-card-border-width: 1px; + --phoenix-card-border-color: var(--phoenix-gray-200); + --phoenix-card-border-radius: 0.5rem; + --phoenix-card-box-shadow: ; + --phoenix-card-inner-border-radius: calc(0.5rem - 1px); + --phoenix-card-cap-padding-y: 1.5rem; + --phoenix-card-cap-padding-x: 1.5rem; + --phoenix-card-cap-bg: var(--phoenix-card-cap-bg); + --phoenix-card-cap-color: ; + --phoenix-card-height: ; + --phoenix-card-color: ; + --phoenix-card-bg: #fff; + --phoenix-card-img-overlay-padding: 1rem; + --phoenix-card-group-margin: 1rem; + position: relative; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + min-width: 0; + height: var(--phoenix-card-height); + word-wrap: break-word; + background-color: var(--phoenix-card-bg); + background-clip: border-box; + border: var(--phoenix-card-border-width) solid var(--phoenix-card-border-color); + border-radius: var(--phoenix-card-border-radius); + -webkit-box-shadow: 
var(--phoenix-card-box-shadow); + box-shadow: var(--phoenix-card-box-shadow); +} +.h-100 { + height: 100% !important; +} +.card-body { + -webkit-box-flex: 1; + -ms-flex: 1 1 auto; + flex: 1 1 auto; + padding: var(--phoenix-card-spacer-y) var(--phoenix-card-spacer-x); + color: var(--phoenix-card-color); +} + +.justify-content-between { + -webkit-box-pack: justify !important; + -ms-flex-pack: justify !important; + justify-content: space-between !important; +} +.d-flex { + display: -webkit-box !important; + display: -ms-flexbox !important; + display: flex !important; +} +.pt-3 { + padding-top: 1rem !important; +} +.mb-2 { + margin-bottom: 0.5rem !important; +} +.align-items-center { + -webkit-box-align: center !important; + -ms-flex-align: center !important; + align-items: center !important; +} +.bullet-item { + height: 0.5rem; + width: 1rem; + border-radius: 2px; +} +.bg-primary { + --phoenix-bg-opacity: 1; + background-color: rgba(var(--phoenix-primary-rgb), var(--phoenix-bg-opacity)) !important; +} +.me-2 { + margin-right: 0.5rem !important; +} +.flex-1 { + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1; +} +.text-900 { + --phoenix-text-opacity: 1; + color: rgba(var(--phoenix-900-rgb), var(--phoenix-text-opacity)) !important; +} +.fw-semi-bold { + font-weight: 600 !important; +} +.mb-0 { + margin-bottom: 0 !important; +} +h6, .h6 { + font-size: 0.8rem; +} \ No newline at end of file diff --git a/static/favicon.ico b/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..7d2b5ffd61175d0dc0450166381b093d890bbfee Binary files /dev/null and b/static/favicon.ico differ diff --git a/static/template/application.html b/static/template/application.html new file mode 100644 index 0000000000000000000000000000000000000000..c92797bd17d9fcb5238854e32c29ce47db5e0834 --- /dev/null +++ b/static/template/application.html @@ -0,0 +1,97 @@ + + + + + + + + + + + + + +
+    <!-- [Template body: an "Input Sample" card containing a text input and
+         submit control, and a "Prediction Result" card that displays the
+         predicted intent and slot labels.] -->
+ + + + + \ No newline at end of file diff --git a/static/template/visualization.html b/static/template/visualization.html new file mode 100644 index 0000000000000000000000000000000000000000..ededde14d239f1dcb1487ff584f5039d5772a037 --- /dev/null +++ b/static/template/visualization.html @@ -0,0 +1,453 @@ + + + + + OpenSLU Visual Analysis + + + + + + + + + + +
+    <!-- [Template body: "Error Intent Label Distribution" and "Error Slot
+         Label Distribution" chart cards; an "Instance Analysis" card that
+         loops over `examples` and renders each token's intent/slot pair,
+         highlighting mismatches between golden and predicted labels; and a
+         "Page Index" pagination control.] -->
+ + + + + \ No newline at end of file diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/tools/__init__.py @@ -0,0 +1 @@ + diff --git a/tools/load_from_hugging_face.py b/tools/load_from_hugging_face.py new file mode 100644 index 0000000000000000000000000000000000000000..ad50ed015d0d883e42bad22d3cf1f58c1d7fe103 --- /dev/null +++ b/tools/load_from_hugging_face.py @@ -0,0 +1,71 @@ +''' +Author: Qiguang Chen +LastEditors: Qiguang Chen +Date: 2023-02-13 10:44:39 +LastEditTime: 2023-02-14 10:28:43 +Description: + +''' + +import os +import dill +from common import utils +from common.utils import InputData, download +from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer + + +# parser = argparse.ArgumentParser() +# parser.add_argument('--config_path', '-cp', type=str, default="config/reproduction/atis/joint_bert.yaml") +# args = parser.parse_args() +# config = Config.load_from_yaml(args.config_path) +# config.base["train"] = False +# config.base["test"] = False + +# model_manager = ModelManager(config) +# model_manager.load() + + +class PretrainedConfigForSLU(PretrainedConfig): + def __init__(self, **kargs) -> None: + super().__init__(**kargs) + +# pretrained_config = PretrainedConfigForSLU() +# # pretrained_config.push_to_hub("xxxx") + + +class PretrainedModelForSLU(PreTrainedModel): + def __init__(self, config: PretrainedConfig, *inputs, **kwargs) -> None: + super().__init__(config, *inputs, **kwargs) + self.config_class = config + self.model = utils.instantiate(config.model) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + cls.config_class = PretrainedConfigForSLU + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + + +class PreTrainedTokenizerForSLU(PreTrainedTokenizer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + dir_names = f"save/{pretrained_model_name_or_path}".split("/") + dir_name = "" + for name in dir_names: + dir_name += name+"/" + if not os.path.exists(dir_name): + os.mkdir(dir_name) + cache_path = f"./save/{pretrained_model_name_or_path}/tokenizer.pkl" + if not os.path.exists(cache_path): + download(f"https://huggingface.co/{pretrained_model_name_or_path}/resolve/main/tokenizer.pkl", cache_path) + with open(cache_path, "rb") as f: + tokenizer = dill.load(f) + return tokenizer + + +# pretrained_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +# pretrained_tokenizer = PreTrainedTokenizerForSLU.from_pretrained("LightChen2333/joint-bert-slu-atis") +# test_model = PretrainedModelForSLU.from_pretrained("LightChen2333/joint-bert-slu-atis") +# print(test_model(InputData([pretrained_tokenizer("I want to go to Beijing !")]))) \ No newline at end of file diff --git a/tools/parse_to_hugging_face.py b/tools/parse_to_hugging_face.py new file mode 100644 index 0000000000000000000000000000000000000000..4231d89f997ea42f16cc54b1489ef5b5a42c076d --- /dev/null +++ b/tools/parse_to_hugging_face.py @@ -0,0 +1,86 @@ +''' +Author: Qiguang Chen +LastEditors: Qiguang Chen +Date: 2023-02-13 10:44:39 +LastEditTime: 2023-02-19 15:45:08 +Description: + +''' + +import argparse +import sys +import os + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import dill + +from common.config import Config +from 
common.model_manager import ModelManager
+from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer
+
+
+# NOTE: the *ToSave classes below read the module-level `model_manager` that
+# is created in the __main__ block before any of them is instantiated.
+class PretrainedConfigForSLUToSave(PretrainedConfig):
+    def __init__(self, **kargs) -> None:
+        cfg = model_manager.config
+        kargs["name_or_path"] = cfg.base["name"]
+        kargs["return_dict"] = False
+        kargs["is_decoder"] = True
+        kargs["_id2label"] = {"intent": model_manager.intent_list, "slot": model_manager.slot_list}
+        kargs["_label2id"] = {"intent": model_manager.intent_dict, "slot": model_manager.slot_dict}
+        kargs["_num_labels"] = {"intent": len(model_manager.intent_list), "slot": len(model_manager.slot_list)}
+        kargs["tokenizer_class"] = cfg.base["name"]
+        kargs["vocab_size"] = model_manager.tokenizer.vocab_size
+        kargs["model"] = cfg.model
+        kargs["model"]["decoder"]["intent_classifier"]["intent_label_num"] = len(model_manager.intent_list)
+        kargs["model"]["decoder"]["slot_classifier"]["slot_label_num"] = len(model_manager.slot_list)
+        kargs["tokenizer"] = cfg.tokenizer
+        super().__init__(**kargs)
+
+
+class PretrainedModelForSLUToSave(PreTrainedModel):
+    def __init__(self, config: PretrainedConfig, *inputs, **kwargs) -> None:
+        super().__init__(config, *inputs, **kwargs)
+        self.model = model_manager.model
+        self.config_class = config
+
+
+class PreTrainedTokenizerForSLUToSave(PreTrainedTokenizer):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.tokenizer = model_manager.tokenizer
+
+    # Overrides PreTrainedTokenizer.save_vocabulary: the whole tokenizer is
+    # pickled with dill so that word-level tokenizers survive the round trip.
+    def save_vocabulary(self, save_directory: str, filename_prefix=None):
+        if filename_prefix is not None:
+            path = os.path.join(save_directory, filename_prefix + "-tokenizer.pkl")
+        else:
+            path = os.path.join(save_directory, "tokenizer.pkl")
+        with open(path, 'wb') as f:
+            dill.dump(self.tokenizer, f)
+        return (path,)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--config_path', '-cp', type=str, required=True)
+    parser.add_argument('--output_path', '-op', type=str, default="save/temp")
+    args = parser.parse_args()
+    config = Config.load_from_yaml(args.config_path)
+    config.base["train"] = False
+    config.base["test"] = False
+    if config.model_manager["load_dir"] is None:
+        config.model_manager["load_dir"] = config.model_manager["save_dir"]
+    model_manager = ModelManager(config)
+    model_manager.load()
+    model_manager.config.autoload_template()
+
+    pretrained_config = PretrainedConfigForSLUToSave()
+    pretrained_model = PretrainedModelForSLUToSave(pretrained_config)
+    pretrained_model.save_pretrained(args.output_path)
+
+    pretrained_tokenizer = PreTrainedTokenizerForSLUToSave()
+    pretrained_tokenizer.save_pretrained(args.output_path)
diff --git a/tools/visualization.py b/tools/visualization.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f9f18f26e5fb86f46c8209f2515125d2a6ffb31
--- /dev/null
+++ b/tools/visualization.py
@@ -0,0 +1,163 @@
+'''
+Author: Qiguang Chen
+LastEditors: Qiguang Chen
+Date: 2023-01-23 17:26:47
+LastEditTime: 2023-02-14 20:07:02
+Description: 
+
+'''
+import argparse
+import os
+import signal
+import sys
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import time
+from gradio import networking
+from common.utils import load_yaml, str2bool
+import json
+import threading
+
+from flask import Flask, request, render_template
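+
+# A sketch of the JSONL record format expected at `output_path` -- one JSON
+# object per line. Field names are taken from get_example()/analysis() below;
+# the concrete values here are illustrative only:
+#
+#   {"text": ["list", "flights", "to", "denver"],
+#    "golden_intent": "atis_flight", "pred_intent": "atis_flight",
+#    "golden_slot": ["O", "O", "O", "B-toloc.city_name"],
+#    "pred_slot": ["O", "O", "O", "B-fromloc.city_name"]}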
+
+
+def get_example(start, end, predict_data_file_path):
+    data_list = []
+    with open(predict_data_file_path, "r", encoding="utf8") as f1:
+        for index, line1 in enumerate(f1):
+            if index < start:
+                continue
+            if index > end:
+                break
+            line1 = json.loads(line1.strip())
+            obj = {"text": line1["text"]}
+            obj["intent"] = [{"intent": line1["golden_intent"],
+                              "pred_intent": line1["pred_intent"]}]
+            # Unpack in the same order as the zip() arguments; otherwise the
+            # golden and predicted slot labels end up swapped in the page.
+            obj["slot"] = [{"text": t, "pred_slot": ps, "slot": s} for t, ps, s in zip(
+                line1["text"], line1["pred_slot"], line1["golden_slot"])]
+            data_list.append(obj)
+    return data_list
+
+
+def analysis(predict_data_file_path):
+    intent_dict = {}
+    slot_dict = {}
+    sample_num = 0
+    with open(predict_data_file_path, "r", encoding="utf8") as f1:
+        for index, line1 in enumerate(f1):
+            sample_num += 1
+            line1 = json.loads(line1.strip())
+            for s, ps in zip(line1["golden_slot"], line1["pred_slot"]):
+                if s not in slot_dict:
+                    slot_dict[s] = {"_error_": 0, "_total_": 0}
+                if s != ps:
+                    slot_dict[s]["_error_"] += 1
+                    if ps not in slot_dict[s]:
+                        slot_dict[s][ps] = 0
+                    slot_dict[s][ps] += 1
+                slot_dict[s]["_total_"] += 1
+            for i, pi in zip([line1["golden_intent"]], [line1["pred_intent"]]):
+                if i not in intent_dict:
+                    intent_dict[i] = {"_error_": 0, "_total_": 0}
+                if i != pi:
+                    intent_dict[i]["_error_"] += 1
+                    if pi not in intent_dict[i]:
+                        intent_dict[i][pi] = 0
+                    intent_dict[i][pi] += 1
+                intent_dict[i]["_total_"] += 1
+    intent_dict_list = [{"value": intent_dict[name]["_error_"], "name": name} for name in intent_dict]
+
+    for intent in intent_dict_list:
+        temp_intent = sorted(
+            intent_dict[intent["name"]].items(), key=lambda d: d[1], reverse=True)
+        temp_intent = [[key, value] for key, value in temp_intent]
+        intent_dict[intent["name"]] = temp_intent
+    slot_dict_list = [{"value": slot_dict[name]["_error_"], "name": name} for name in slot_dict]
+
+    for slot in slot_dict_list:
+        temp_slot = sorted(
+            slot_dict[slot["name"]].items(), key=lambda d: d[1], reverse=True)
+        temp_slot = [[key, value] for key, value in temp_slot]
+        slot_dict[slot["name"]] = temp_slot
+    return intent_dict_list, slot_dict_list, intent_dict, slot_dict, sample_num
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--config_path', '-cp', type=str, default="config/visual.yaml")
+parser.add_argument('--output_path', '-op', type=str, default=None)
+parser.add_argument('--push_to_public', '-p', type=str2bool, nargs='?',
+                    const=True, default=None,
+                    help="Push to public network. (Higher priority than config file)")
+args = parser.parse_args()
+config = load_yaml(args.config_path)
+if args.output_path is not None:
+    config["output_path"] = args.output_path
+if args.push_to_public is not None:
+    config["is_push_to_public"] = args.push_to_public
+intent_dict_list, slot_dict_list, intent_dict, slot_dict, sample_num = analysis(config["output_path"])
+PAGE_SIZE = config["page-size"]
+# Ceiling division: avoids an extra empty page when sample_num is an exact
+# multiple of PAGE_SIZE.
+PAGE_NUM = (sample_num + PAGE_SIZE - 1) // PAGE_SIZE
+
+app = Flask(__name__, template_folder="static/template")
+
+
+@app.route("/")
+def hello():
+    page = request.args.get('page')
+    if page is None:
+        page = 0
+    page = int(page) if int(page) >= 0 else 0
+    init_index = page * PAGE_SIZE
+    examples = get_example(init_index, init_index +
+                           PAGE_SIZE - 1, config["output_path"])
+    return render_template('visualization.html',
+                           examples=examples,
+                           intent_dict_list=intent_dict_list,
+                           slot_dict_list=slot_dict_list,
+                           intent_dict=intent_dict,
+                           slot_dict=slot_dict,
+                           page=page)
+
+
+class PushToPublicThread():
+    def __init__(self, config) -> None:
+        self.thread = threading.Thread(target=self.push_to_public, args=(config,))
+        self.thread_lock_2 = False
+        self.thread.daemon = True
+
+    def start(self):
+        self.thread.start()
+
+    def push_to_public(self, config):
+        print("Pushing visualization results to the public network via Gradio...")
+        print("Push to URL: ", networking.setup_tunnel(config["host"], str(config["port"])))
+        print("This share link expires in 72 hours. Do not close this process while sharing publicly.")
+        # Keep the worker alive (without busy-waiting) until exit() is called.
+        while not self.thread_lock_2:
+            time.sleep(1)
+
+    def exit(self, signum, frame):
+        self.thread_lock_2 = True
+        print("Exiting...")
+        os._exit(0)
+
+
+if __name__ == '__main__':
+    if config["is_push_to_public"]:
+        thread_1 = threading.Thread(target=lambda: app.run(
+            config["host"], config["port"]))
+        thread_1.start()
+        thread_2 = PushToPublicThread(config)
+        signal.signal(signal.SIGINT, thread_2.exit)
+        signal.signal(signal.SIGTERM, thread_2.exit)
+        thread_2.start()
+        while True:
+            time.sleep(1)
+    else:
+        app.run(config["host"], config["port"])
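+
+# Usage sketch (paths are the defaults used above; assumes `outputs.jsonl` was
+# produced by a finished training/test run):
+#   python tools/visualization.py --config_path config/visual.yaml \
+#       --output_path save/stack/outputs.jsonl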