import datetime
import json
import os
import shutil
from typing import Optional
from typing import Tuple
from typing import Union

import gradio as gr
import requests
import torch
from fastchat.conversation import Conversation
from fastchat.conversation import SeparatorStyle
from fastchat.conversation import compute_skip_echo_len
from fastchat.conversation import get_default_conv_template
from fastchat.serve.cli import SimpleChatIO
from fastchat.serve.inference import generate_stream
from huggingface_hub import Repository
from huggingface_hub import snapshot_download
from peft import LoraConfig
from peft import PeftModel
from peft import get_peft_model
from peft import set_peft_model_state_dict
from transformers import LlamaForCausalLM
from transformers import LlamaTokenizer
from transformers import PreTrainedModel
from transformers import PreTrainedTokenizerBase


def load_lora_model(
    model_path: str,
    lora_weight: str,
    device: str,
    num_gpus: int,
    max_gpu_memory: Optional[str] = None,
    load_8bit: bool = False,
    debug: bool = False,
) -> Tuple[Union[PreTrainedModel, PeftModel], PreTrainedTokenizerBase]:
    model: Union[PreTrainedModel, PeftModel]
    tokenizer: PreTrainedTokenizerBase

    tokenizer = LlamaTokenizer.from_pretrained(model_path)
    model = LlamaForCausalLM.from_pretrained(
        model_path,
        load_in_8bit=load_8bit,
        device_map="auto" if device == "cuda" else {"": device},
        max_memory={i: max_gpu_memory for i in range(num_gpus)},
        torch_dtype=torch.float16,
    )
    if lora_weight is not None:
        # model = PeftModelForCausalLM.from_pretrained(model, model_path, **kwargs)
        config = LoraConfig.from_pretrained(lora_weight)
        model = get_peft_model(model, config)

        # Check the available weights and load them
        checkpoint_name = os.path.join(
            lora_weight, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                lora_weight, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
        # The two files above have a different name depending on how they were saved,
        # but are actually the same.
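        # Load whichever adapter checkpoint was found above into the PEFT-wrapped model.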
        if os.path.exists(checkpoint_name):
            adapters_weights = torch.load(checkpoint_name)
            set_peft_model_state_dict(model, adapters_weights)
        else:
            raise IOError(f"Checkpoint {checkpoint_name} not found")

    if debug:
        print(model)

    return model, tokenizer


print(datetime.datetime.now())

NUM_THREADS = 1
print(NUM_THREADS)
print("starting server ...")

BASE_MODEL = "decapoda-research/llama-13b-hf"
LORA_WEIGHTS_HF = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
HF_TOKEN = os.environ.get("HF_TOKEN", None)
DATASET_REPOSITORY = os.environ.get("DATASET_REPOSITORY", None)
SLACK_WEBHOOK = os.environ.get("SLACK_WEBHOOK", None)

LORA_WEIGHTS = snapshot_download(LORA_WEIGHTS_HF)

repo = None
LOCAL_DIR = "/home/user/data/"
if HF_TOKEN and DATASET_REPOSITORY:
    try:
        shutil.rmtree(LOCAL_DIR)
    except Exception:
        pass

    repo = Repository(
        local_dir=LOCAL_DIR,
        clone_from=DATASET_REPOSITORY,
        use_auth_token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

model, tokenizer = load_lora_model(
    model_path=BASE_MODEL,
    lora_weight=LORA_WEIGHTS,
    device=device,
    num_gpus=1,
    max_gpu_memory="16GiB",
    load_8bit=True,
    debug=False,
)

# Keep references to the original Conversation methods; the overrides below delegate to them.
Conversation._get_prompt = Conversation.get_prompt
Conversation._append_message = Conversation.append_message


def conversation_append_message(cls, role: str, message: str):
    cls.offset = -2
    return cls._append_message(role, message)


def conversation_get_prompt_overrider(cls: Conversation) -> str:
    # Keep only the most recent user/assistant pair so each request is single-turn.
    cls.messages = cls.messages[-2:]
    return cls._get_prompt()


def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs):
    # Append the prompt/completion pair to an hourly JSONL file in the dataset repo and push it.
    current_hour = now.strftime("%Y-%m-%d_%H")
    file_name = f"prompts_{LORA_WEIGHTS_HF.split('/')[-1]}_{current_hour}.jsonl"

    if repo is not None:
        repo.git_pull(rebase=True)
        with open(os.path.join(LOCAL_DIR, file_name), "a", encoding="utf-8") as f:
            json.dump(
                {
                    "inputs": inputs,
                    "outputs": outputs,
                    "generate_kwargs": generate_kwargs,
                },
                f,
                ensure_ascii=False,
            )
            f.write("\n")
        repo.push_to_hub()


# we can't add typing now
# https://github.com/gradio-app/gradio/issues/3514
def evaluate(
    instruction,
    temperature=0.7,
    max_tokens=256,
    repetition_penalty=1.0,
):
    try:
        inputs = tokenizer(instruction, return_tensors="pt")
        # Reject prompts that leave too little room for generation.
        if len(inputs["input_ids"][0]) > max_tokens - 40:
            if HF_TOKEN and DATASET_REPOSITORY:
                try:
                    now = datetime.datetime.now()
                    current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                    print(f"[{current_time}] Pushing prompt and completion to the Hub")
                    save_inputs_and_outputs(
                        now,
                        instruction,
                        "",
                        {
                            "temperature": temperature,
                            "max_tokens": max_tokens,
                            "repetition_penalty": repetition_penalty,
                        },
                    )
                except Exception as e:
                    print(e)
            return (
                "Please reduce the input length. "
                f"Currently, {len(inputs['input_ids'][0])} ( > {max_tokens - 40}) tokens are used.",
                gr.update(interactive=True),
                gr.update(interactive=True),
            )

        conv = get_default_conv_template(BASE_MODEL).copy()
        conv.append_message(conv.roles[0], instruction)
        conv.append_message(conv.roles[1], None)
        generate_stream_func = generate_stream
        prompt = conv.get_prompt()
        skip_echo_len = compute_skip_echo_len(BASE_MODEL, conv, prompt)

        gen_params = {
            "model": BASE_MODEL,
            "prompt": prompt,
            "temperature": temperature,
            "max_new_tokens": max_tokens - len(inputs["input_ids"][0]) - 30,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else None,
        }

        chatio = SimpleChatIO()
        chatio.prompt_for_output(conv.roles[1])
        output_stream = generate_stream_func(model, tokenizer, gen_params, device)
        output = chatio.stream_output(output_stream, skip_echo_len)

        if HF_TOKEN and DATASET_REPOSITORY:
            try:
                now = datetime.datetime.now()
                current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                print(f"[{current_time}] Pushing prompt and completion to the Hub")
                save_inputs_and_outputs(
                    now,
                    prompt,
                    output,
                    {
                        "temperature": temperature,
                        "max_tokens": max_tokens,
                        "repetition_penalty": repetition_penalty,
                    },
                )
            except Exception as e:
                print(e)

        return output, gr.update(interactive=True), gr.update(interactive=True)
    except Exception as e:
        print(e)
        import traceback

        # Report the failure to Slack if a webhook is configured.
        if SLACK_WEBHOOK:
            payload_dic = {
                "text": f"BASE_MODEL: {BASE_MODEL}\n LORA_WEIGHTS: {LORA_WEIGHTS_HF}\n"
                + f"instruction: {instruction}\ntemperature: {temperature}\n"
                + f"max_tokens: {max_tokens}\nrepetition_penalty: {repetition_penalty}\n\n"
                + str(traceback.format_exc()),
                "username": "Hugging Face Space",
                "channel": "#monitor",
            }
            try:
                requests.post(SLACK_WEBHOOK, data=json.dumps(payload_dic))
            except Exception:
                pass
        return (
            "An error happened. Please try again later.",
            gr.update(interactive=True),
            gr.update(interactive=True),
        )


def reset_textbox():
    return gr.update(value=""), gr.update(value=""), gr.update(value="")


def no_interactive() -> Tuple[gr.Request, gr.Request]:
    return gr.update(interactive=False), gr.update(interactive=False)


title = """