import datetime
import json
import os
import shutil
from typing import Optional
from typing import Tuple
from typing import Union

import gradio as gr
import requests
import torch
import transformers
from fastchat.conversation import Conversation
from fastchat.conversation import get_default_conv_template
from fastchat.serve.cli import SimpleChatIO
from fastchat.serve.inference import generate_stream
from fastchat.serve.inference import load_model
from huggingface_hub import Repository
from huggingface_hub import snapshot_download
from peft import LoraConfig
from peft import PeftModel
from peft import get_peft_model
from peft import set_peft_model_state_dict
from transformers import LlamaForCausalLM
from transformers import LlamaTokenizer
from transformers import PreTrainedModel
from transformers import PreTrainedTokenizerBase

# Route the Auto* loaders to the LLaMA-specific classes; the decapoda-research
# checkpoints are not resolved correctly by the Auto classes.
transformers.AutoTokenizer.from_pretrained = LlamaTokenizer.from_pretrained
transformers.AutoModelForCausalLM.from_pretrained = LlamaForCausalLM.from_pretrained


def load_lora_model(
    model_path: str,
    lora_weight: str,
    device: str,
    num_gpus: int,
    max_gpu_memory: Optional[str] = None,
    load_8bit: bool = False,
    cpu_offloading: bool = False,
    debug: bool = False,
) -> Tuple[Union[PreTrainedModel, PeftModel], PreTrainedTokenizerBase]:
    model: Union[PreTrainedModel, PeftModel]
    tokenizer: PreTrainedTokenizerBase
    model, tokenizer = load_model(
        model_path=model_path,
        device=device,
        num_gpus=num_gpus,
        max_gpu_memory=max_gpu_memory,
        load_8bit=load_8bit,
        cpu_offloading=cpu_offloading,
        debug=debug,
    )
    if lora_weight is not None:
        # model = PeftModelForCausalLM.from_pretrained(model, model_path, **kwargs)
        config = LoraConfig.from_pretrained(lora_weight)
        model = get_peft_model(model, config)

        # Check the available weights and load them
        checkpoint_name = os.path.join(
            lora_weight, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                lora_weight, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
        # The two files above have a different name depending on how they were saved,
        # but are actually the same.
        if os.path.exists(checkpoint_name):
            adapters_weights = torch.load(checkpoint_name)
            set_peft_model_state_dict(model, adapters_weights)
        else:
            raise IOError(f"Checkpoint {checkpoint_name} not found")

    if debug:
        print(model)

    return model, tokenizer


print(datetime.datetime.now())

NUM_THREADS = 1

print(NUM_THREADS)
print("starting server ...")

BASE_MODEL = "decapoda-research/llama-13b-hf"
LORA_WEIGHTS_HF = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
HF_TOKEN = os.environ.get("HF_TOKEN", None)
DATASET_REPOSITORY = os.environ.get("DATASET_REPOSITORY", None)
SLACK_WEBHOOK = os.environ.get("SLACK_WEBHOOK", None)

LORA_WEIGHTS = snapshot_download(LORA_WEIGHTS_HF)

repo = None
LOCAL_DIR = "/home/user/data/"
if HF_TOKEN and DATASET_REPOSITORY:
    try:
        shutil.rmtree(LOCAL_DIR)
    except Exception:
        pass

    repo = Repository(
        local_dir=LOCAL_DIR,
        clone_from=DATASET_REPOSITORY,
        use_auth_token=HF_TOKEN,
        repo_type="dataset",
    )
    repo.git_pull()

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

model, tokenizer = load_lora_model(
    model_path=BASE_MODEL,
    lora_weight=LORA_WEIGHTS,
    device=device,
    num_gpus=1,
    max_gpu_memory="16GiB",
    load_8bit=True,
    cpu_offloading=False,
    debug=False,
)

# Keep references to the original FastChat Conversation methods so the
# overrides below can delegate to them.
Conversation._get_prompt = Conversation.get_prompt
Conversation._append_message = Conversation.append_message


def conversation_append_message(cls, role: str, message: str):
    cls.offset = -2
    return cls._append_message(role, message)


def conversation_get_prompt_overrider(cls: Conversation) -> str:
    cls.messages = cls.messages[-2:]
    return cls._get_prompt()


def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs):
    current_hour = now.strftime("%Y-%m-%d_%H")
    file_name = f"prompts_{LORA_WEIGHTS.split('/')[-1]}_{current_hour}.jsonl"

    if repo is not None:
        repo.git_pull(rebase=True)
        with open(os.path.join(LOCAL_DIR, file_name), "a", encoding="utf-8") as f:
            json.dump(
                {
                    "inputs": inputs,
                    "outputs": outputs,
                    "generate_kwargs": generate_kwargs,
                },
                f,
                ensure_ascii=False,
            )
            f.write("\n")
        repo.push_to_hub()


# We can't add type hints for now:
# https://github.com/gradio-app/gradio/issues/3514
def evaluate(
    instruction,
    temperature=0.7,
    max_tokens=256,
    repetition_penalty=1.0,
):
    try:
        inputs = tokenizer(instruction, return_tensors="pt")
        if len(inputs["input_ids"][0]) > max_tokens - 40:
            if HF_TOKEN and DATASET_REPOSITORY:
                try:
                    now = datetime.datetime.now()
                    current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                    print(f"[{current_time}] Pushing prompt and completion to the Hub")
                    save_inputs_and_outputs(
                        now,
                        instruction,
                        "",
                        {
                            "temperature": temperature,
                            "max_tokens": max_tokens,
                            "repetition_penalty": repetition_penalty,
                        },
                    )
                except Exception as e:
                    print(e)
            return (
                f"Please reduce the input length. Currently, "
                f"{len(inputs['input_ids'][0])} (> {max_tokens - 40}) tokens are used.",
                gr.update(interactive=True),
                gr.update(interactive=True),
            )

        conv = get_default_conv_template(BASE_MODEL).copy()
        conv.append_message(conv.roles[0], instruction)
        conv.append_message(conv.roles[1], None)
        generate_stream_func = generate_stream
        prompt = conv.get_prompt()

        gen_params = {
            "model": BASE_MODEL,
            "prompt": prompt,
            "temperature": temperature,
            "max_new_tokens": max_tokens - len(inputs["input_ids"][0]) - 30,
            "stop": conv.stop_str,
            "stop_token_ids": conv.stop_token_ids,
            "echo": False,
            "repetition_penalty": repetition_penalty,
        }

        chatio = SimpleChatIO()
        chatio.prompt_for_output(conv.roles[1])
        output_stream = generate_stream_func(model, tokenizer, gen_params, device)
        output = chatio.stream_output(output_stream)

        if HF_TOKEN and DATASET_REPOSITORY:
            try:
                now = datetime.datetime.now()
                current_time = now.strftime("%Y-%m-%d %H:%M:%S")
                print(f"[{current_time}] Pushing prompt and completion to the Hub")
                save_inputs_and_outputs(
                    now,
                    prompt,
                    output,
                    {
                        "temperature": temperature,
                        "max_tokens": max_tokens,
                        "repetition_penalty": repetition_penalty,
                    },
                )
            except Exception as e:
                print(e)

        return output, gr.update(interactive=True), gr.update(interactive=True)
    except Exception as e:
        print(e)
        import traceback

        if SLACK_WEBHOOK:
            payload_dic = {
                "text": f"BASE_MODEL: {BASE_MODEL}\nLORA_WEIGHTS: {LORA_WEIGHTS}\n"
                + f"instruction: {instruction}\ntemperature: {temperature}\n"
                + f"max_tokens: {max_tokens}\nrepetition_penalty: {repetition_penalty}\n\n"
                + str(traceback.format_exc()),
                "username": "Hugging Face Space",
                "channel": "#monitor",
            }
            try:
                requests.post(SLACK_WEBHOOK, data=json.dumps(payload_dic))
            except Exception:
                pass
        return (
            "An error happened. Please try again later.",
            gr.update(interactive=True),
            gr.update(interactive=True),
        )


def reset_textbox():
    return gr.update(value=""), gr.update(value=""), gr.update(value="")


def no_interactive() -> Tuple[dict, dict]:
    return gr.update(interactive=False), gr.update(interactive=False)


title = """