import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Unused high-level pipeline helper (kept for reference; it would load the 7B model a second time):
# from transformers import pipeline
# pipe = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1")

# Load the model and tokenizer directly.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")

title = "mistralai/Mistral-7B-v0.1"
description = ""
examples = [["How are you?"]]

# Alternative checkpoints (swap in by uncommenting):
# tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
# model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
# model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")


def predict(input, history=[]):
    # Tokenize the new user message and terminate it with the EOS token.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append the new user input tokens to the chat history.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate a response; the returned token ids become the new history.
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the full conversation and split it into turns on the EOS token.
    # Using the tokenizer's own EOS (rather than a hard-coded "<|endoftext|>")
    # keeps the split working for models whose EOS differs, e.g. Mistral's "</s>".
    response = tokenizer.decode(history[0]).split(tokenizer.eos_token)

    # Pair the turns into (user, bot) tuples, as expected by the Chatbot output.
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history


gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
).launch()