import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import torch
from langchain.memory import ConversationBufferMemory

# Select the device (GPU if available, otherwise CPU)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the tokenizer (the pre-trained tokenizer for the GPT-2 family works here)
tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")

# Manually create a configuration for the model (since we don't have config.json)
config = GPT2Config.from_pretrained("distilgpt2")

# Initialize the model using the manually created configuration
model = GPT2LMHeadModel(config)

# Load the weights from the pytorch_model.bin file
model_path = "./pytorch_model_100.bin"          # Path to the local model file
state_dict = torch.load(model_path, map_location=device)  # Load the state_dict
model.load_state_dict(state_dict)               # Load the state dict into the model

# Move the model to the device (GPU or CPU)
model.to(device)

# Set up conversational memory using LangChain's ConversationBufferMemory
memory = ConversationBufferMemory()

# Define the chatbot function with memory and additional parameters
def chat_with_distilgpt2(input_text, temperature, top_p, top_k):
    # Retrieve the conversation history
    conversation_history = memory.load_memory_variables({})["history"]

    # Combine the history (if any) with the current user input
    if conversation_history:
        prompt = f"{conversation_history}\nQuestion: {input_text}\nAnswer:"
    else:
        prompt = f"Question: {input_text}\nAnswer:"

    # Tokenize the prompt and move it to the device
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

    # Generate the response; do_sample=True is required for temperature/top_p/top_k to take effect
    outputs = model.generate(
        input_ids,
        max_new_tokens=50,               # Limit the number of newly generated tokens
        do_sample=True,                  # Enable sampling so the sliders below have an effect
        num_return_sequences=1,
        no_repeat_ngram_size=3,
        repetition_penalty=1.2,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        temperature=temperature,         # Temperature from the slider
        top_p=top_p,                     # Top-p from the slider
        top_k=top_k,                     # Top-k from the slider
    )

    # Decode only the newly generated tokens (skip the prompt)
    response = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)

    # Update the memory with the user input and model response
    memory.save_context({"input": input_text}, {"output": response})

    return response

# Set up the Gradio interface with additional sliders
interface = gr.Interface(
    fn=chat_with_distilgpt2,
    inputs=[
        gr.Textbox(label="Chat with DistilGPT-2"),                      # User input text
        gr.Slider(0.1, 1.0, step=0.1, value=1.0, label="Temperature"),  # Slider for temperature
        gr.Slider(0.0, 1.0, step=0.1, value=1.0, label="Top-p"),        # Slider for top-p
        gr.Slider(1, 100, step=1, value=50, label="Top-k"),             # Slider for top-k
    ],
    outputs=gr.Textbox(label="DistilGPT-2's Response"),                 # Model response
    title="DistilGPT-2 Chatbot with Memory and Adjustable Parameters",
    description="This is a simple chatbot powered by the DistilGPT-2 model with conversational memory, using LangChain. You can adjust temperature, top-p, and top-k using the sliders.",
)

# Launch the Gradio app
interface.launch()

How can this be modified to give the results for both a baseline DistilGPT-2 and the fine-tuned version for each input prompt?
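
One possible approach (a minimal sketch, not a definitive answer): load a second, stock copy of DistilGPT-2 from the Hugging Face Hub alongside the fine-tuned model above, generate from both with the same prompt and sampling settings, and return two values from the Gradio function so they appear in two output boxes. The names baseline_model, generate_response, and chat_with_both are illustrative and not part of the original script; it also assumes "distilgpt2" on the Hub is the intended baseline, and it omits the LangChain memory for brevity (separate memories per model could be added the same way as above).

# Sketch: compare the fine-tuned model above with a baseline DistilGPT-2.
# Reuses `tokenizer`, `device`, and the fine-tuned `model` from the script above.

baseline_model = GPT2LMHeadModel.from_pretrained("distilgpt2").to(device)  # stock weights from the Hub
baseline_model.eval()
model.eval()

def generate_response(m, prompt, temperature, top_p, top_k):
    # Shared generation helper so both models use identical settings
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    outputs = m.generate(
        input_ids,
        max_new_tokens=50,
        do_sample=True,
        no_repeat_ngram_size=3,
        repetition_penalty=1.2,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Return only the newly generated continuation
    return tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)

def chat_with_both(input_text, temperature, top_p, top_k):
    prompt = f"Question: {input_text}\nAnswer:"
    finetuned_reply = generate_response(model, prompt, temperature, top_p, top_k)
    baseline_reply = generate_response(baseline_model, prompt, temperature, top_p, top_k)
    return finetuned_reply, baseline_reply

# Two output components; the function's two return values map to them in order
interface = gr.Interface(
    fn=chat_with_both,
    inputs=[
        gr.Textbox(label="Chat with DistilGPT-2"),
        gr.Slider(0.1, 1.0, step=0.1, value=1.0, label="Temperature"),
        gr.Slider(0.0, 1.0, step=0.1, value=1.0, label="Top-p"),
        gr.Slider(1, 100, step=1, value=50, label="Top-k"),
    ],
    outputs=[
        gr.Textbox(label="Fine-tuned DistilGPT-2"),
        gr.Textbox(label="Baseline DistilGPT-2"),
    ],
    title="Fine-tuned vs. Baseline DistilGPT-2",
)

interface.launch()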