from transformers import AutoTokenizer, AutoModelForCausalLM

# Note: transformers has no GPT3Tokenizer/GPT3LMHeadModel, and "gpt-3.5-turbo"
# is an OpenAI API model, not a Hugging Face checkpoint; the open GPT-2
# checkpoint is used here as a runnable stand-in.

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Load the model
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Define the conversation loop
while True:
    # Capture user input
    user_input = input("User: ")

    # Format user input as a prompt
    prompt = "User: " + user_input

    # Generate the model response
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    model_output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)

    # Decode and display only the newly generated tokens (the output
    # tensor also contains the prompt, so slice it off before decoding)
    model_response = tokenizer.decode(model_output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    print("Bot: " + model_response)
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/chatbot', methods=['POST'])
def chatbot():
    # Read the user's message from the JSON request body
    user_input = request.json['user_input']
    prompt = "User: " + user_input

    # Generate a reply with the tokenizer and model loaded above
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    model_output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
    model_response = tokenizer.decode(model_output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return jsonify({'bot_response': model_response})

if __name__ == '__main__':
    app.run(debug=True)
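With the server running, the endpoint can be exercised from any HTTP client. Here is a quick sketch using the requests library; the route and JSON keys match the handler above, and http://127.0.0.1:5000 is Flask's default development address.

import requests

# Post a message to the local /chatbot endpoint and print the reply
resp = requests.post("http://127.0.0.1:5000/chatbot", json={"user_input": "Hello there!"})
print(resp.json()["bot_response"])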