cool-radio committed on
Commit d29a72a
1 Parent(s): ebdc039

Update app.py

Files changed (1): app.py +48 -8
app.py CHANGED
@@ -1,36 +1,75 @@
 import gradio as gr
 import openai
 import os
+import json
+from datetime import datetime
 
 # Setup and initialization
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
+# Shared Session Log
+session_log = {
+    "session_id": "S1",
+    "interactions": [],
+    "outcome": {"gatekeeper_decision": "pending", "persuasion_strategy": "ongoing", "ai_influence_metric": 0}
+}
+
 # Function Definitions
 
 def gatekeeper_chat(message, chat_history):
-    """Handles the Gatekeeper chat functionality."""
+    # """Handles the Gatekeeper chat functionality."""
     prompt = "As a gatekeeper, enforce the rules: " + "\n".join([m['content'] for m in chat_history]) + "\n" + message
-    return openai_chat(prompt, chat_history)
+    response, chat_history = openai_chat(prompt, chat_history)
+    update_session_log("HP1", message, response)
+    return response, chat_history
 
 def persuader_chat(message, chat_history):
-    """Handles the Persuader chat functionality."""
-    prompt = "As a persuader, try to convince within the rules: " + "\n".join([m['content'] for m in chat_history]) + "\n" + message
-    return openai_chat(prompt, chat_history)
+    # """Handles the Persuader chat functionality."""
+    # The message could be a direct message or a request for analysis/suggestions
+    if message.startswith("#analyze"):
+        response = analyze_interaction()
+    else:
+        response = "As a persuader, I suggest: " + message
+    return response, chat_history
 
 def openai_chat(prompt, chat_history):
     """Generic function to handle chatting with OpenAI's GPT model."""
     try:
-        response = openai.Completion.create(
+        # Updated API call: Using openai.ChatCompletion.create instead of openai.Completion.create
+        # The 'messages' parameter now requires a list of message objects, each with a 'role' and 'content'.
+        response = openai.ChatCompletion.create(
             model="text-davinci-003",
-            prompt=prompt,
+            messages=[
+                {"role": "assistant", "content": prompt}
+            ],
             max_tokens=150
         )
-        bot_message = response.choices[0].text.strip()
+        # The response structure has changed: Accessing message content via response.choices[0].message['content']
+        bot_message = response.choices[0].message['content']
         chat_history.append({"role": "assistant", "content": bot_message})
         return '', chat_history
     except Exception as e:
+        # Error handling remains the same
         return f"An error occurred: {str(e)}", chat_history
 
+def update_session_log(actor, message, response):
+    # """Updates the session log with the latest interaction."""
+    session_log["interactions"].append({
+        "timestamp": datetime.now().isoformat(),
+        "actor": actor,
+        "message": message,
+        "gatekeeper_response": response
+    })
+
+def analyze_interaction():
+    # """Provides analysis or suggestions based on the session log."""
+    # Implement analysis logic here based on session_log
+    latest_interaction = session_log["interactions"][-1] if session_log["interactions"] else None
+    if latest_interaction:
+        # Example analysis logic
+        return f"Latest gatekeeper response: {latest_interaction['gatekeeper_response']}"
+    return "No interactions to analyze."
+
 # Gradio Interface Layout
 
 with gr.Blocks() as app:
@@ -46,3 +85,4 @@ with gr.Blocks() as app:
 
 # Launch the app
 app.launch()
+
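
Note on the new openai_chat: openai.ChatCompletion.create accepts only chat-capable models, and text-davinci-003 (kept by this commit) is a completions-only model; the prompt is also conventionally sent under the "user" role rather than "assistant". A minimal sketch of the same call shape under those assumptions follows; gpt-3.5-turbo is an illustrative model choice, not part of this commit.

import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

def openai_chat(prompt, chat_history):
    # Sketch only: assumes a chat-capable model; text-davinci-003 is rejected by the chat endpoint.
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # illustrative chat model (assumption, not in the commit)
            messages=[{"role": "user", "content": prompt}],  # prompt sent as the user's turn
            max_tokens=150
        )
        bot_message = response.choices[0].message['content']
        chat_history.append({"role": "assistant", "content": bot_message})
        return '', chat_history
    except Exception as e:
        return f"An error occurred: {str(e)}", chat_history

Usage note on the new persuader_chat: a message beginning with "#analyze" is routed to analyze_interaction(), which reports the last entry appended by update_session_log; any other message is echoed back with a suggestion prefix and never reaches the OpenAI API.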