fletch1300 committed
Commit
9c3fdba
1 Parent(s): 7fb554f

Update handler.py

Files changed (1)
  1. handler.py +6 -17
handler.py CHANGED
@@ -43,31 +43,20 @@ class EndpointHandler:
         return self.tokenizer.decode(tokens)
         return text
 
+
     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
         user_prompt = data.pop("inputs", data)
 
-        # Add the user's message to the conversation history
-        self.conversation_history += f"<user>: {user_prompt}\n"
-
-        # Ensure the conversation history is within token limit
-        self.conversation_history = self._ensure_token_limit(self.conversation_history)
-
-        # Add the permanent context, user's prompt, and conversation history
-        permanent_context = "<context>: You are a life coaching bot with the goal of providing guidance, improving understanding, reducing suffering and improving life. Gain as much understanding of the user before providing guidance."
-        structured_prompt = f"{permanent_context}\n{self.conversation_history}<bot> response:"
+        # Permanent context
+        permanent_context = "<context>: You are a life coaching bot..."
+        structured_prompt = f"{permanent_context}\n<bot> response:"
 
         result = self.pipeline(structured_prompt, generation_config=self.generate_config)
 
-        # Extract only the bot's response without the structuring text
+        # Ensure _extract_response is defined and works as intended
        response_text = self._extract_response(result[0]['generated_text'])
 
-        # Remove the last "<bot>" from the response_text
+        # Trimming response
         response_text = response_text.rsplit("[END", 1)[0].strip()
-
-        # Add the bot's response to the conversation history
-        self.conversation_history += f"<bot>: {response_text}\n"
-        self.conversation_history = self._ensure_token_limit(self.conversation_history)
 
-        return [{"generated_text": response_text}]
-
         return {"response": response_text}
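
After this commit, __call__ no longer threads self.conversation_history through the prompt, so each request is handled statelessly. Below is a minimal sketch of how the handler might read in full. The constructor, the model path, and the _extract_response body are not part of this diff, so they are illustrative assumptions rather than the repository's actual code. Note also that, as committed, user_prompt is popped from the payload but never interpolated into structured_prompt; the sketch adds it back on the assumption that this was the intent.

    from typing import Any, Dict

    from transformers import GenerationConfig, pipeline


    class EndpointHandler:
        def __init__(self, path: str = ""):
            # Assumed setup: the diff shows only __call__ and the tail of the
            # token-limit helper, so this constructor is a guess.
            self.pipeline = pipeline("text-generation", model=path)
            self.generate_config = GenerationConfig(max_new_tokens=256)

        def _extract_response(self, generated_text: str) -> str:
            # Assumed helper: keep only what the model produced after the
            # "<bot> response:" marker appended by the prompt template.
            return generated_text.split("<bot> response:", 1)[-1].strip()

        def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
            user_prompt = data.pop("inputs", data)

            # Permanent context
            permanent_context = "<context>: You are a life coaching bot..."
            # As committed, user_prompt is unused; including it here is an
            # assumption about the intended prompt template.
            structured_prompt = f"{permanent_context}\n<user>: {user_prompt}\n<bot> response:"

            result = self.pipeline(structured_prompt, generation_config=self.generate_config)

            response_text = self._extract_response(result[0]["generated_text"])

            # Trim a trailing "[END ..." marker if the model emitted one
            response_text = response_text.rsplit("[END", 1)[0].strip()

            return {"response": response_text}

Called with a payload like {"inputs": "I feel stuck lately."}, the handler returns {"response": "..."}. Dropping the self.conversation_history bookkeeping means repeated calls share no state, which matches the stateless request/response contract of an inference endpoint.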