XThomasBU committed
Commit 3a1356f
1 Parent(s): 0cab995

logging fixed
.gitignore CHANGED
@@ -169,4 +169,5 @@ code/.chainlit/translations/
 storage/logs/*
 vectorstores/*
 
-*/.files/*
+*/.files/*
+code/storage/models/
code/.chainlit/config.toml CHANGED
@@ -61,8 +61,8 @@ name = "AI Tutor"
 # Large size content are by default collapsed for a cleaner ui
 default_collapse_content = true
 
-# Hide the chain of thought details from the user in the UI.
-hide_cot = true
+# Chain of Thought (CoT) display mode. Can be "hidden", "tool_call" or "full".
+cot = "hidden"
 
 # Link to your github repo. This will add a github button in the UI's header.
 # github = "https://github.com/DL4DS/dl4ds_tutor"
@@ -117,4 +117,4 @@ custom_meta_image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/f/
 #secondary = "#BDBDBD"
 
 [meta]
-generated_by = "1.1.306"
+generated_by = "1.1.402"
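Note on this hunk: newer Chainlit releases replace the boolean hide_cot flag with the string-valued cot setting, so cot = "hidden" preserves the old behavior of hiding chain-of-thought details from the user, while "tool_call" and "full" progressively expose more of the intermediate steps. The generated_by bump to 1.1.402 records the Chainlit version that regenerated this file.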
code/main.py CHANGED
@@ -23,11 +23,11 @@ from chainlit.types import ThreadDict
 import time
 
 USER_TIMEOUT = 60_000
-SYSTEM = "System 🖥️"
-LLM = "LLM 🧠"
-AGENT = "Agent <>"
-YOU = "You 😃"
-ERROR = "Error 🚫"
+SYSTEM = "System"
+LLM = "AI Tutor"
+AGENT = "Agent"
+YOU = "User"
+ERROR = "Error"
 
 with open("modules/config/config.yml", "r") as f:
     config = yaml.safe_load(f)
@@ -111,11 +111,6 @@ class Chatbot:
         )  # update only llm attributes that are changed
         self.chain = self.llm_tutor.qa_bot(
             memory=conversation_list,
-            callbacks=(
-                [cl.LangchainCallbackHandler()]
-                if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
-                else None
-            ),
         )
 
         cl.user_session.set("chain", self.chain)
@@ -279,7 +274,7 @@ class Chatbot:
         Returns:
             str: The renamed author.
         """
-        rename_dict = {"Chatbot": "AI Tutor"}
+        rename_dict = {"Chatbot": LLM}
         return rename_dict.get(orig_author, orig_author)
 
     async def start(self, config=None):
@@ -318,11 +313,6 @@ class Chatbot:
 
         self.chain = self.llm_tutor.qa_bot(
             memory=memory,
-            callbacks=(
-                [cl.LangchainCallbackHandler()]
-                if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
-                else None
-            ),
         )
         self.question_generator = self.llm_tutor.question_generator
         cl.user_session.set("llm_tutor", self.llm_tutor)
@@ -375,7 +365,12 @@ class Chatbot:
                 "user_id": self.user["user_id"],
                 "conversation_id": self.user["session_id"],
                 "memory_window": self.config["llm_params"]["memory_window"],
-            }
+            },
+            "callbacks": (
+                [cl.LangchainCallbackHandler()]
+                if cl_data._data_layer and self.config["chat_logging"]["callbacks"]
+                else None
+            ),
         }
 
         if stream:
@@ -456,7 +451,11 @@ class Chatbot:
             type="user_message",
             author=self.user["user_id"],
         ).send()
-        await self.main(message)
+        async with cl.Step(
+            name="on_follow_up", type="run", parent_id=message.id
+        ) as step:
+            await self.main(message)
+            step.output = message.content
 
 
 chatbot = Chatbot(config=config)
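Taken together, the main.py hunks move chat logging from chain construction to chain invocation: the LangchainCallbackHandler is no longer baked into qa_bot(), but is attached to the payload each time the chain runs, and follow-ups are wrapped in a cl.Step so they appear as child runs of the originating message in the logged thread. A minimal sketch of the invocation-time pattern, assuming the chain wrapper accepts a single dict payload; build_payload and the "input"/"query" key names are placeholders, since only "user_id", "conversation_id", "memory_window", and "callbacks" appear in this diff:

import chainlit as cl
import chainlit.data as cl_data

def build_payload(query, user, config):
    # Hypothetical helper: callback handlers are created per invocation,
    # so toggling config["chat_logging"]["callbacks"] takes effect
    # without rebuilding the chain.
    return {
        "input": {
            "query": query,
            "user_id": user["user_id"],
            "conversation_id": user["session_id"],
            "memory_window": config["llm_params"]["memory_window"],
        },
        "callbacks": (
            [cl.LangchainCallbackHandler()]
            if cl_data._data_layer and config["chat_logging"]["callbacks"]
            else None
        ),
    }

Passing callbacks per call rather than at construction is the usual LangChain pattern for per-run tracing, which is presumably what the commit message "logging fixed" refers to.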
code/modules/chat/chat_model_loader.py CHANGED
@@ -28,7 +28,7 @@ class ChatModelLoader:
         elif self.config["llm_params"]["llm_loader"] == "local_llm":
             n_batch = 512  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
             model_path = self._verify_model_cache(
-                self.config["llm_params"]["local_llm_params"]["model"]
+                self.config["llm_params"]["local_llm_params"]["model_path"]
             )
             llm = LlamaCpp(
                 model_path=model_path,
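This lookup now matches the new model_path key added to config.yml below; the old lookup read a model key that the shipped config presumably no longer defined under local_llm_params, so resolving a locally cached GGUF file would fail before ever reaching LlamaCpp.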
code/modules/chat/helpers.py CHANGED
@@ -110,6 +110,7 @@ def get_prompt(config, prompt_type):
         return prompts["openai"]["rephrase_prompt"]
 
 
+# TODO: Do this better
 def get_history_chat_resume(steps, k, SYSTEM, LLM):
     conversation_list = []
     count = 0
@@ -119,14 +120,17 @@ def get_history_chat_resume(steps, k, SYSTEM, LLM):
             conversation_list.append(
                 {"type": "user_message", "content": step["output"]}
             )
+            count += 1
         elif step["type"] == "assistant_message":
            if step["name"] == LLM:
                 conversation_list.append(
                     {"type": "ai_message", "content": step["output"]}
                 )
+                count += 1
         else:
-            raise ValueError("Invalid message type")
-        count += 1
+            pass
+            # raise ValueError("Invalid message type")
+            # count += 1
         if count >= 2 * k:  # 2 * k to account for both user and assistant messages
             break
     conversation_list = conversation_list[::-1]
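The counting fix matters once resumed threads contain non-message steps, such as the on_follow_up run added in main.py above: previously every step incremented count and any unrecognized type raised ValueError, so a thread with run or tool steps would either crash on resume or silently shrink the restored window. Now only restored user and AI messages count toward 2 * k and everything else is skipped. A standalone sketch of the new behavior, with step dicts simplified from real Chainlit steps:

steps = [
    {"type": "user_message", "name": "User", "output": "hi"},
    {"type": "assistant_message", "name": "AI Tutor", "output": "hello"},
    {"type": "run", "name": "on_follow_up", "output": "..."},  # skipped, no ValueError
    {"type": "user_message", "name": "User", "output": "thanks"},
]

conversation_list, count, k = [], 0, 2
for step in steps:
    if step["type"] == "user_message":
        conversation_list.append({"type": "user_message", "content": step["output"]})
        count += 1
    elif step["type"] == "assistant_message" and step["name"] == "AI Tutor":
        conversation_list.append({"type": "ai_message", "content": step["output"]})
        count += 1
    # other step types (runs, tool calls) neither raise nor consume the window
    if count >= 2 * k:  # both user and assistant messages count
        break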
code/modules/config/config.yml CHANGED
@@ -37,13 +37,14 @@ llm_params:
     temperature: 0.7 # float
     repo_id: 'TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF' # HuggingFace repo id
     filename: 'tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Specific name of gguf file in the repo
+    model_path: 'storage/models/tinyllama-1.1b-chat-v1.0.Q5_0.gguf' # Path to the model file
   stream: False # bool
   pdf_reader: 'gpt' # str [llama, pymupdf, gpt]
 
 chat_logging:
   log_chat: True # bool
   platform: 'literalai'
-  callbacks: False # bool
+  callbacks: True # bool
 
 splitter_options:
   use_splitter: True # bool
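Two knobs change together here: model_path gives chat_model_loader.py an explicit on-disk location for the cached GGUF file (which the new .gitignore entry code/storage/models/ keeps out of version control), and callbacks: True re-enables the Literal AI LangChain callbacks now that main.py attaches them per invocation. A sketch of how the new keys are read, following the lookups shown in the diffs above:

import yaml

with open("code/modules/config/config.yml") as f:
    config = yaml.safe_load(f)

# chat_model_loader.py resolves the local model from the new key:
model_path = config["llm_params"]["local_llm_params"]["model_path"]

# main.py only attaches cl.LangchainCallbackHandler() when this is True
# (and a Chainlit data layer is configured):
callbacks_enabled = config["chat_logging"]["callbacks"]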
code/public/avatars/{ai-tutor.png → ai_tutor.png} RENAMED
File without changes
code/public/test.css CHANGED
@@ -13,10 +13,6 @@ a[href*='https://github.com/Chainlit/chainlit'] {
   border-radius: 50%; /* Maintain circular shape */
 }
 
-/* Hide the default image */
-.MuiAvatar-root.MuiAvatar-circular.css-m2icte .MuiAvatar-img.css-1hy9t21 {
-  display: none;
-}
 
 .MuiAvatar-root.MuiAvatar-circular.css-v72an7 {
   background-image: url('/public/avatars/ai-tutor.png'); /* Replace with your custom image URL */
@@ -26,18 +22,3 @@ a[href*='https://github.com/Chainlit/chainlit'] {
   height: 40px; /* Ensure the dimensions match the original */
   border-radius: 50%; /* Maintain circular shape */
 }
-
-/* Hide the default image */
-.MuiAvatar-root.MuiAvatar-circular.css-v72an7 .MuiAvatar-img.css-1hy9t21 {
-  display: none;
-}
-
-/* Hide the new chat button
-#new-chat-button {
-  display: none;
-} */
-
-/* Hide the open sidebar button
-#open-sidebar-button {
-  display: none;
-} */