from lollms.config import ASCIIColors
from lollms.config import TypedConfig, BaseConfig, ConfigTemplate, InstallOption
from lollms.types import MSG_TYPE
from lollms.personality import APScript, AIPersonality
import subprocess
from pathlib import Path
import os
import sys

# Make the sibling "sd" folder importable
sd_folder = Path(__file__).resolve().parent.parent / "sd"
sys.path.append(str(sd_folder))

import urllib.parse
import urllib.request
import json
import time
from functools import partial
import yaml
import re
import random
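
# This processor implements a simple tree-of-thoughts workflow: at each level it samples
# several candidate ideas from the model, asks the model to judge which candidate is best,
# keeps that idea, and finally asks the model to combine the kept ideas into a single essay.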

def find_matching_number(numbers, text):
    """Return the first number of `numbers` that appears as a whole word in `text`, with its index."""
    for index, number in enumerate(numbers):
        number_str = str(number)
        pattern = r"\b" + number_str + r"\b"  # match the whole word only
        match = re.search(pattern, text)
        if match:
            return number, index
    return None, None  # no matching number found


class Processor(APScript):
    """
    A class that processes model inputs and outputs.
    Inherits from APScript.
    """
    def __init__(
                    self,
                    personality: AIPersonality
                ) -> None:
        personality_config = TypedConfig(
            ConfigTemplate([
                {"name":"max_thought_size","type":"int","value":50, "min":10, "max":personality.model.config["ctx_size"]},
                {"name":"max_judgement_size","type":"int","value":50, "min":10, "max":personality.model.config["ctx_size"]},
                {"name":"nb_samples_per_idea","type":"int","value":3, "min":2, "max":100},
                {"name":"nb_ideas","type":"int","value":3, "min":2, "max":100}
            ]),
            BaseConfig(config={
                'max_thought_size'    : 50,
                'max_judgement_size'  : 50,
                'nb_samples_per_idea' : 3,
                'nb_ideas'            : 3
            })
        )
        super().__init__(
            personality,
            personality_config
        )

    def install(self):
        super().install()
        requirements_file = self.personality.personality_package_path / "requirements.txt"
        # install requirements
        subprocess.run(["pip", "install", "--upgrade", "--no-cache-dir", "-r", str(requirements_file)])
        ASCIIColors.success("Installed successfully")

    def process(self, text):
        # Streaming callback: accumulate tokens and stop generation when an antiprompt is detected
        bot_says = self.bot_says + text
        if self.personality.detect_antiprompt(bot_says):
            print("Detected hallucination")
            return False
        else:
            self.bot_says = bot_says
            return True

    def generate(self, prompt, max_size):
        self.bot_says = ""
        return self.personality.model.generate(
            prompt,
            max_size,
            self.process,
            temperature=self.personality.model_temperature,
            top_k=self.personality.model_top_k,
            top_p=self.personality.model_top_p,
            repeat_penalty=self.personality.model_repeat_penalty,
        ).strip()

    def run_workflow(self, prompt, previous_discussion_text="", callback=None):
        """
        Runs the workflow for processing the model input and output.

        This method should be called to execute the processing workflow.

        Args:
            prompt (str): The input prompt for the model.
            previous_discussion_text (str, optional): The text of the previous discussion. Defaults to an empty string.
            callback (function, optional): A callback function that gets called each time a new token is received.

        Returns:
            str: The summary generated from the selected ideas.
        """
        self.word_callback = callback
        self.bot_says = ""
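        # Tree-of-thoughts loop:
        #   1. For each of nb_ideas levels, sample nb_samples_per_idea candidate ideas.
        #   2. Ask the model to judge which candidate is best and keep it.
        #   3. After all levels, ask the model to combine the kept ideas into one essay.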
        final_ideas = []
        summary_prompt = ""
        for j in range(self.personality_config.nb_ideas):
            print(f"============= Starting level {j} of the tree =====================")
            local_ideas = []
            judgement_prompt = f"### prompt:\n{prompt}\n"
            for i in range(self.personality_config.nb_samples_per_idea):
                print(f"\nIdea {i+1}")
                if len(final_ideas) > 0:
                    # List the ideas kept at previous levels so the model can build on them
                    final_ideas_text = "\n".join([f"Idea {n}: {prev_idea}" for n, prev_idea in enumerate(final_ideas)])
                    idea_prompt = f"""### Instruction:
Write the next idea. Please give a single idea.
### Prompt:
{prompt}
### Previous ideas:
{final_ideas_text}
### Idea:To"""
                else:
                    idea_prompt = f"""### Instruction:
Write the next idea. Please give a single idea.
### Prompt:
{prompt}
### Idea:"""
                print(idea_prompt)
                idea = self.generate(idea_prompt, self.personality_config.max_thought_size)
                local_ideas.append(idea.strip())
                judgement_prompt += f"\n### Idea {i}:{idea}\n"
                if callback is not None:
                    callback(f"\n### Idea {i+1}:\n" + idea, MSG_TYPE.MSG_TYPE_FULL)
            # Ask the model to pick the best idea of this level by its number
            prompt_ids = ",".join([str(i) for i in range(self.personality_config.nb_samples_per_idea)])
            judgement_prompt += f"### Instructions:\nWhich idea seems the most appropriate? Answer the question by giving the best idea number without explanations.\nWhat is the best idea number {prompt_ids}?\n"
            print(judgement_prompt)
            self.bot_says = ""
            best_local_idea = self.generate(judgement_prompt, self.personality_config.max_judgement_size).strip()
            number, index = find_matching_number([i for i in range(self.personality_config.nb_samples_per_idea)], best_local_idea)
            if index is not None:
                print(f"Chosen thought n:{number}")
                final_ideas.append(local_ideas[number])
                if callback is not None:
                    callback(f"### Best local idea:\n{best_local_idea}", MSG_TYPE.MSG_TYPE_FULL)
            else:
                print("Warning, the model gave a wrong answer, taking a random idea as the best")
                number = random.randint(0, self.personality_config.nb_samples_per_idea - 1)
                print(f"Chosen thought n:{number}")
                final_ideas.append(local_ideas[number])
                if callback is not None:
                    callback(f"### Best local idea:\n{best_local_idea}", MSG_TYPE.MSG_TYPE_FULL)
        # Combine the retained ideas into a final essay
        summary_prompt += "### Instructions:\nCombine these ideas into a comprehensive essay.\n"
        for idea in final_ideas:
            summary_prompt += f"### Idea: {idea}\n"
        summary_prompt += "### Ideas summary:\n"
        print(summary_prompt)
        # Reuse max_thought_size as the generation budget for the final summary
        summary = self.generate(summary_prompt, self.personality_config.max_thought_size)
        return summary
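

# Hypothetical usage sketch (the lollms runtime normally instantiates this Processor
# itself with a loaded AIPersonality and calls run_workflow for each user prompt;
# the prompt text below is purely illustrative):
#
#   processor = Processor(personality)
#   essay = processor.run_workflow("Propose ways a small town could reduce food waste")
#   print(essay)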