""" | |
The `learning` module is designed to facilitate the collection and storage of user feedback on the outputs generated by the GPT Engineer tool. It provides mechanisms for obtaining user consent, capturing user reviews, and storing this information for future analysis and enhancement of the tool's performance. | |
Classes | |
------- | |
Review : dataclass | |
Represents a user's review of the generated code, including whether it ran, was perfect, was useful, and any additional comments. | |
Learning : dataclass | |
Encapsulates the metadata and feedback collected during a session of using the GPT Engineer tool, including the prompt, model, temperature, configuration, logs, session identifier, user review, and timestamp. | |
Functions | |
--------- | |
human_review_input() -> Optional[Review] | |
Interactively gathers feedback from the user regarding the performance of generated code and returns a Review instance. | |
check_collection_consent() -> bool | |
Checks if the user has previously given consent to store their data and, if not, asks for it. | |
ask_collection_consent() -> bool | |
Prompts the user for consent to store their data for the purpose of improving GPT Engineer. | |
extract_learning(prompt: Prompt, model: str, temperature: float, config: Tuple[str, ...], memory: DiskMemory, review: Review) -> Learning | |
Extracts feedback and session details to create a Learning instance based on the provided parameters. | |
get_session() -> str | |
Retrieves a unique identifier for the current user session, creating one if it does not exist. | |
Constants | |
--------- | |
TERM_CHOICES : tuple | |
Terminal color choices for user interactive prompts, formatted with termcolor for readability. | |
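
Examples
--------
A typical flow at the end of a CLI run (illustrative sketch; ``prompt``, ``model``,
``temperature``, ``config`` and ``memory`` are assumed to already exist in the
calling code)::

    review = human_review_input()
    if review is not None:
        learning = extract_learning(
            prompt, model, temperature, config, memory, review
        )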
""" | |
import json | |
import random | |
import tempfile | |
from dataclasses import dataclass, field | |
from datetime import datetime | |
from pathlib import Path | |
from typing import Optional, Tuple | |
from dataclasses_json import dataclass_json | |
from termcolor import colored | |
from gpt_engineer.core.default.disk_memory import DiskMemory | |
from gpt_engineer.core.prompt import Prompt | |


@dataclass_json
@dataclass
class Review:
    """
    A dataclass that represents a user's review of the generated code.

    Attributes
    ----------
    ran : Optional[bool]
        Indicates whether the generated code ran without errors.
    perfect : Optional[bool]
        Indicates whether the generated code met all the user's requirements.
    works : Optional[bool]
        Indicates whether the generated code was useful, even if not perfect.
    comments : str
        Any additional comments provided by the user.
    raw : str
        A raw string representation of the user's responses.
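
    Examples
    --------
    A hand-filled review with illustrative values (instances are normally
    produced by ``human_review_input``)::

        review = Review(
            ran=True,
            perfect=False,
            works=True,
            comments="Missing error handling",
            raw="y, n, y",
        )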
""" | |
ran: Optional[bool] | |
perfect: Optional[bool] | |
works: Optional[bool] | |
comments: str | |
raw: str | |


@dataclass_json
@dataclass
class Learning:
    """
    A dataclass that encapsulates the learning data collected during a GPT Engineer session.

    Attributes
    ----------
    prompt : str
        A JSON string representing the prompt provided to GPT Engineer.
    model : str
        The name of the model used during the session.
    temperature : float
        The temperature setting used for the model's responses.
    config : str
        A JSON string representing the configuration settings for the session.
    logs : str
        A JSON string representing the logs of the session.
    session : str
        A unique identifier for the user session.
    review : Optional[Review]
        The user's review of the generated code.
    timestamp : str
        The UTC timestamp when the learning data was created.
    version : str
        The version of the learning data schema.
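
    Examples
    --------
    Instances are normally assembled by ``extract_learning``; the fields can
    also be filled in directly (illustrative values)::

        learning = Learning(
            prompt='{"text": "Make a snake game"}',
            model="gpt-4",
            temperature=0.1,
            config='["default"]',
            logs="{}",
            session="1234",
            review=None,
        )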
""" | |
prompt: str | |
model: str | |
temperature: float | |
config: str | |
logs: str | |
session: str | |
review: Optional[Review] | |
timestamp: str = field(default_factory=lambda: datetime.utcnow().isoformat()) | |
version: str = "0.3" | |
TERM_CHOICES = ( | |
colored("y", "green") | |
+ "/" | |
+ colored("n", "red") | |
+ "/" | |
+ colored("u", "yellow") | |
+ "(ncertain): " | |
) | |


def human_review_input() -> Optional[Review]:
    """
    Interactively prompts the user to review the generated code and returns their feedback encapsulated in a Review object.

    This function will first check if the user has given consent to collect their feedback. If consent is given, it will ask the user a series of questions about the generated code's performance and capture their responses.

    Returns
    -------
    Optional[Review]
        A Review object containing the user's feedback, or None if consent is not given.
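
    Examples
    --------
    Typical use at the end of a run (illustrative; the answers are typed
    interactively by the user)::

        review = human_review_input()
        if review is not None:
            print(review.to_json())  # to_json() is provided by dataclasses_json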
""" | |
print() | |
if not check_collection_consent(): | |
return None | |
print() | |
print( | |
colored("To help gpt-engineer learn, please answer 3 questions:", "light_green") | |
) | |
print() | |
ran = input("Did the generated code run at all? " + TERM_CHOICES) | |
ran = ask_for_valid_input(ran) | |
if ran == "y": | |
perfect = input( | |
"Did the generated code do everything you wanted? " + TERM_CHOICES | |
) | |
perfect = ask_for_valid_input(perfect) | |
if perfect != "y": | |
useful = input("Did the generated code do anything useful? " + TERM_CHOICES) | |
useful = ask_for_valid_input(useful) | |
else: | |
useful = "" | |
else: | |
perfect = "" | |
useful = "" | |
if perfect != "y": | |
comments = input( | |
"If you have time, please explain what was not working " | |
+ colored("(ok to leave blank)\n", "light_green") | |
) | |
else: | |
comments = "" | |
return Review( | |
raw=", ".join([ran, perfect, useful]), | |
ran={"y": True, "n": False, "u": None, "": None}[ran], | |
works={"y": True, "n": False, "u": None, "": None}[useful], | |
perfect={"y": True, "n": False, "u": None, "": None}[perfect], | |
comments=comments, | |
) | |


def ask_for_valid_input(ran):
    """Re-prompt until the answer is one of 'y', 'n' or 'u', then return it."""
    while ran not in ("y", "n", "u"):
        ran = input("Invalid input. Please enter y, n, or u: ")

    return ran


def check_collection_consent() -> bool:
    """
    Checks if the user has previously given consent to store their data for feedback collection.

    This function looks for a file that stores the user's consent status. If the file exists and contains 'true', consent is assumed. If the file does not exist or does not contain 'true', the function will prompt the user for consent.

    Returns
    -------
    bool
        True if the user has given consent, False otherwise.
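
    Examples
    --------
    Consent is persisted in a ``.gpte_consent`` file in the current working
    directory (illustrative)::

        Path(".gpte_consent").write_text("true")
        assert check_collection_consent() is True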
""" | |
path = Path(".gpte_consent") | |
if path.exists() and path.read_text() == "true": | |
return True | |
else: | |
return ask_collection_consent() | |


def ask_collection_consent() -> bool:
    """
    Asks the user for their consent to store their data for the purpose of improving the GPT Engineer tool.

    The user's response is recorded in a file for future reference. If the user consents, the function will write 'true' to the file. If the user does not consent, no data will be collected, and the function will not modify the file.

    Returns
    -------
    bool
        True if the user consents, False otherwise.
    """
    answer = input(
        "Is it ok if we store your prompts to help improve GPT Engineer? (y/n)"
    )
    while answer.lower() not in ("y", "n"):
        answer = input("Invalid input. Please enter y or n: ")

    if answer.lower() == "y":
        path = Path(".gpte_consent")
        path.write_text("true")
        print(colored("Thank you", "light_green"))
        print()
        print(
            "(If you no longer wish to participate in data collection, delete the file .gpte_consent)"
        )
        return True
    else:
        print(
            colored(
                "No worries! GPT Engineer will not collect your prompts. ❤️",
                "light_green",
            )
        )
        return False


def extract_learning(
    prompt: Prompt,
    model: str,
    temperature: float,
    config: Tuple[str, ...],
    memory: DiskMemory,
    review: Review,
) -> Learning:
    """
    Constructs a Learning object containing the session's metadata and user feedback.

    Parameters
    ----------
    prompt : Prompt
        The Prompt object provided to GPT Engineer at the start of the session.
    model : str
        The name of the model used during the session.
    temperature : float
        The temperature setting used for the model's responses.
    config : Tuple[str, ...]
        A tuple representing the configuration settings for the session.
    memory : DiskMemory
        An object representing the disk memory used during the session.
    review : Review
        The user's review of the generated code.

    Returns
    -------
    Learning
        An instance of Learning containing all the session details and user feedback.
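
    Examples
    --------
    A sketch of assembling a record after a run; the path and values are
    illustrative and assume Prompt and DiskMemory accept these constructor
    arguments::

        learning = extract_learning(
            prompt=Prompt("Make a snake game"),
            model="gpt-4",
            temperature=0.1,
            config=("default",),
            memory=DiskMemory("projects/example/.gpteng/memory"),
            review=Review(ran=True, perfect=True, works=True, comments="", raw="y, y, "),
        )
        learning.to_json()  # serialisable via dataclasses_json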
""" | |
return Learning( | |
prompt=prompt.to_json(), | |
model=model, | |
temperature=temperature, | |
config=json.dumps(config), | |
session=get_session(), | |
logs=memory.to_json(), | |
review=review, | |
) | |


def get_session() -> str:
    """
    Retrieves or generates a unique identifier for the current user session.

    This function attempts to read a unique user ID from a temporary file. If the file does not exist, it generates a new random ID, writes it to the file, and returns it. This ID is used to uniquely identify the user's session.

    Returns
    -------
    str
        A unique identifier for the user session.
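
    Examples
    --------
    The identifier is cached in the system temp directory, so repeated calls
    in the same environment return the same value (illustrative)::

        session_id = get_session()
        assert session_id == get_session()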
""" | |
path = Path(tempfile.gettempdir()) / "gpt_engineer_user_id.txt" | |
try: | |
if path.exists(): | |
user_id = path.read_text() | |
else: | |
# random uuid: | |
user_id = str(random.randint(0, 2**32)) | |
path.write_text(user_id) | |
return user_id | |
except IOError: | |
return "ephemeral_" + str(random.randint(0, 2**32)) | |